def __init__(self, job, jobrunner_conn, internal_storage): self.jobrunner_conn = jobrunner_conn self.internal_storage = internal_storage self.lithops_config = job.config self.executor_id = job.executor_id self.call_id = job.call_id self.job_id = job.job_id self.func_key = job.func_key self.data_key = job.data_key self.data_byte_range = job.data_byte_range self.output_key = create_output_key(JOBS_PREFIX, self.executor_id, self.job_id, self.call_id) # Setup stats class self.stats = stats(job.jr_stats_file) # Setup prometheus for live metrics prom_enabled = self.lithops_config['lithops'].get('monitoring') prom_config = self.lithops_config.get('prometheus', {}) self.prometheus = PrometheusExporter(prom_enabled, prom_config) mode = self.lithops_config['lithops']['mode'] self.customized_runtime = self.lithops_config[mode].get( 'customized_runtime', False)
def get_call_output(self, executor_id, job_id, call_id): """ Get the output of a call. :param executor_id: executor ID of the call :param call_id: call ID of the call :return: Output of the call. """ output_key = create_output_key(executor_id, job_id, call_id) try: return self.storage.get_object(self.bucket, output_key) except StorageNoSuchKeyError: return None
def __init__(self, job, jobrunner_conn, internal_storage): self.job = job self.jobrunner_conn = jobrunner_conn self.internal_storage = internal_storage self.lithops_config = job.config self.output_key = create_output_key(job.executor_id, job.job_id, job.call_id) # Setup stats class self.stats = JobStats(self.job.stats_file) # Setup prometheus for live metrics prom_enabled = self.lithops_config['lithops'].get('telemetry') prom_config = self.lithops_config.get('prometheus', {}) self.prometheus = PrometheusExporter(prom_enabled, prom_config)
def __init__(self, task, jobrunner_conn, internal_storage): self.task = task self.jobrunner_conn = jobrunner_conn self.internal_storage = internal_storage self.lithops_config = task.config self.output_key = create_output_key(JOBS_PREFIX, self.task.executor_id, self.task.job_id, self.task.id) # Setup stats class self.stats = stats(self.task.stats_file) # Setup prometheus for live metrics prom_enabled = self.lithops_config['lithops'].get('monitoring') prom_config = self.lithops_config.get('prometheus', {}) self.prometheus = PrometheusExporter(prom_enabled, prom_config)
def function_handler(event): start_tstamp = time.time() log_level = event['log_level'] cloud_logging_config(log_level) logger.debug("Action handler started") extra_env = event.get('extra_env', {}) os.environ.update(extra_env) os.environ.update({'LITHOPS_FUNCTION': 'True', 'PYTHONUNBUFFERED': 'True'}) os.environ.pop('LITHOPS_TOTAL_EXECUTORS', None) config = event['config'] call_id = event['call_id'] job_id = event['job_id'] executor_id = event['executor_id'] exec_id = "{}/{}/{}".format(executor_id, job_id, call_id) logger.info("Execution-ID: {}".format(exec_id)) runtime_name = event['runtime_name'] runtime_memory = event['runtime_memory'] execution_timeout = event['execution_timeout'] logger.debug("Runtime name: {}".format(runtime_name)) logger.debug("Runtime memory: {}MB".format(runtime_memory)) logger.debug("Function timeout: {}s".format(execution_timeout)) func_key = event['func_key'] data_key = event['data_key'] data_byte_range = event['data_byte_range'] storage_config = extract_storage_config(config) internal_storage = InternalStorage(storage_config) call_status = CallStatus(config, internal_storage) call_status.response['host_submit_tstamp'] = event['host_submit_tstamp'] call_status.response['worker_start_tstamp'] = start_tstamp context_dict = { 'python_version': os.environ.get("PYTHON_VERSION"), 'call_id': call_id, 'job_id': job_id, 'executor_id': executor_id, 'activation_id': os.environ.get('__PW_ACTIVATION_ID') } call_status.response.update(context_dict) show_memory_peak = strtobool(os.environ.get('SHOW_MEMORY_PEAK', 'False')) try: if version.__version__ != event['lithops_version']: msg = ( "Lithops version mismatch. Host version: {} - Runtime version: {}" .format(event['lithops_version'], version.__version__)) raise RuntimeError('HANDLER', msg) # send init status event call_status.send('__init__') # call_status.response['free_disk_bytes'] = free_disk_space("/tmp") custom_env = { 'LITHOPS_CONFIG': json.dumps(config), 'LITHOPS_EXECUTION_ID': exec_id, 'PYTHONPATH': "{}:{}".format(os.getcwd(), LITHOPS_LIBS_PATH) } os.environ.update(custom_env) jobrunner_stats_dir = os.path.join(STORAGE_FOLDER, storage_config['bucket'], JOBS_PREFIX, executor_id, job_id, call_id) os.makedirs(jobrunner_stats_dir, exist_ok=True) jobrunner_stats_filename = os.path.join(jobrunner_stats_dir, 'jobrunner.stats.txt') jobrunner_config = { 'lithops_config': config, 'call_id': call_id, 'job_id': job_id, 'executor_id': executor_id, 'func_key': func_key, 'data_key': data_key, 'log_level': log_level, 'data_byte_range': data_byte_range, 'output_key': create_output_key(JOBS_PREFIX, executor_id, job_id, call_id), 'stats_filename': jobrunner_stats_filename } if show_memory_peak: mm_handler_conn, mm_conn = Pipe() memory_monitor = Thread(target=memory_monitor_worker, args=(mm_conn, )) memory_monitor.start() handler_conn, jobrunner_conn = Pipe() jobrunner = JobRunner(jobrunner_config, jobrunner_conn, internal_storage) logger.debug('Starting JobRunner process') local_execution = strtobool( os.environ.get('__PW_LOCAL_EXECUTION', 'False')) jrp = Thread(target=jobrunner.run) if local_execution else Process( target=jobrunner.run) jrp.start() jrp.join(execution_timeout) logger.debug('JobRunner process finished') if jrp.is_alive(): # If process is still alive after jr.join(job_max_runtime), kill it try: jrp.terminate() except Exception: # thread does not have terminate method pass msg = ('Function exceeded maximum time of {} seconds and was ' 'killed'.format(execution_timeout)) raise TimeoutError('HANDLER', msg) if show_memory_peak: mm_handler_conn.send('STOP') memory_monitor.join() peak_memory_usage = int(mm_handler_conn.recv()) logger.info("Peak memory usage: {}".format( sizeof_fmt(peak_memory_usage))) call_status.response['peak_memory_usage'] = peak_memory_usage if not handler_conn.poll(): logger.error( 'No completion message received from JobRunner process') logger.debug('Assuming memory overflow...') # Only 1 message is returned by jobrunner when it finishes. # If no message, this means that the jobrunner process was killed. # 99% of times the jobrunner is killed due an OOM, so we assume here an OOM. msg = 'Function exceeded maximum memory and was killed' raise MemoryError('HANDLER', msg) if os.path.exists(jobrunner_stats_filename): with open(jobrunner_stats_filename, 'r') as fid: for l in fid.readlines(): key, value = l.strip().split(" ", 1) try: call_status.response[key] = float(value) except Exception: call_status.response[key] = value if key in [ 'exception', 'exc_pickle_fail', 'result', 'new_futures' ]: call_status.response[key] = eval(value) except Exception: # internal runtime exceptions print('----------------------- EXCEPTION !-----------------------', flush=True) traceback.print_exc(file=sys.stdout) print('----------------------------------------------------------', flush=True) call_status.response['exception'] = True pickled_exc = pickle.dumps(sys.exc_info()) pickle.loads( pickled_exc) # this is just to make sure they can be unpickled call_status.response['exc_info'] = str(pickled_exc) finally: call_status.response['worker_end_tstamp'] = time.time() call_status.send('__end__') for key in extra_env: os.environ.pop(key) logger.info("Finished")