def function_invoker(event):
    if __version__ != event['cloudbutton_version']:
        raise Exception("WRONGVERSION", "PyWren version mismatch",
                        __version__, event['cloudbutton_version'])

    if event['log_level']:
        cloud_logging_config(event['log_level'])
    log_level = logging.getLevelName(logger.getEffectiveLevel())

    custom_env = {'CLOUDBUTTON_FUNCTION': 'True',
                  'PYTHONUNBUFFERED': 'True',
                  'CLOUDBUTTON_LOGLEVEL': log_level}
    os.environ.update(custom_env)

    config = event['config']
    num_invokers = event['invokers']
    invoker = FunctionInvoker(config, num_invokers, log_level)
    invoker.run(event['job_description'])
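
# --- Illustrative sketch only, not part of the original module. ---
# A minimal example of the event payload that function_invoker reads,
# assembled from the keys accessed above. All concrete values are
# hypothetical placeholders.
example_invoker_event = {
    'cloudbutton_version': '1.0.0',   # must match the runtime's __version__
    'log_level': 'INFO',              # forwarded to cloud_logging_config
    'config': {},                     # full cloudbutton configuration dict
    'invokers': 4,                    # number of parallel invokers to spawn
    'job_description': {}             # job metadata consumed by invoker.run()
}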
def __init__(self, jr_config, jobrunner_conn, internal_storage):
    self.jr_config = jr_config
    self.jobrunner_conn = jobrunner_conn
    self.internal_storage = internal_storage

    log_level = self.jr_config['log_level']
    cloud_logging_config(log_level)

    self.cloudbutton_config = self.jr_config['cloudbutton_config']
    self.call_id = self.jr_config['call_id']
    self.job_id = self.jr_config['job_id']
    self.executor_id = self.jr_config['executor_id']
    self.func_key = self.jr_config['func_key']
    self.data_key = self.jr_config['data_key']
    self.data_byte_range = self.jr_config['data_byte_range']
    self.output_key = self.jr_config['output_key']
    self.stats = stats(self.jr_config['stats_filename'])
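
# --- Illustrative sketch only, not part of the original module. ---
# The jr_config dict unpacked in __init__ above carries these keys; the
# handler builds the same dict before spawning the JobRunner (see
# function_handler below). Values here are hypothetical placeholders.
example_jr_config = {
    'cloudbutton_config': {},
    'log_level': 'INFO',
    'call_id': '00000',
    'job_id': 'A000',
    'executor_id': 'example-executor',
    'func_key': 'funcs/example.func.pickle',
    'data_key': 'data/example.data.pickle',
    'data_byte_range': None,
    'output_key': 'jobs/example-executor/A000/00000/output.pickle',
    'stats_filename': '/tmp/jobrunner.stats.txt'
}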
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import logging
import os

from cloudbutton.config import cloud_logging_config
from cloudbutton.engine.agent.handler import function_handler

cloud_logging_config(logging.INFO)
logger = logging.getLogger('__main__')


def main(event, context):
    logger.info("Starting AWS Lambda Function execution")
    os.environ['__OW_ACTIVATION_ID'] = context.aws_request_id
    os.environ['__PW_ACTIVATION_ID'] = context.aws_request_id
    function_handler(event)
    return {"Execution": "Finished"}
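
# --- Illustrative sketch only, not part of the original module. ---
# A minimal way to exercise main() outside AWS Lambda with a stand-in
# context object; main() only reads the aws_request_id attribute. The
# event dict would normally be built by the cloudbutton invoker, so it is
# left out here and the call is shown commented.
class _FakeLambdaContext:
    aws_request_id = 'local-test-0000'

# main(example_handler_event, _FakeLambdaContext())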
def function_handler(event):
    start_tstamp = time.time()

    log_level = event['log_level']
    cloud_logging_config(log_level)
    logger.debug("Action handler started")

    extra_env = event.get('extra_env', {})
    os.environ.update(extra_env)
    os.environ.update({'CLOUDBUTTON_FUNCTION': 'True',
                       'PYTHONUNBUFFERED': 'True'})

    config = event['config']
    call_id = event['call_id']
    job_id = event['job_id']
    executor_id = event['executor_id']
    exec_id = "{}/{}/{}".format(executor_id, job_id, call_id)
    logger.info("Execution-ID: {}".format(exec_id))

    runtime_name = event['runtime_name']
    runtime_memory = event['runtime_memory']
    execution_timeout = event['execution_timeout']
    logger.debug("Runtime name: {}".format(runtime_name))
    logger.debug("Runtime memory: {}MB".format(runtime_memory))
    logger.debug("Function timeout: {}s".format(execution_timeout))

    func_key = event['func_key']
    data_key = event['data_key']
    data_byte_range = event['data_byte_range']

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    call_status = CallStatus(config, internal_storage)
    call_status.response['host_submit_tstamp'] = event['host_submit_tstamp']
    call_status.response['start_tstamp'] = start_tstamp
    context_dict = {
        'cloudbutton_version': os.environ.get("CLOUDBUTTON_VERSION"),
        'call_id': call_id,
        'job_id': job_id,
        'executor_id': executor_id,
        'activation_id': os.environ.get('__PW_ACTIVATION_ID')
    }
    call_status.response.update(context_dict)

    show_memory_peak = strtobool(os.environ.get('SHOW_MEMORY_PEAK', 'False'))

    try:
        if version.__version__ != event['cloudbutton_version']:
            msg = ("Cloudbutton version mismatch. Host version: {} - Runtime version: {}"
                   .format(event['cloudbutton_version'], version.__version__))
            raise RuntimeError('HANDLER', msg)

        # send init status event
        call_status.send('__init__')

        # call_status.response['free_disk_bytes'] = free_disk_space("/tmp")

        custom_env = {'CLOUDBUTTON_CONFIG': json.dumps(config),
                      'CLOUDBUTTON_EXECUTION_ID': exec_id,
                      'PYTHONPATH': "{}:{}".format(os.getcwd(), LIBS_PATH)}
        os.environ.update(custom_env)

        jobrunner_stats_dir = os.path.join(STORAGE_FOLDER,
                                           storage_config['bucket'],
                                           JOBS_PREFIX,
                                           executor_id, job_id, call_id)
        os.makedirs(jobrunner_stats_dir, exist_ok=True)
        jobrunner_stats_filename = os.path.join(jobrunner_stats_dir, 'jobrunner.stats.txt')

        jobrunner_config = {'cloudbutton_config': config,
                            'call_id': call_id,
                            'job_id': job_id,
                            'executor_id': executor_id,
                            'func_key': func_key,
                            'data_key': data_key,
                            'log_level': log_level,
                            'data_byte_range': data_byte_range,
                            'output_key': create_output_key(JOBS_PREFIX, executor_id, job_id, call_id),
                            'stats_filename': jobrunner_stats_filename}

        if show_memory_peak:
            mm_handler_conn, mm_conn = Pipe()
            memory_monitor = Thread(target=memory_monitor_worker, args=(mm_conn, ))
            memory_monitor.start()

        handler_conn, jobrunner_conn = Pipe()
        jobrunner = JobRunner(jobrunner_config, jobrunner_conn, internal_storage)
        logger.debug('Starting JobRunner process')
        local_execution = strtobool(os.environ.get('__PW_LOCAL_EXECUTION', 'False'))
        jrp = Thread(target=jobrunner.run) if local_execution else Process(target=jobrunner.run)
        jrp.start()
        jrp.join(execution_timeout)
        logger.debug('JobRunner process finished')

        if jrp.is_alive():
            # If the process is still alive after jrp.join(execution_timeout), kill it
            try:
                jrp.terminate()
            except Exception:
                # a Thread does not have a terminate method
                pass
            msg = ('Function exceeded maximum time of {} seconds and was '
                   'killed'.format(execution_timeout))
            raise TimeoutError('HANDLER', msg)

        if show_memory_peak:
            mm_handler_conn.send('STOP')
            memory_monitor.join()
            peak_memory_usage = int(mm_handler_conn.recv())
            logger.info("Peak memory usage: {}".format(sizeof_fmt(peak_memory_usage)))
            call_status.response['peak_memory_usage'] = peak_memory_usage

        if not handler_conn.poll():
            logger.error('No completion message received from JobRunner process')
            logger.debug('Assuming memory overflow...')
            # Only 1 message is returned by the jobrunner when it finishes.
            # If there is no message, the jobrunner process was killed.
            # In the vast majority of cases the jobrunner is killed due to an
            # OOM, so an OOM is assumed here.
            msg = 'Function exceeded maximum memory and was killed'
            raise MemoryError('HANDLER', msg)

        if os.path.exists(jobrunner_stats_filename):
            with open(jobrunner_stats_filename, 'r') as fid:
                for l in fid.readlines():
                    key, value = l.strip().split(" ", 1)
                    try:
                        call_status.response[key] = float(value)
                    except Exception:
                        call_status.response[key] = value
                    if key in ['exception', 'exc_pickle_fail', 'result', 'new_futures']:
                        call_status.response[key] = eval(value)

    except Exception:
        # internal runtime exceptions
        print('----------------------- EXCEPTION !-----------------------', flush=True)
        traceback.print_exc(file=sys.stdout)
        print('----------------------------------------------------------', flush=True)
        call_status.response['exception'] = True

        pickled_exc = pickle.dumps(sys.exc_info())
        pickle.loads(pickled_exc)  # this is just to make sure it can be unpickled
        call_status.response['exc_info'] = str(pickled_exc)

    finally:
        call_status.response['end_tstamp'] = time.time()
        call_status.send('__end__')

        for key in extra_env:
            os.environ.pop(key)

        logger.info("Finished")
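
# --- Illustrative sketch only, not part of the original module. ---
# A minimal example of the event payload that function_handler reads,
# assembled from the keys accessed above. All concrete values, keys and
# key paths are hypothetical placeholders.
import time

example_handler_event = {
    'log_level': 'INFO',
    'extra_env': {},                          # optional; defaults to {}
    'config': {},                             # full cloudbutton configuration dict
    'call_id': '00000',
    'job_id': 'A000',
    'executor_id': 'example-executor',
    'runtime_name': 'example-runtime',
    'runtime_memory': 256,                    # MB
    'execution_timeout': 600,                 # seconds
    'func_key': 'funcs/example.func.pickle',
    'data_key': 'data/example.data.pickle',
    'data_byte_range': None,
    'host_submit_tstamp': time.time(),
    'cloudbutton_version': '1.0.0'            # must match the runtime's version
}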