def _dump_events_to_files(events_with_added_uuid):
    current_time_millis = int(round(time.time() * 1000))
    for event in events_with_added_uuid:
        save_file(
            os.path.join(EVENTS_TMP_DIR, "%s_%s" % (current_time_millis, event["uuid"])),
            json.dumps(event["event"]),
        )
def exec_lambda_code(script, handler_function='handler', lambda_cwd=None, lambda_env=None):
    if lambda_cwd or lambda_env:
        exec_mutex.acquire()
        if lambda_cwd:
            previous_cwd = os.getcwd()
            os.chdir(lambda_cwd)
            sys.path = [lambda_cwd] + sys.path
        if lambda_env:
            previous_env = dict(os.environ)
            os.environ.update(lambda_env)
    # generate lambda file name
    lambda_id = 'l_%s' % short_uid()
    lambda_file = LAMBDA_SCRIPT_PATTERN.replace('*', lambda_id)
    save_file(lambda_file, script)
    # delete temporary .py and .pyc files on exit
    TMP_FILES.append(lambda_file)
    TMP_FILES.append('%sc' % lambda_file)
    try:
        handler_module = imp.load_source(lambda_id, lambda_file)
        module_vars = handler_module.__dict__
    except Exception as e:
        LOG.error('Unable to exec: %s %s' % (script, traceback.format_exc()))
        raise e
    finally:
        if lambda_cwd or lambda_env:
            if lambda_cwd:
                os.chdir(previous_cwd)
                sys.path.pop(0)
            if lambda_env:
                os.environ = previous_env
            exec_mutex.release()
    return module_vars[handler_function]
def download_and_extract(archive_url, target_dir, retries=0, sleep=3, tmp_archive=None):
    mkdir(target_dir)
    tmp_archive = tmp_archive or new_tmp_file()
    if not os.path.exists(tmp_archive):
        # create temporary placeholder file, to avoid duplicate parallel downloads
        save_file(tmp_archive, '')
    for i in range(retries + 1):
        try:
            download(archive_url, tmp_archive)
            break
        except Exception:
            time.sleep(sleep)
    _, ext = os.path.splitext(tmp_archive)
    if ext == '.zip':
        unzip(tmp_archive, target_dir)
    elif ext == '.gz' or ext == '.bz2':
        untar(tmp_archive, target_dir)
    else:
        raise Exception('Unsupported archive format: %s' % ext)
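A minimal usage sketch of the helper above (not from the original source; the URL and paths are illustrative):

# hypothetical archive URL and target directory
download_and_extract(
    'https://example.com/releases/tool-1.0.zip',
    '/tmp/tool',
    retries=2,   # re-attempt the download up to two more times on failure
    sleep=5,     # seconds to wait between attempts
)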
def install_dynamodb_local():
    if not os.path.exists(INSTALL_DIR_DDB):
        LOGGER.info('Downloading and installing local DynamoDB server. This may take some time.')
        mkdir(INSTALL_DIR_DDB)
        # download and extract archive
        download_and_extract_with_retry(DYNAMODB_JAR_URL, TMP_ARCHIVE_DDB, INSTALL_DIR_DDB)

    # fix for Alpine, otherwise DynamoDBLocal fails with:
    # DynamoDBLocal_lib/libsqlite4java-linux-amd64.so: __memcpy_chk: symbol not found
    if is_alpine():
        ddb_libs_dir = '%s/DynamoDBLocal_lib' % INSTALL_DIR_DDB
        patched_marker = '%s/alpine_fix_applied' % ddb_libs_dir
        if not os.path.exists(patched_marker):
            patched_lib = ('https://rawgit.com/bhuisgen/docker-alpine/master/alpine-dynamodb/' +
                'rootfs/usr/local/dynamodb/DynamoDBLocal_lib/libsqlite4java-linux-amd64.so')
            patched_jar = ('https://rawgit.com/bhuisgen/docker-alpine/master/alpine-dynamodb/' +
                'rootfs/usr/local/dynamodb/DynamoDBLocal_lib/sqlite4java.jar')
            run("curl -L -o %s/libsqlite4java-linux-amd64.so '%s'" % (ddb_libs_dir, patched_lib))
            run("curl -L -o %s/sqlite4java.jar '%s'" % (ddb_libs_dir, patched_jar))
            save_file(patched_marker, '')
def get_machine_id():
    global MACHINE_ID
    if MACHINE_ID:
        return MACHINE_ID

    # determine MACHINE_ID from config files
    configs_map = {}
    config_file_tmp = get_config_file_tempdir()
    config_file_home = get_config_file_homedir()
    for config_file in (config_file_home, config_file_tmp):
        if config_file:
            local_configs = load_file(config_file)
            local_configs = json.loads(to_str(local_configs))
            configs_map[config_file] = local_configs
            if 'machine_id' in local_configs:
                MACHINE_ID = local_configs['machine_id']
                break

    # if we can neither find NOR create the config files, fall back to process id
    if not configs_map:
        return PROCESS_ID

    # assign default id if empty
    if not MACHINE_ID:
        MACHINE_ID = short_uid()

    # update MACHINE_ID in all config files
    for config_file, configs in configs_map.items():
        configs['machine_id'] = MACHINE_ID
        save_file(config_file, json.dumps(configs))

    return MACHINE_ID
def install_dynamodb_local():
    if OVERWRITE_DDB_FILES_IN_DOCKER and in_docker():
        rm_rf(INSTALL_DIR_DDB)
    is_in_alpine = is_alpine()
    if not os.path.exists(INSTALL_PATH_DDB_JAR):
        log_install_msg('DynamoDB')
        # download and extract archive
        tmp_archive = os.path.join(tempfile.gettempdir(), 'localstack.ddb.zip')
        dynamodb_url = DYNAMODB_JAR_URL_ALPINE if is_in_alpine else DYNAMODB_JAR_URL
        download_and_extract_with_retry(dynamodb_url, tmp_archive, INSTALL_DIR_DDB)

    # fix logging configuration for DynamoDBLocal
    log4j2_config = """<Configuration status="WARN">
      <Appenders>
        <Console name="Console" target="SYSTEM_OUT">
          <PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/>
        </Console>
      </Appenders>
      <Loggers>
        <Root level="WARN"><AppenderRef ref="Console"/></Root>
      </Loggers>
    </Configuration>"""
    log4j2_file = os.path.join(INSTALL_DIR_DDB, 'log4j2.xml')
    save_file(log4j2_file, log4j2_config)
    run('cd "%s" && zip -u DynamoDBLocal.jar log4j2.xml || true' % INSTALL_DIR_DDB)
def set_archive_code(code, lambda_name, zip_file_content=None):
    # get metadata
    lambda_arn = func_arn(lambda_name)
    lambda_details = arn_to_lambda[lambda_arn]
    is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(lambda_arn)

    if is_local_mount:
        # Mount or use a local folder lambda executors can reference
        # WARNING: this means we're pointing lambda_cwd to a local path in the user's
        # file system! We must ensure that there is no data loss (i.e., we must *not* add
        # this folder to TMP_FILES or similar).
        return code['S3Key']

    # get file content
    zip_file_content = zip_file_content or get_zip_bytes(code)

    # Save the zip file to a temporary file that the lambda executors can reference
    code_sha_256 = base64.standard_b64encode(hashlib.sha256(zip_file_content).digest())
    lambda_details.get_version('$LATEST')['CodeSize'] = len(zip_file_content)
    lambda_details.get_version('$LATEST')['CodeSha256'] = code_sha_256.decode('utf-8')
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    mkdir(tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_details.cwd = tmp_dir
    return tmp_dir
def start_sqs(port=None, asynchronous=False, update_listener=None):
    port = port or config.PORT_SQS
    install_elasticmq()
    backend_port = DEFAULT_PORT_SQS_BACKEND
    # create config file
    config_params = """
    include classpath("application.conf")
    node-address {
        protocol = http
        host = "%s"
        port = %s
        context-path = ""
    }
    rest-sqs {
        enabled = true
        bind-port = %s
        bind-hostname = "0.0.0.0"
        sqs-limits = strict
    }
    """ % (LOCALSTACK_HOSTNAME, port, backend_port)
    config_file = os.path.join(TMP_FOLDER, 'sqs.%s.conf' % short_uid())
    TMP_FILES.append(config_file)
    save_file(config_file, config_params)
    # start process
    cmd = ('java -Dconfig.file=%s -Xmx%s -jar %s/elasticmq-server.jar' %
        (config_file, MAX_HEAP_SIZE, INSTALL_DIR_ELASTICMQ))
    print('Starting mock SQS (%s port %s)...' % (get_service_protocol(), port))
    start_proxy_for_service('sqs', port, backend_port, update_listener)
    return do_run(cmd, asynchronous)
def get_java_handler(zip_file_content, handler, main_file):
    """Creates a Java handler from an uploaded ZIP or JAR.

    :type zip_file_content: bytes
    :param zip_file_content: ZIP file bytes.
    :type handler: str
    :param handler: The lambda handler path.
    :type main_file: str
    :param main_file: Filepath to the uploaded ZIP or JAR file.

    :returns: function or flask.Response
    """
    if not is_jar_archive(zip_file_content):
        with zipfile.ZipFile(BytesIO(zip_file_content)) as zip_ref:
            jar_entries = [e for e in zip_ref.infolist() if e.filename.endswith('.jar')]
            if len(jar_entries) != 1:
                raise ClientError('Expected exactly one *.jar entry in zip file, found %s' % len(jar_entries))
            zip_file_content = zip_ref.read(jar_entries[0].filename)
            LOG.info('Found jar file %s with %s bytes in Lambda zip archive' %
                (jar_entries[0].filename, len(zip_file_content)))
            main_file = new_tmp_file()
            save_file(main_file, zip_file_content)
    if is_jar_archive(zip_file_content):
        def execute(event, context):
            result, log_output = lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(
                event, context, handler=handler, main_file=main_file)
            return result
        return execute, zip_file_content
    raise ClientError(error_response(
        'Unable to extract Java Lambda handler - file is not a valid zip/jar file', 400,
        error_type='ValidationError'))
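A hedged usage sketch for get_java_handler above, assuming the surrounding Lambda API module is imported; the JAR path and handler class are placeholders:

# read a (hypothetical) JAR uploaded by a client
with open('/tmp/fn.jar', 'rb') as f:
    jar_bytes = f.read()
# for a valid JAR, returns a tuple (execute_function, jar_bytes)
handler, code_bytes = get_java_handler(jar_bytes, 'com.example.Handler::handleRequest', '/tmp/fn.jar')
result = handler({'message': 'ping'}, None)  # runs via the local Java executor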
def get_lambda_code_param(params, _include_arch=False, **kwargs):
    code = params.get("Code", {})
    zip_file = code.get("ZipFile")
    if zip_file and not is_base64(zip_file) and not is_zip_file(to_bytes(zip_file)):
        tmp_dir = new_tmp_dir()
        handler_file = get_handler_file_from_name(params["Handler"], runtime=params["Runtime"])
        tmp_file = os.path.join(tmp_dir, handler_file)
        save_file(tmp_file, zip_file)
        # add 'cfn-response' module to archive - see:
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-lambda-function-code-cfnresponsemodule.html
        cfn_response_tmp_file = get_cfn_response_mod_file()
        cfn_response_mod_dir = os.path.join(tmp_dir, "node_modules", "cfn-response")
        mkdir(cfn_response_mod_dir)
        cp_r(cfn_response_tmp_file, os.path.join(cfn_response_mod_dir, "index.js"))
        # create zip file
        zip_file = create_zip_file(tmp_dir, get_content=True)
        code["ZipFile"] = zip_file
        rm_rf(tmp_dir)
    if _include_arch and "Architectures" in params:
        code["Architectures"] = params.get("Architectures")
    return code
def install_dynamodb_local():
    if not os.path.exists(INSTALL_DIR_DDB):
        LOGGER.info('Downloading and installing local DynamoDB server. This may take some time.')
        mkdir(INSTALL_DIR_DDB)
        # download and extract archive
        download_and_extract_with_retry(DYNAMODB_JAR_URL, TMP_ARCHIVE_DDB, INSTALL_DIR_DDB)

    # fix for Alpine, otherwise DynamoDBLocal fails with:
    # DynamoDBLocal_lib/libsqlite4java-linux-amd64.so: __memcpy_chk: symbol not found
    if is_alpine():
        ddb_libs_dir = '%s/DynamoDBLocal_lib' % INSTALL_DIR_DDB
        patched_marker = '%s/alpine_fix_applied' % ddb_libs_dir
        if not os.path.exists(patched_marker):
            patched_lib = ('https://rawgit.com/bhuisgen/docker-alpine/master/alpine-dynamodb/' +
                'rootfs/usr/local/dynamodb/DynamoDBLocal_lib/libsqlite4java-linux-amd64.so')
            patched_jar = ('https://rawgit.com/bhuisgen/docker-alpine/master/alpine-dynamodb/' +
                'rootfs/usr/local/dynamodb/DynamoDBLocal_lib/sqlite4java.jar')
            run("curl -L -o %s/libsqlite4java-linux-amd64.so '%s'" % (ddb_libs_dir, patched_lib))
            run("curl -L -o %s/sqlite4java.jar '%s'" % (ddb_libs_dir, patched_jar))
            save_file(patched_marker, '')

    # fix logging configuration for DynamoDBLocal
    log4j2_config = """<Configuration status="WARN">
      <Appenders>
        <Console name="Console" target="SYSTEM_OUT">
          <PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/>
        </Console>
      </Appenders>
      <Loggers>
        <Root level="WARN"><AppenderRef ref="Console"/></Root>
      </Loggers>
    </Configuration>"""
    log4j2_file = os.path.join(INSTALL_DIR_DDB, 'log4j2.xml')
    save_file(log4j2_file, log4j2_config)
    run('cd "%s" && zip -u DynamoDBLocal.jar log4j2.xml || true' % INSTALL_DIR_DDB)
def create_lambda_archive(script, get_content=False, libs=None, runtime=None, file_name=None):
    """Utility method to create a Lambda function archive"""
    libs = libs or []  # avoid a mutable default argument
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    TMP_FILES.append(tmp_dir)
    file_name = file_name or get_handler_file_from_name(LAMBDA_DEFAULT_HANDLER, runtime=runtime)
    script_file = os.path.join(tmp_dir, file_name)
    if os.path.sep in script_file:
        mkdir(os.path.dirname(script_file))
    save_file(script_file, script)
    # copy libs
    for lib in libs:
        paths = [lib, '%s.py' % lib]
        target_dir = tmp_dir
        root_folder = os.path.join(LOCALSTACK_VENV_FOLDER, 'lib/python*/site-packages')
        if lib == 'localstack':
            paths = ['localstack/*.py', 'localstack/utils']
            root_folder = LOCALSTACK_ROOT_FOLDER
            target_dir = os.path.join(tmp_dir, lib)
            mkdir(target_dir)
        for path in paths:
            file_to_copy = os.path.join(root_folder, path)
            for file_path in glob.glob(file_to_copy):
                name = os.path.join(target_dir, file_path.split(os.path.sep)[-1])
                if os.path.isdir(file_path):
                    shutil.copytree(file_path, name)
                else:
                    shutil.copyfile(file_path, name)
    # create zip file
    return create_zip_file(tmp_dir, get_content=get_content)
def _do_start_ssl_proxy_with_client_auth(port: int, target: str, client_cert_key: Tuple[str, str]):
    base_url = f"{'https://' if '://' not in target else ''}{target.rstrip('/')}"

    # prepare cert files (TODO: check whether/how we can pass cert strings to requests.request(..) directly)
    cert_file = client_cert_key[0]
    if not os.path.exists(cert_file):
        cert_file = new_tmp_file()
        save_file(cert_file, client_cert_key[0])
    key_file = client_cert_key[1]
    if not os.path.exists(key_file):
        key_file = new_tmp_file()
        save_file(key_file, client_cert_key[1])
    cert_params = (cert_file, key_file)

    # define forwarding listener
    class Listener(ProxyListener):
        def forward_request(self, method, path, data, headers):
            url = f"{base_url}{path}"
            result = requests.request(
                method=method, url=url, data=data, headers=headers,
                cert=cert_params, verify=False)
            return result

    proxy_thread = start_proxy_server(port, update_listener=Listener(), use_ssl=True)
    return proxy_thread
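An illustrative call, assuming PEM strings (or existing file paths) are at hand; the port and target host are placeholders, not from the original source:

# cert_pem/key_pem may be PEM strings (written to tmp files) or existing file paths
proxy = _do_start_ssl_proxy_with_client_auth(
    port=4566,
    target='backend.example.com:8443',  # 'https://' is prepended if no scheme is given
    client_cert_key=(cert_pem, key_pem),
)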
def generate_processor_script(events_file, log_file=None):
    script_file = os.path.join(tempfile.gettempdir(), 'kclipy.%s.processor.py' % short_uid())
    if log_file:
        log_file = "'%s'" % log_file
    else:
        log_file = 'None'
    content = """#!/usr/bin/env python

import os, sys, glob, json, socket, time, logging, subprocess, tempfile
logging.basicConfig(level=logging.INFO)
for path in glob.glob('%s/lib/python*/site-packages'):
    sys.path.insert(0, path)
sys.path.insert(0, '%s')
from localstack.config import DEFAULT_ENCODING
from localstack.utils.kinesis import kinesis_connector
from localstack.utils.common import timestamp
events_file = '%s'
log_file = %s
error_log = os.path.join(tempfile.gettempdir(), 'kclipy.error.log')
if __name__ == '__main__':
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    num_tries = 3
    sleep_time = 2
    error = None
    for i in range(0, num_tries):
        try:
            sock.connect(events_file)
            error = None
            break
        except Exception as e:
            error = e
            if i < num_tries:
                msg = '%%s: Unable to connect to UNIX socket. Retrying.' %% timestamp()
                subprocess.check_output('echo "%%s" >> %%s' %% (msg, error_log), shell=True)
                time.sleep(sleep_time)
    if error:
        print("WARN: Unable to connect to UNIX socket after retrying: %%s" %% error)
        raise error

    def receive_msg(records, checkpointer, shard_id):
        try:
            # records is a list of amazon_kclpy.messages.Record objects -> convert to JSON
            records_dicts = [j._json_dict for j in records]
            message_to_send = {'shard_id': shard_id, 'records': records_dicts}
            string_to_send = '%%s\\n' %% json.dumps(message_to_send)
            bytes_to_send = string_to_send.encode(DEFAULT_ENCODING)
            sock.send(bytes_to_send)
        except Exception as e:
            msg = "WARN: Unable to forward event: %%s" %% e
            print(msg)
            subprocess.check_output('echo "%%s" >> %%s' %% (msg, error_log), shell=True)

    kinesis_connector.KinesisProcessor.run_processor(log_file=log_file, processor_func=receive_msg)
""" % (LOCALSTACK_VENV_FOLDER, LOCALSTACK_ROOT_FOLDER, events_file, log_file)
    save_file(script_file, content)
    chmod_r(script_file, 0o755)
    TMP_FILES.append(script_file)
    return script_file
def _execute(self, func_arn, func_details, event, context=None, version=None):
    lambda_cwd = func_details.cwd
    runtime = func_details.runtime
    handler = func_details.handler
    environment = func_details.envvars.copy()

    # configure USE_SSL in environment
    if config.USE_SSL:
        environment['USE_SSL'] = '1'

    # prepare event body
    if not event:
        LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
        event = {}
    event_body = json.dumps(json_safe(event))
    stdin = self.prepare_event(environment, event_body)

    docker_host = config.DOCKER_HOST_FROM_CONTAINER
    environment['HOSTNAME'] = docker_host
    environment['LOCALSTACK_HOSTNAME'] = docker_host
    environment['_HANDLER'] = handler
    if func_details.timeout:
        environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
    if context:
        environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
        environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
        environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn

    # custom command to execute in the container
    command = ''

    # if running a Java Lambda, set up classpath arguments
    if is_java_lambda(runtime):
        java_opts = Util.get_java_opts()
        stdin = None
        # copy executor jar into temp directory
        target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
        if not os.path.exists(target_file):
            cp_r(LAMBDA_EXECUTOR_JAR, target_file)
        # TODO cleanup once we have custom Java Docker image
        taskdir = '/var/task'
        save_file(os.path.join(lambda_cwd, LAMBDA_EVENT_FILE), event_body)
        classpath = Util.get_java_classpath(target_file)
        command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
            (taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, LAMBDA_EVENT_FILE))

    # accept any self-signed certificates for outgoing calls from the Lambda
    if is_nodejs_runtime(runtime):
        environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'

    # determine the command to be executed (implemented by subclasses)
    cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)

    # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
    LOG.info('Running lambda cmd: %s' % cmd)
    result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)
    return result
def create_lambda_archive(
    script: str,
    get_content: bool = False,
    libs: List[str] = None,
    runtime: str = None,
    file_name: str = None,
    exclude_func: Callable[[str], bool] = None,
):
    """Utility method to create a Lambda function archive"""
    if libs is None:
        libs = []
    runtime = runtime or LAMBDA_DEFAULT_RUNTIME

    with tempfile.TemporaryDirectory(prefix=ARCHIVE_DIR_PREFIX) as tmp_dir:
        file_name = file_name or get_handler_file_from_name(LAMBDA_DEFAULT_HANDLER, runtime=runtime)
        script_file = os.path.join(tmp_dir, file_name)
        if os.path.sep in script_file:
            mkdir(os.path.dirname(script_file))
            # create __init__.py files along the path to allow Python imports
            path = file_name.split(os.path.sep)
            for i in range(1, len(path)):
                save_file(os.path.join(tmp_dir, *(path[:i] + ["__init__.py"])), "")
        save_file(script_file, script)
        chmod_r(script_file, 0o777)
        # copy libs
        for lib in libs:
            paths = [lib, "%s.py" % lib]
            try:
                module = importlib.import_module(lib)
                paths.append(module.__file__)
            except Exception:
                pass
            target_dir = tmp_dir
            root_folder = os.path.join(LOCALSTACK_VENV_FOLDER, "lib/python*/site-packages")
            if lib == "localstack":
                paths = ["localstack/*.py", "localstack/utils"]
                root_folder = LOCALSTACK_ROOT_FOLDER
                target_dir = os.path.join(tmp_dir, lib)
                mkdir(target_dir)
            for path in paths:
                file_to_copy = path if path.startswith("/") else os.path.join(root_folder, path)
                for file_path in glob.glob(file_to_copy):
                    name = os.path.join(target_dir, file_path.split(os.path.sep)[-1])
                    if os.path.isdir(file_path):
                        copy_dir(file_path, name)
                    else:
                        shutil.copyfile(file_path, name)
        if exclude_func:
            for dirpath, folders, files in os.walk(tmp_dir):
                for name in list(folders) + list(files):
                    full_name = os.path.join(dirpath, name)
                    relative = os.path.relpath(full_name, start=tmp_dir)
                    if exclude_func(relative):
                        rm_rf(full_name)
        # create zip file
        result = create_zip_file(tmp_dir, get_content=get_content)
        return result
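A hedged usage sketch of the archive helper above; the handler body and exclude filter are illustrative, not from the original source:

zip_bytes = create_lambda_archive(
    'def handler(event, context):\n    return {"ok": True}\n',
    get_content=True,      # return the raw zip bytes rather than a file path
    runtime='python3.8',
    exclude_func=lambda rel_path: rel_path.endswith('.pyc'),  # optional: drop compiled files
)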
def get_or_create_file(config_file):
    if os.path.exists(config_file):
        return config_file
    try:
        save_file(config_file, '{}')
        return config_file
    except Exception:
        pass
def test_get_java_lib_folder_classpath_no_directories(self):
    base_dir = new_tmp_dir()
    jar_file = os.path.join(base_dir, 'foo.jar')
    save_file(jar_file, '')
    lib_file = os.path.join(base_dir, 'lib', 'lib.jar')
    mkdir(os.path.dirname(lib_file))
    save_file(lib_file, '')
    self.assertEqual('.:foo.jar:lib/lib.jar', lambda_executors.Util.get_java_classpath(jar_file))
def generate_processor_script(events_file, log_file=None):
    script_file = os.path.join(tempfile.gettempdir(), 'kclipy.%s.processor.py' % short_uid())
    if log_file:
        log_file = "'%s'" % log_file
    else:
        log_file = 'None'
    content = """#!/usr/bin/env python

import os, sys, glob, json, socket, time, logging, tempfile
import subprocess32 as subprocess
logging.basicConfig(level=logging.INFO)
for path in glob.glob('%s/lib/python*/site-packages'):
    sys.path.insert(0, path)
sys.path.insert(0, '%s')
from localstack.config import DEFAULT_ENCODING
from localstack.utils.kinesis import kinesis_connector
from localstack.utils.common import timestamp
events_file = '%s'
log_file = %s
error_log = os.path.join(tempfile.gettempdir(), 'kclipy.error.log')
if __name__ == '__main__':
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    num_tries = 3
    sleep_time = 2
    error = None
    for i in range(0, num_tries):
        try:
            sock.connect(events_file)
            error = None
            break
        except Exception as e:
            error = e
            if i < num_tries:
                msg = '%%s: Unable to connect to UNIX socket. Retrying.' %% timestamp()
                subprocess.check_output('echo "%%s" >> %%s' %% (msg, error_log), shell=True)
                time.sleep(sleep_time)
    if error:
        print("WARN: Unable to connect to UNIX socket after retrying: %%s" %% error)
        raise error

    def receive_msg(records, checkpointer, shard_id):
        try:
            # records is a list of amazon_kclpy.messages.Record objects -> convert to JSON
            records_dicts = [j._json_dict for j in records]
            message_to_send = {'shard_id': shard_id, 'records': records_dicts}
            string_to_send = '%%s\\n' %% json.dumps(message_to_send)
            bytes_to_send = string_to_send.encode(DEFAULT_ENCODING)
            sock.send(bytes_to_send)
        except Exception as e:
            msg = "WARN: Unable to forward event: %%s" %% e
            print(msg)
            subprocess.check_output('echo "%%s" >> %%s' %% (msg, error_log), shell=True)

    kinesis_connector.KinesisProcessor.run_processor(log_file=log_file, processor_func=receive_msg)
""" % (LOCALSTACK_VENV_FOLDER, LOCALSTACK_ROOT_FOLDER, events_file, log_file)
    save_file(script_file, content)
    chmod_r(script_file, 0o755)
    TMP_FILES.append(script_file)
    return script_file
def test_save_load_file(tmp_path):
    file_name = tmp_path / ("normal_permissions_%s" % short_uid())
    content = "some_content_%s" % short_uid()
    more_content = "some_more_content_%s" % short_uid()

    save_file(file_name, content)
    assert content == load_file(file_name)

    save_file(file_name, more_content, append=True)
    assert content + more_content == load_file(file_name)
def execute(event, context):
    event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
    save_file(event_file, json.dumps(event))
    TMP_FILES.append(event_file)
    class_name = arn_to_lambda[arn].handler.split('::')[0]
    classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
    cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
    result, log_output = run_lambda_executor(cmd)
    LOG.info('Lambda output: %s' % log_output.replace('\n', '\n> '))
    return result
def _execute(self, func_arn, func_details, event, context=None, version=None):
    lambda_cwd = func_details.cwd
    runtime = func_details.runtime
    handler = func_details.handler
    environment = func_details.envvars.copy()

    # configure USE_SSL in environment
    if config.USE_SSL:
        environment['USE_SSL'] = '1'

    # prepare event body
    if not event:
        LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
        event = {}
    event_body = json.dumps(json_safe(event))
    stdin = self.prepare_event(environment, event_body)

    docker_host = config.DOCKER_HOST_FROM_CONTAINER
    environment['HOSTNAME'] = docker_host
    environment['LOCALSTACK_HOSTNAME'] = docker_host
    if context:
        environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
        environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
        environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn

    # custom command to execute in the container
    command = ''

    # if running a Java Lambda, set up classpath arguments
    if runtime == LAMBDA_RUNTIME_JAVA8:
        java_opts = Util.get_java_opts()
        stdin = None
        # copy executor jar into temp directory
        target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
        if not os.path.exists(target_file):
            cp_r(LAMBDA_EXECUTOR_JAR, target_file)
        # TODO cleanup once we have custom Java Docker image
        taskdir = '/var/task'
        save_file(os.path.join(lambda_cwd, LAMBDA_EVENT_FILE), event_body)
        classpath = Util.get_java_classpath(target_file)
        command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
            (taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, LAMBDA_EVENT_FILE))

    # determine the command to be executed (implemented by subclasses)
    cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)

    # lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
    LOG.debug('Running lambda cmd: %s' % cmd)
    result, log_output = self.run_lambda_executor(cmd, stdin, environment)
    log_formatted = log_output.strip().replace('\n', '\n> ')
    LOG.debug('Lambda %s result / log output:\n%s\n>%s' % (func_arn, result.strip(), log_formatted))
    return result, log_output
def execute_java_lambda(self, event, context, handler, main_file):
    event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
    save_file(event_file, json.dumps(event))
    TMP_FILES.append(event_file)
    class_name = handler.split('::')[0]
    classpath = '%s:%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file, Util.get_java_classpath(main_file))
    cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
    result, log_output = self.run_lambda_executor(cmd)
    LOG.debug('Lambda result / log output:\n%s\n> %s' % (
        result.strip(), log_output.strip().replace('\n', '\n> ')))
    return result, log_output
def _dump_events_to_files(events_with_added_uuid):
    try:
        _create_and_register_temp_dir()
        current_time_millis = int(round(time.time() * 1000))
        for event in events_with_added_uuid:
            target = os.path.join(
                _get_events_tmp_dir(), "%s_%s" % (current_time_millis, event["uuid"])
            )
            save_file(target, json.dumps(event["event"]))
    except Exception as e:
        LOG.info("Unable to dump events to tmp dir %s: %s", _get_events_tmp_dir(), e)
def test_get_java_lib_folder_classpath_no_directories(self):
    base_dir = new_tmp_dir()
    jar_file = os.path.join(base_dir, "foo.jar")
    save_file(jar_file, "")
    lib_file = os.path.join(base_dir, "lib", "lib.jar")
    mkdir(os.path.dirname(lib_file))
    save_file(lib_file, "")
    classpath = lambda_executors.Util.get_java_classpath(jar_file)
    self.assertIn(":foo.jar", classpath)
    self.assertIn("lib/lib.jar:", classpath)
    self.assertIn(":*.jar", classpath)
def execute_java_lambda(self, event, context, main_file, func_details=None):
    handler = func_details.handler
    opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
    event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
    save_file(event_file, json.dumps(json_safe(event)))
    TMP_FILES.append(event_file)
    class_name = handler.split('::')[0]
    classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)
    cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
    LOG.warning(cmd)
    result = self.run_lambda_executor(cmd, func_details=func_details)
    return result
def test_save_load_file_with_permissions(tmp_path):
    file_name = tmp_path / ("special_permissions_%s" % short_uid())
    content = "some_content_%s" % short_uid()
    more_content = "some_more_content_%s" % short_uid()
    permissions = 0o600

    save_file(file_name, content, permissions=permissions)
    assert permissions == os.stat(file_name).st_mode & 0o777
    assert content == load_file(file_name)

    save_file(file_name, more_content, append=True)
    assert permissions == os.stat(file_name).st_mode & 0o777
    assert content + more_content == load_file(file_name)
def get_lambda_code_param(params, **kwargs):
    code = params.get('Code', {})
    zip_file = code.get('ZipFile')
    if zip_file and not common.is_base64(zip_file):
        tmp_dir = common.new_tmp_dir()
        handler_file = get_handler_file_from_name(params['Handler'], runtime=params['Runtime'])
        tmp_file = os.path.join(tmp_dir, handler_file)
        common.save_file(tmp_file, zip_file)
        zip_file = create_zip_file(tmp_file, get_content=True)
        code['ZipFile'] = zip_file
        common.rm_rf(tmp_dir)
    return code
def get_local_config_file():
    file_name = '.localstack'
    home_dir = expanduser("~")
    for folder in (home_dir, TMP_FOLDER):
        config_file = os.path.join(folder, file_name)
        if os.path.exists(config_file):
            return config_file
        try:
            save_file(config_file, '{}')
            return config_file
        except Exception:
            pass
def create_zip_file(file_path, zip_file=None, get_content=False, content_root=None, mode="w"):
    """
    Creates a zipfile to the designated file_path.

    By default, a new zip file is created but the mode parameter can be used to append to
    an existing zip file.
    """
    base_dir = file_path
    if not os.path.isdir(file_path):
        base_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
        shutil.copy(file_path, base_dir)
        TMP_FILES.append(base_dir)
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    full_zip_file = zip_file
    if not full_zip_file:
        zip_file_name = "archive.zip"
        full_zip_file = os.path.join(tmp_dir, zip_file_name)
    # special case where target folder is empty -> create empty zip file
    if is_empty_dir(base_dir):
        # see https://stackoverflow.com/questions/25195495/how-to-create-an-empty-zip-file#25195628
        content = (
            b"PK\x05\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        )
        if get_content:
            return content
        save_file(full_zip_file, content)
        return full_zip_file

    # create zip file
    if is_alpine():
        # todo: extend CLI with the new parameters
        create_zip_file_cli(source_path=file_path, base_dir=base_dir, zip_file=full_zip_file)
    else:
        create_zip_file_python(
            source_path=file_path,
            base_dir=base_dir,
            zip_file=full_zip_file,
            content_root=content_root,
            mode=mode,
        )
    if not get_content:
        TMP_FILES.append(tmp_dir)
        return full_zip_file
    with open(full_zip_file, "rb") as file_obj:
        zip_file_content = file_obj.read()
    rm_dir(tmp_dir)
    return zip_file_content
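Illustrative calls covering the three modes described in the docstring above; the paths are placeholders, not from the original source:

zip_path = create_zip_file('/tmp/my_code')                      # zip a directory, returns the archive path
zip_bytes = create_zip_file('/tmp/my_code', get_content=True)   # return the raw zip bytes instead
create_zip_file('/tmp/extra.txt', zip_file=zip_path, mode='a')  # append a single file to an existing archive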
def download_and_extract():
    if not os.path.exists(tmp_archive):
        # create temporary placeholder file, to avoid duplicate parallel downloads
        save_file(tmp_archive, '')
        download(archive_url, tmp_archive)
    _, ext = os.path.splitext(tmp_archive)
    if ext == '.zip':
        unzip(tmp_archive, target_dir)
    elif ext == '.gz' or ext == '.bz2':
        untar(tmp_archive, target_dir)
    else:
        raise Exception('Unsupported archive format: %s' % ext)
def install_elasticsearch():
    if not os.path.exists(INSTALL_DIR_ES):
        log_install_msg('Elasticsearch')
        mkdir(INSTALL_DIR_INFRA)
        # download and extract archive
        tmp_archive = os.path.join(tempfile.gettempdir(), 'localstack.es.zip')
        download_and_extract_with_retry(ELASTICSEARCH_JAR_URL, tmp_archive, INSTALL_DIR_INFRA)
        elasticsearch_dir = glob.glob(os.path.join(INSTALL_DIR_INFRA, 'elasticsearch*'))
        if not elasticsearch_dir:
            raise Exception('Unable to find Elasticsearch folder in %s' % INSTALL_DIR_INFRA)
        shutil.move(elasticsearch_dir[0], INSTALL_DIR_ES)

        for dir_name in ('data', 'logs', 'modules', 'plugins', 'config/scripts'):
            dir_path = '%s/%s' % (INSTALL_DIR_ES, dir_name)
            mkdir(dir_path)
            chmod_r(dir_path, 0o777)

        # install default plugins
        for plugin in ELASTICSEARCH_PLUGIN_LIST:
            if is_alpine():
                # https://github.com/pires/docker-elasticsearch/issues/56
                os.environ['ES_TMPDIR'] = '/tmp'
            plugin_binary = os.path.join(INSTALL_DIR_ES, 'bin', 'elasticsearch-plugin')
            print('install elasticsearch-plugin %s' % plugin)
            run('%s install -b %s' % (plugin_binary, plugin))

    # delete some plugins to free up space
    for plugin in ELASTICSEARCH_DELETE_MODULES:
        module_dir = os.path.join(INSTALL_DIR_ES, 'modules', plugin)
        rm_rf(module_dir)

    # disable x-pack-ml plugin (not working on Alpine)
    xpack_dir = os.path.join(INSTALL_DIR_ES, 'modules', 'x-pack-ml', 'platform')
    rm_rf(xpack_dir)

    # patch JVM options file - replace hardcoded heap size settings
    jvm_options_file = os.path.join(INSTALL_DIR_ES, 'config', 'jvm.options')
    if os.path.exists(jvm_options_file):
        jvm_options = load_file(jvm_options_file)
        jvm_options_replaced = re.sub(r'(^-Xm[sx][a-zA-Z0-9\.]+$)', r'# \1', jvm_options, flags=re.MULTILINE)
        if jvm_options != jvm_options_replaced:
            save_file(jvm_options_file, jvm_options_replaced)
def execute_java_lambda(self, event, context, handler, main_file):
    event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
    save_file(event_file, json.dumps(event))
    TMP_FILES.append(event_file)
    class_name = handler.split('::')[0]
    classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
    cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
    # flip asynchronous flag depending on origin ('async' is a reserved keyword in Python 3.7+)
    asynchronous = False
    if 'Records' in event:
        # TODO: add more event types supporting async lambda execution
        if 'Sns' in event['Records'][0]:
            asynchronous = True
        if 'dynamodb' in event['Records'][0]:
            asynchronous = True
    result, log_output = self.run_lambda_executor(cmd, asynchronous=asynchronous)
    return result, log_output
def create_config_file(config_file, executableName, streamName, applicationName,
        credentialsProvider=None, **kwargs):
    if not credentialsProvider:
        credentialsProvider = 'DefaultAWSCredentialsProviderChain'
    content = """
        executableName = %s
        streamName = %s
        applicationName = %s
        AWSCredentialsProvider = %s
        processingLanguage = python/2.7
        regionName = us-east-1
    """ % (executableName, streamName, applicationName, credentialsProvider)
    # optional properties
    for key, value in kwargs.items():
        content += """
        %s = %s""" % (key, value)
    # strip the template indentation from each line
    content = content.replace('        ', '')
    save_file(config_file, content)
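An illustrative call (names and paths are placeholders); extra keyword arguments become additional key = value lines in the generated properties file:

create_config_file(
    '/tmp/kclipy.properties',
    executableName='/tmp/kclipy.processor.py',
    streamName='my-kinesis-stream',
    applicationName='my-kcl-app',
    maxRecords=500,  # forwarded via **kwargs
)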
def create_lambda_archive(script, stream=None, get_content=False, libs=None, runtime=None):
    """Utility method to create a Lambda function archive"""
    libs = libs or []  # avoid a mutable default argument
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    TMP_FILES.append(tmp_dir)
    file_name = get_handler_file_from_name(LAMBDA_DEFAULT_HANDLER, runtime=runtime)
    script_file = '%s/%s' % (tmp_dir, file_name)
    save_file(script_file, script)
    # copy libs
    for lib in libs:
        paths = [lib, '%s.py' % lib]
        target_dir = tmp_dir
        root_folder = '%s/lib/python*/site-packages' % LOCALSTACK_VENV_FOLDER
        if lib == 'localstack':
            paths = ['localstack/*.py', 'localstack/utils']
            root_folder = LOCALSTACK_ROOT_FOLDER
            target_dir = '%s/%s/' % (tmp_dir, lib)
            mkdir(target_dir)
        for path in paths:
            file_to_copy = '%s/%s' % (root_folder, path)
            for file_path in glob.glob(file_to_copy):
                run('cp -r %s %s/' % (file_path, target_dir))
    # create zip file (honor the get_content parameter instead of hardcoding True)
    return create_zip_file(tmp_dir, get_content=get_content)
def log(self, s):
    s = '%s\n' % s
    if self.log_file:
        save_file(self.log_file, s, append=True)
def set_function_code(code, lambda_name):

    def generic_handler(event, context):
        raise Exception(('Unable to find executor for Lambda function "%s". ' +
            'Note that Node.js and .NET Core Lambdas currently require LAMBDA_EXECUTOR=docker') % lambda_name)

    lambda_handler = generic_handler
    lambda_cwd = None
    arn = func_arn(lambda_name)
    runtime = arn_to_lambda[arn].runtime
    handler_name = arn_to_lambda.get(arn).handler
    lambda_environment = arn_to_lambda.get(arn).envvars
    if not handler_name:
        handler_name = LAMBDA_DEFAULT_HANDLER
    handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
    handler_function = get_handler_function_from_name(handler_name, runtime=runtime)

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)

    if 'S3Bucket' in code:
        s3_client = aws_stack.connect_to_service('s3')
        bytes_io = BytesIO()
        try:
            s3_client.download_fileobj(code['S3Bucket'], code['S3Key'], bytes_io)
            zip_file_content = bytes_io.getvalue()
        except Exception as e:
            return error_response('Unable to fetch Lambda archive from S3: %s' % e, 404)
    elif 'ZipFile' in code:
        zip_file_content = code['ZipFile']
        zip_file_content = base64.b64decode(zip_file_content)
    else:
        return error_response('No valid Lambda archive specified.', 400)

    # save tmp file
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    mkdir(tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_cwd = tmp_dir

    # check if this is a ZIP file
    is_zip = is_zip_file(zip_file_content)
    if is_zip:
        unzip(tmp_file, tmp_dir)
        main_file = '%s/%s' % (tmp_dir, handler_file)
        if not os.path.isfile(main_file):
            # check if this is a zip file that contains a single JAR file
            jar_files = glob.glob('%s/*.jar' % tmp_dir)
            if len(jar_files) == 1:
                main_file = jar_files[0]
        if os.path.isfile(main_file):
            # make sure the file is actually readable, then read contents
            ensure_readable(main_file)
            with open(main_file, 'rb') as file_obj:
                zip_file_content = file_obj.read()
        else:
            file_list = run('ls -la %s' % tmp_dir)
            LOG.debug('Lambda archive content:\n%s' % file_list)
            return error_response('Unable to find handler script in Lambda archive.', 400,
                error_type='ValidationError')

    # it could be a JAR file (regardless of whether wrapped in a ZIP file or not)
    is_jar = is_jar_archive(zip_file_content)
    if is_jar:

        def execute(event, context):
            result, log_output = lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(
                event, context, handler=arn_to_lambda[arn].handler, main_file=main_file)
            return result

        lambda_handler = execute

    elif runtime.startswith('python') and not use_docker():
        try:
            lambda_handler = exec_lambda_code(zip_file_content,
                handler_function=handler_function, lambda_cwd=lambda_cwd,
                lambda_env=lambda_environment)
        except Exception as e:
            raise Exception('Unable to get handler function from lambda code.', e)

    if not is_zip and not is_jar:
        raise Exception('Uploaded Lambda code is neither a ZIP nor JAR file.')

    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)
    return {'FunctionName': lambda_name}
LOGGER = logging.getLogger(__name__)


def start_sqs(port=PORT_SQS, asynchronous=False, update_listener=None):
    install_elasticmq()
    backend_port = DEFAULT_PORT_SQS_BACKEND
    # create config file
    config = """
    include classpath("application.conf")
    node-address {
        protocol = http
        host = "%s"
        port = %s
        context-path = ""
    }
    rest-sqs {
        enabled = true
        bind-port = %s
        bind-hostname = "0.0.0.0"
        sqs-limits = strict
    }
    """ % (LOCALSTACK_HOSTNAME, port, backend_port)
    config_file = os.path.join(TMP_FOLDER, 'sqs.%s.conf' % short_uid())
    TMP_FILES.append(config_file)
    save_file(config_file, config)
    # start process
    cmd = ('java -Dconfig.file=%s -jar %s/elasticmq-server.jar' %
        (config_file, INSTALL_DIR_ELASTICMQ))
    print('Starting mock SQS (%s port %s)...' % (get_service_protocol(), port))
    start_proxy_for_service('sqs', port, backend_port, update_listener)
    return do_run(cmd, asynchronous)