Example #1
def exec_lambda_code(script,
                     handler_function='handler',
                     lambda_cwd=None,
                     lambda_env=None):
    if lambda_cwd or lambda_env:
        exec_mutex.acquire()
        if lambda_cwd:
            previous_cwd = os.getcwd()
            os.chdir(lambda_cwd)
            sys.path = [lambda_cwd] + sys.path
        if lambda_env:
            previous_env = dict(os.environ)
            os.environ.update(lambda_env)
    # generate lambda file name
    lambda_id = 'l_%s' % short_uid()
    lambda_file = LAMBDA_SCRIPT_PATTERN.replace('*', lambda_id)
    save_file(lambda_file, script)
    # delete temporary .py and .pyc files on exit
    TMP_FILES.append(lambda_file)
    TMP_FILES.append('%sc' % lambda_file)
    try:
        handler_module = imp.load_source(lambda_id, lambda_file)
        module_vars = handler_module.__dict__
    except Exception as e:
        LOG.error('Unable to exec: %s %s' % (script, traceback.format_exc()))
        raise e
    finally:
        if lambda_cwd or lambda_env:
            if lambda_cwd:
                os.chdir(previous_cwd)
                sys.path.pop(0)
            if lambda_env:
                os.environ = previous_env
            exec_mutex.release()
    return module_vars[handler_function]
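A minimal usage sketch (not part of the original source; it assumes exec_lambda_code and its helpers are importable from the surrounding LocalStack module):

# hypothetical usage: compile a handler from source and invoke it directly
script = '''
def handler(event, context):
    return {'echo': event}
'''
handler = exec_lambda_code(script, handler_function='handler')
print(handler({'ping': 1}, None))  # -> {'echo': {'ping': 1}}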
Example #2
def start_sqs(port=PORT_SQS, asynchronous=False, update_listener=None):
    install_elasticmq()
    backend_port = DEFAULT_PORT_SQS_BACKEND
    # create config file
    config = """
    include classpath("application.conf")
    node-address {
        protocol = http
        host = "%s"
        port = %s
        context-path = ""
    }
    rest-sqs {
        enabled = true
        bind-port = %s
        bind-hostname = "0.0.0.0"
        sqs-limits = strict
    }
    """ % (LOCALSTACK_HOSTNAME, port, backend_port)
    config_file = os.path.join(TMP_FOLDER, 'sqs.%s.conf' % short_uid())
    TMP_FILES.append(config_file)
    save_file(config_file, config)
    # start process
    cmd = ('java -Dconfig.file=%s -jar %s/elasticmq-server.jar' % (config_file, INSTALL_DIR_ELASTICMQ))
    print('Starting mock SQS (%s port %s)...' % (get_service_protocol(), port))
    start_proxy_for_service('sqs', port, backend_port, update_listener)
    return do_run(cmd, asynchronous)
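A hypothetical usage sketch (assuming the module-level constants above, such as PORT_SQS and INSTALL_DIR_ELASTICMQ, are configured by the surrounding module):

# start the ElasticMQ-backed SQS mock without blocking the caller;
# do_run returns a handle to the background process
sqs_process = start_sqs(asynchronous=True)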
Example #3
def set_archive_code(code, lambda_name, zip_file_content=None):

    # get metadata
    lambda_arn = func_arn(lambda_name)
    lambda_details = arn_to_lambda[lambda_arn]
    is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(lambda_arn)

    if is_local_mount:
        # Mount or use a local folder lambda executors can reference
        # WARNING: this means we're pointing lambda_cwd to a local path in the user's
        # file system! We must ensure that there is no data loss (i.e., we must *not* add
        # this folder to TMP_FILES or similar).
        return code['S3Key']

    # get file content
    zip_file_content = zip_file_content or get_zip_bytes(code)

    # Save the zip file to a temporary file that the lambda executors can reference
    code_sha_256 = base64.standard_b64encode(
        hashlib.sha256(zip_file_content).digest())
    lambda_details.get_version('$LATEST')['CodeSize'] = len(zip_file_content)
    lambda_details.get_version('$LATEST')['CodeSha256'] = code_sha_256.decode(
        'utf-8')
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    mkdir(tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_details.cwd = tmp_dir
    return tmp_dir
Example #4
def create_zip_file(file_path, get_content=False):
    base_dir = file_path
    if not os.path.isdir(file_path):
        base_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
        shutil.copy(file_path, base_dir)
        TMP_FILES.append(base_dir)
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    zip_file_name = 'archive.zip'
    full_zip_file = os.path.join(tmp_dir, zip_file_name)
    # create zip file
    with zipfile.ZipFile(full_zip_file, 'w') as zip_file:
        for root, dirs, files in os.walk(base_dir):
            for name in files:
                full_name = os.path.join(root, name)
                relative = root[len(base_dir):].lstrip(os.path.sep)
                dest = os.path.join(relative, name)
                zip_file.write(full_name, dest)
    if not get_content:
        # defer cleanup until process exit: removing tmp_dir here would also
        # delete the archive.zip we are about to return
        TMP_FILES.append(tmp_dir)
        return full_zip_file
    zip_file_content = None
    with open(full_zip_file, 'rb') as file_obj:
        zip_file_content = file_obj.read()
    shutil.rmtree(tmp_dir)
    return zip_file_content
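A hypothetical usage sketch (paths are made up for illustration): the same function returns either a path to the archive or its raw bytes:

zip_path = create_zip_file('/tmp/my_lambda_src')  # path to archive.zip in a temp dir
zip_bytes = create_zip_file('/tmp/my_lambda_src', get_content=True)  # bytes of the archive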
Example #5
def exec_lambda_code(script, handler_function='handler', lambda_cwd=None, lambda_env=None):
    if lambda_cwd or lambda_env:
        exec_mutex.acquire()
        if lambda_cwd:
            previous_cwd = os.getcwd()
            os.chdir(lambda_cwd)
            sys.path = [lambda_cwd] + sys.path
        if lambda_env:
            previous_env = dict(os.environ)
            os.environ.update(lambda_env)
    # generate lambda file name
    lambda_id = 'l_%s' % short_uid()
    lambda_file = LAMBDA_SCRIPT_PATTERN.replace('*', lambda_id)
    save_file(lambda_file, script)
    # delete temporary .py and .pyc files on exit
    TMP_FILES.append(lambda_file)
    TMP_FILES.append('%sc' % lambda_file)
    try:
        handler_module = imp.load_source(lambda_id, lambda_file)
        module_vars = handler_module.__dict__
    except Exception as e:
        LOG.error('Unable to exec: %s %s' % (script, traceback.format_exc()))
        raise e
    finally:
        if lambda_cwd or lambda_env:
            if lambda_cwd:
                os.chdir(previous_cwd)
                sys.path.pop(0)
            if lambda_env:
                os.environ = previous_env
            exec_mutex.release()
    return module_vars[handler_function]
Example #6
def start_sqs_elasticmq(port=None, asynchronous=False, update_listener=None):
    global PORT_SQS_BACKEND

    port = port or config.PORT_SQS
    install_elasticmq()
    PORT_SQS_BACKEND = get_free_tcp_port()
    # create config file
    config_params = """
    include classpath("application.conf")
    node-address {
        protocol = http
        host = "%s"
        port = %s
        context-path = ""
    }
    rest-sqs {
        enabled = true
        bind-port = %s
        bind-hostname = "0.0.0.0"
        sqs-limits = strict
    }
    """ % (LOCALSTACK_HOSTNAME, port, PORT_SQS_BACKEND)
    config_file = os.path.join(TMP_FOLDER, 'sqs.%s.conf' % short_uid())
    TMP_FILES.append(config_file)
    save_file(config_file, config_params)
    # start process
    cmd = ('java -Dconfig.file=%s -Xmx%s -jar %s/elasticmq-server.jar' %
           (config_file, MAX_HEAP_SIZE, INSTALL_DIR_ELASTICMQ))
    print(
        'Starting mock SQS service in %s ports %s (recommended) and %s (deprecated)...'
        % (get_service_protocol(), config.EDGE_PORT, port))
    start_proxy_for_service('sqs', port, PORT_SQS_BACKEND, update_listener)
    return do_run(cmd, asynchronous)
Example #7
def create_lambda_archive(script, get_content=False, libs=[], runtime=None):
    """Utility method to create a Lambda function archive"""
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    TMP_FILES.append(tmp_dir)
    file_name = get_handler_file_from_name(LAMBDA_DEFAULT_HANDLER,
                                           runtime=runtime)
    script_file = os.path.join(tmp_dir, file_name)
    save_file(script_file, script)
    # copy libs
    for lib in libs:
        paths = [lib, '%s.py' % lib]
        target_dir = tmp_dir
        root_folder = os.path.join(LOCALSTACK_VENV_FOLDER,
                                   'lib/python*/site-packages')
        if lib == 'localstack':
            paths = ['localstack/*.py', 'localstack/utils']
            root_folder = LOCALSTACK_ROOT_FOLDER
            target_dir = os.path.join(tmp_dir, lib)
            mkdir(target_dir)
        for path in paths:
            file_to_copy = os.path.join(root_folder, path)
            for file_path in glob.glob(file_to_copy):
                name = os.path.join(target_dir,
                                    file_path.split(os.path.sep)[-1])
                if os.path.isdir(file_path):
                    shutil.copytree(file_path, name)
                else:
                    shutil.copyfile(file_path, name)

    # create zip file
    return create_zip_file(tmp_dir, get_content=get_content)
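A hypothetical usage sketch (the handler source and runtime value are illustrative assumptions):

handler_source = 'def handler(event, context):\n    return event'
zip_bytes = create_lambda_archive(handler_source, get_content=True,
                                  libs=['localstack'], runtime='python3.6')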
Example #8
def start_kcl_client_process(stream_name, listener_script, log_file=None, env=None, configs={},
        endpoint_url=None, ddb_lease_table_suffix=None, env_vars={}, region_name=None,
        kcl_log_level=DEFAULT_KCL_LOG_LEVEL, log_subscribers=[]):
    env = aws_stack.get_environment(env)
    # make sure to convert stream ARN to stream name
    stream_name = aws_stack.kinesis_stream_name(stream_name)
    # decide which credentials provider to use
    credentialsProvider = None
    if (('AWS_ASSUME_ROLE_ARN' in os.environ or 'AWS_ASSUME_ROLE_ARN' in env_vars) and
            ('AWS_ASSUME_ROLE_SESSION_NAME' in os.environ or 'AWS_ASSUME_ROLE_SESSION_NAME' in env_vars)):
        # use special credentials provider that can assume IAM roles and handle temporary STS auth tokens
        credentialsProvider = 'cloud.localstack.DefaultSTSAssumeRoleSessionCredentialsProvider'
        # pass through env variables to child process
        for var_name in ['AWS_ASSUME_ROLE_ARN', 'AWS_ASSUME_ROLE_SESSION_NAME',
                'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN']:
            if var_name in os.environ and var_name not in env_vars:
                env_vars[var_name] = os.environ[var_name]
    if aws_stack.is_local_env(env):
        # need to disable CBOR protocol, enforce use of plain JSON,
        # see https://github.com/mhart/kinesalite/issues/31
        env_vars['AWS_CBOR_DISABLE'] = 'true'
    if kcl_log_level or (len(log_subscribers) > 0):
        if not log_file:
            log_file = LOG_FILE_PATTERN.replace('*', short_uid())
            TMP_FILES.append(log_file)
        run('touch %s' % log_file)
        # start log output reader thread which will read the KCL log
        # file and print each line to stdout of this process...
        reader_thread = OutputReaderThread({'file': log_file, 'level': kcl_log_level,
            'log_prefix': 'KCL', 'log_subscribers': log_subscribers})
        reader_thread.start()

    # construct stream info
    stream_info = get_stream_info(stream_name, log_file, env=env, endpoint_url=endpoint_url,
        ddb_lease_table_suffix=ddb_lease_table_suffix, env_vars=env_vars)
    props_file = stream_info['properties_file']
    # set kcl config options
    kwargs = {
        'metricsLevel': 'NONE',
        'initialPositionInStream': 'LATEST'
    }
    # set parameters for local connection
    if aws_stack.is_local_env(env):
        kwargs['kinesisEndpoint'] = '%s:%s' % (HOSTNAME, config.PORT_KINESIS)
        kwargs['dynamodbEndpoint'] = '%s:%s' % (HOSTNAME, config.PORT_DYNAMODB)
        kwargs['kinesisProtocol'] = get_service_protocol()
        kwargs['dynamodbProtocol'] = get_service_protocol()
        kwargs['disableCertChecking'] = 'true'
    kwargs.update(configs)
    # create config file
    kclipy_helper.create_config_file(config_file=props_file, executableName=listener_script,
        streamName=stream_name, applicationName=stream_info['app_name'],
        credentialsProvider=credentialsProvider, region_name=region_name, **kwargs)
    TMP_FILES.append(props_file)
    # start stream consumer
    stream = KinesisStream(id=stream_name, params=stream_info)
    thread_consumer = KinesisProcessorThread.start_consumer(stream)
    TMP_THREADS.append(thread_consumer)
    return thread_consumer
Example #9
def generate_processor_script(events_file, log_file=None):
    script_file = os.path.join(tempfile.gettempdir(),
                               'kclipy.%s.processor.py' % short_uid())
    if log_file:
        log_file = "'%s'" % log_file
    else:
        log_file = 'None'
    content = """#!/usr/bin/env python
import os, sys, glob, json, socket, time, logging, subprocess, tempfile
logging.basicConfig(level=logging.INFO)
for path in glob.glob('%s/lib/python*/site-packages'):
    sys.path.insert(0, path)
sys.path.insert(0, '%s')
from localstack.config import DEFAULT_ENCODING
from localstack.utils.kinesis import kinesis_connector
from localstack.utils.common import timestamp
events_file = '%s'
log_file = %s
error_log = os.path.join(tempfile.gettempdir(), 'kclipy.error.log')
if __name__ == '__main__':
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

    num_tries = 3
    sleep_time = 2
    error = None
    for i in range(0, num_tries):
        try:
            sock.connect(events_file)
            error = None
            break
        except Exception as e:
            error = e
            if i < num_tries - 1:
                msg = '%%s: Unable to connect to UNIX socket. Retrying.' %% timestamp()
                subprocess.check_output('echo "%%s" >> %%s' %% (msg, error_log), shell=True)
                time.sleep(sleep_time)
    if error:
        print("WARN: Unable to connect to UNIX socket after retrying: %%s" %% error)
        raise error

    def receive_msg(records, checkpointer, shard_id):
        try:
            # records is a list of amazon_kclpy.messages.Record objects -> convert to JSON
            records_dicts = [j._json_dict for j in records]
            message_to_send = {'shard_id': shard_id, 'records': records_dicts}
            string_to_send = '%%s\\n' %% json.dumps(message_to_send)
            bytes_to_send = string_to_send.encode(DEFAULT_ENCODING)
            sock.send(bytes_to_send)
        except Exception as e:
            msg = "WARN: Unable to forward event: %%s" %% e
            print(msg)
            subprocess.check_output('echo "%%s" >> %%s' %% (msg, error_log), shell=True)
    kinesis_connector.KinesisProcessor.run_processor(log_file=log_file, processor_func=receive_msg)
    """ % (LOCALSTACK_VENV_FOLDER, LOCALSTACK_ROOT_FOLDER, events_file,
           log_file)
    save_file(script_file, content)
    chmod_r(script_file, 0o755)
    TMP_FILES.append(script_file)
    return script_file
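A hypothetical usage sketch (the socket path is illustrative): the generated script is saved to a temp file, marked executable, and can then be handed to the KCL client as its record processor:

processor_script = generate_processor_script('/tmp/kclipy.events.sock',
                                             log_file='/tmp/kcl.log')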
Example #10
def generate_processor_script(events_file, log_file=None):
    script_file = os.path.join(tempfile.gettempdir(), 'kclipy.%s.processor.py' % short_uid())
    if log_file:
        log_file = "'%s'" % log_file
    else:
        log_file = 'None'
    content = """#!/usr/bin/env python
import os, sys, glob, json, socket, time, logging, tempfile
import subprocess32 as subprocess
logging.basicConfig(level=logging.INFO)
for path in glob.glob('%s/lib/python*/site-packages'):
    sys.path.insert(0, path)
sys.path.insert(0, '%s')
from localstack.config import DEFAULT_ENCODING
from localstack.utils.kinesis import kinesis_connector
from localstack.utils.common import timestamp
events_file = '%s'
log_file = %s
error_log = os.path.join(tempfile.gettempdir(), 'kclipy.error.log')
if __name__ == '__main__':
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

    num_tries = 3
    sleep_time = 2
    error = None
    for i in range(0, num_tries):
        try:
            sock.connect(events_file)
            error = None
            break
        except Exception as e:
            error = e
            if i < num_tries - 1:
                msg = '%%s: Unable to connect to UNIX socket. Retrying.' %% timestamp()
                subprocess.check_output('echo "%%s" >> %%s' %% (msg, error_log), shell=True)
                time.sleep(sleep_time)
    if error:
        print("WARN: Unable to connect to UNIX socket after retrying: %%s" %% error)
        raise error

    def receive_msg(records, checkpointer, shard_id):
        try:
            # records is a list of amazon_kclpy.messages.Record objects -> convert to JSON
            records_dicts = [j._json_dict for j in records]
            message_to_send = {'shard_id': shard_id, 'records': records_dicts}
            string_to_send = '%%s\\n' %% json.dumps(message_to_send)
            bytes_to_send = string_to_send.encode(DEFAULT_ENCODING)
            sock.send(bytes_to_send)
        except Exception as e:
            msg = "WARN: Unable to forward event: %%s" %% e
            print(msg)
            subprocess.check_output('echo "%%s" >> %%s' %% (msg, error_log), shell=True)
    kinesis_connector.KinesisProcessor.run_processor(log_file=log_file, processor_func=receive_msg)
    """ % (LOCALSTACK_VENV_FOLDER, LOCALSTACK_ROOT_FOLDER, events_file, log_file)
    save_file(script_file, content)
    chmod_r(script_file, 0o755)
    TMP_FILES.append(script_file)
    return script_file
Example #11
def start_kcl_client_process(stream_name, listener_script, log_file=None, env=None, configs={},
        endpoint_url=None, ddb_lease_table_suffix=None, env_vars={},
        kcl_log_level=DEFAULT_KCL_LOG_LEVEL, log_subscribers=[]):
    env = aws_stack.get_environment(env)
    # decide which credentials provider to use
    credentialsProvider = None
    if (('AWS_ASSUME_ROLE_ARN' in os.environ or 'AWS_ASSUME_ROLE_ARN' in env_vars) and
            ('AWS_ASSUME_ROLE_SESSION_NAME' in os.environ or 'AWS_ASSUME_ROLE_SESSION_NAME' in env_vars)):
        # use special credentials provider that can assume IAM roles and handle temporary STS auth tokens
        credentialsProvider = 'com.atlassian.DefaultSTSAssumeRoleSessionCredentialsProvider'
        # pass through env variables to child process
        for var_name in ['AWS_ASSUME_ROLE_ARN', 'AWS_ASSUME_ROLE_SESSION_NAME',
                'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN']:
            if var_name in os.environ and var_name not in env_vars:
                env_vars[var_name] = os.environ[var_name]
    if env.region == REGION_LOCAL:
        # need to disable CBOR protocol, enforce use of plain JSON,
        # see https://github.com/mhart/kinesalite/issues/31
        env_vars['AWS_CBOR_DISABLE'] = 'true'
    if kcl_log_level or (len(log_subscribers) > 0):
        if not log_file:
            log_file = LOG_FILE_PATTERN.replace('*', short_uid())
            TMP_FILES.append(log_file)
        run('touch %s' % log_file)
        # start log output reader thread which will read the KCL log
        # file and print each line to stdout of this process...
        reader_thread = OutputReaderThread({'file': log_file, 'level': kcl_log_level,
            'log_prefix': 'KCL', 'log_subscribers': log_subscribers})
        reader_thread.start()

    # construct stream info
    stream_info = get_stream_info(stream_name, log_file, env=env, endpoint_url=endpoint_url,
        ddb_lease_table_suffix=ddb_lease_table_suffix, env_vars=env_vars)
    props_file = stream_info['properties_file']
    # set kcl config options
    kwargs = {
        'metricsLevel': 'NONE',
        'initialPositionInStream': 'LATEST'
    }
    # set parameters for local connection
    if env.region == REGION_LOCAL:
        kwargs['kinesisEndpoint'] = '%s:%s' % (HOSTNAME, config.PORT_KINESIS)
        kwargs['dynamodbEndpoint'] = '%s:%s' % (HOSTNAME, config.PORT_DYNAMODB)
        kwargs['kinesisProtocol'] = 'http%s' % ('s' if USE_SSL else '')
        kwargs['dynamodbProtocol'] = 'http%s' % ('s' if USE_SSL else '')
        kwargs['disableCertChecking'] = 'true'
    kwargs.update(configs)
    # create config file
    kclipy_helper.create_config_file(config_file=props_file, executableName=listener_script,
        streamName=stream_name, applicationName=stream_info['app_name'],
        credentialsProvider=credentialsProvider, **kwargs)
    TMP_FILES.append(props_file)
    # start stream consumer
    stream = KinesisStream(id=stream_name, params=stream_info)
    thread_consumer = KinesisProcessorThread.start_consumer(stream)
    TMP_THREADS.append(thread_consumer)
    return thread_consumer
Example #12
 def execute(event, context):
     event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
     save_file(event_file, json.dumps(event))
     TMP_FILES.append(event_file)
     class_name = arn_to_lambda[arn].handler.split('::')[0]
     classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
     cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
     result, log_output = run_lambda_executor(cmd)
     LOG.info('Lambda output: %s' % log_output.replace('\n', '\n> '))
     return result
Example #13
 def execute_java_lambda(self, event, context, handler, main_file):
     event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
     save_file(event_file, json.dumps(event))
     TMP_FILES.append(event_file)
     class_name = handler.split('::')[0]
     classpath = '%s:%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file, Util.get_java_classpath(main_file))
     cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
     result, log_output = self.run_lambda_executor(cmd)
     LOG.debug('Lambda result / log output:\n%s\n> %s' % (
         result.strip(), log_output.strip().replace('\n', '\n> ')))
     return result, log_output
Example #14
 def __init__(self, params):
     props_file = params['properties_file']
     env_vars = params['env_vars']
     cmd = kclipy_helper.get_kcl_app_command('java',
         MULTI_LANG_DAEMON_CLASS, props_file)
     if not params['log_file']:
         params['log_file'] = '%s.log' % props_file
         TMP_FILES.append(params['log_file'])
     env = aws_stack.get_environment()
     quiet = aws_stack.is_local_env(env)
     ShellCommandThread.__init__(self, cmd, outfile=params['log_file'], env_vars=env_vars, quiet=quiet)
Example #15
 def __init__(self, params):
     props_file = params['properties_file']
     env_vars = params['env_vars']
     cmd = kclipy_helper.get_kcl_app_command('java',
         MULTI_LANG_DAEMON_CLASS, props_file)
     if not params['log_file']:
         params['log_file'] = '%s.log' % props_file
         TMP_FILES.append(params['log_file'])
     # print(cmd)
     env = aws_stack.get_environment()
     quiet = env.region == REGION_LOCAL
     ShellCommandThread.__init__(self, cmd, outfile=params['log_file'], env_vars=env_vars, quiet=quiet)
Example #16
 def execute_java_lambda(self, event, context, main_file, func_details=None):
     handler = func_details.handler
     opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
     event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
     save_file(event_file, json.dumps(json_safe(event)))
     TMP_FILES.append(event_file)
     class_name = handler.split('::')[0]
     classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)
     cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
     LOG.warning(cmd)
     result = self.run_lambda_executor(cmd, func_details=func_details)
     return result
Example #17
 def __init__(self, params):
     props_file = params["properties_file"]
     env_vars = params["env_vars"]
     cmd = kclipy_helper.get_kcl_app_command("java", MULTI_LANG_DAEMON_CLASS, props_file)
     if not params["log_file"]:
         params["log_file"] = "%s.log" % props_file
         TMP_FILES.append(params["log_file"])
     env = aws_stack.get_environment()
     quiet = aws_stack.is_local_env(env)
     ShellCommandThread.__init__(
         self, cmd, outfile=params["log_file"], env_vars=env_vars, quiet=quiet
     )
Example #18
def create_zip_file(file_path,
                    zip_file=None,
                    get_content=False,
                    content_root=None,
                    mode="w"):
    """
    Creates a zipfile to the designated file_path.

    By default, a new zip file is created but the mode parameter can be used to append to an existing zip file
    """
    base_dir = file_path
    if not os.path.isdir(file_path):
        base_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
        shutil.copy(file_path, base_dir)
        TMP_FILES.append(base_dir)
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    full_zip_file = zip_file
    if not full_zip_file:
        zip_file_name = "archive.zip"
        full_zip_file = os.path.join(tmp_dir, zip_file_name)
    # special case where target folder is empty -> create empty zip file
    if is_empty_dir(base_dir):
        # see https://stackoverflow.com/questions/25195495/how-to-create-an-empty-zip-file#25195628
        content = (
            b"PK\x05\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
        )
        if get_content:
            return content
        save_file(full_zip_file, content)
        return full_zip_file

    # create zip file
    if is_alpine():
        # todo: extend CLI with the new parameters
        create_zip_file_cli(source_path=file_path,
                            base_dir=base_dir,
                            zip_file=full_zip_file)
    else:
        create_zip_file_python(
            source_path=file_path,
            base_dir=base_dir,
            zip_file=full_zip_file,
            content_root=content_root,
            mode=mode,
        )
    if not get_content:
        TMP_FILES.append(tmp_dir)
        return full_zip_file
    with open(full_zip_file, "rb") as file_obj:
        zip_file_content = file_obj.read()
    rm_dir(tmp_dir)
    return zip_file_content
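A hypothetical usage sketch (paths are illustrative) for the append mode introduced in this version:

archive = create_zip_file('/tmp/lambda_src')  # create a fresh archive.zip
create_zip_file('/tmp/extra_libs', zip_file=archive, mode='a')  # append to it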
Example #19
def listen_to_kinesis(stream_name, listener_func=None, processor_script=None,
        events_file=None, endpoint_url=None, log_file=None, configs={}, env=None,
        ddb_lease_table_suffix=None, env_vars={}, kcl_log_level=DEFAULT_KCL_LOG_LEVEL,
        log_subscribers=[], wait_until_started=False):
    """
    High-level function to subscribe to a Kinesis stream and receive events
    in a listener function. A KCL client process is automatically started
    in the background.
    """
    env = aws_stack.get_environment(env)
    if not events_file:
        events_file = EVENTS_FILE_PATTERN.replace('*', short_uid())
        TMP_FILES.append(events_file)
    if not processor_script:
        processor_script = generate_processor_script(events_file, log_file=log_file)

    run('rm -f %s' % events_file)
    # start event reader thread (this process)
    ready_mutex = threading.Semaphore(0)
    thread = EventFileReaderThread(events_file, listener_func, ready_mutex=ready_mutex)
    thread.start()
    # Wait until the event reader thread is ready (to avoid 'Connection refused' error on the UNIX socket)
    ready_mutex.acquire()
    # start KCL client (background process)
    if processor_script[-4:] == '.pyc':
        processor_script = processor_script[0:-1]
    # add log listener that notifies when KCL is started
    if wait_until_started:
        listener = KclStartedLogListener()
        log_subscribers.append(listener)

    process = start_kcl_client_process(stream_name, processor_script,
        endpoint_url=endpoint_url, log_file=log_file, configs=configs, env=env,
        ddb_lease_table_suffix=ddb_lease_table_suffix, env_vars=env_vars, kcl_log_level=kcl_log_level,
        log_subscribers=log_subscribers)

    if wait_until_started:
        # Wait at most 90 seconds for initialization. Note that creating the DDB table can take quite a while
        try:
            listener.sync_init.get(block=True, timeout=90)
        except Exception:
            raise Exception('Timeout when waiting for KCL initialization.')
        # wait at most 30 seconds for shard lease notification
        try:
            listener.sync_take_shard.get(block=True, timeout=30)
        except Exception:
            # this merely means that there is no shard available to take. Do nothing.
            pass

    return process
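A hypothetical usage sketch (the stream name is illustrative, and the listener signature is an assumption based on EventFileReaderThread forwarding the decoded records):

def my_listener(records):
    for record in records:
        print('received: %s' % record)

process = listen_to_kinesis('my-stream', listener_func=my_listener,
                            wait_until_started=True)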
Example #20
def listen_to_kinesis(stream_name, listener_func=None, processor_script=None,
        events_file=None, endpoint_url=None, log_file=None, configs={}, env=None,
        ddb_lease_table_suffix=None, env_vars={}, kcl_log_level=DEFAULT_KCL_LOG_LEVEL,
        log_subscribers=[], wait_until_started=False, fh_d_stream=None):
    """
    High-level function to subscribe to a Kinesis stream and receive events
    in a listener function. A KCL client process is automatically started
    in the background.
    """
    env = aws_stack.get_environment(env)
    if not events_file:
        events_file = EVENTS_FILE_PATTERN.replace('*', short_uid())
        TMP_FILES.append(events_file)
    if not processor_script:
        processor_script = generate_processor_script(events_file, log_file=log_file)

    run('rm -f %s' % events_file)
    # start event reader thread (this process)
    ready_mutex = threading.Semaphore(0)
    thread = EventFileReaderThread(events_file, listener_func, ready_mutex=ready_mutex, fh_d_stream=fh_d_stream)
    thread.start()
    # Wait until the event reader thread is ready (to avoid 'Connection refused' error on the UNIX socket)
    ready_mutex.acquire()
    # start KCL client (background process)
    if processor_script[-4:] == '.pyc':
        processor_script = processor_script[0:-1]
    # add log listener that notifies when KCL is started
    if wait_until_started:
        listener = KclStartedLogListener()
        log_subscribers.append(listener)

    process = start_kcl_client_process(stream_name, processor_script,
        endpoint_url=endpoint_url, log_file=log_file, configs=configs, env=env,
        ddb_lease_table_suffix=ddb_lease_table_suffix, env_vars=env_vars, kcl_log_level=kcl_log_level,
        log_subscribers=log_subscribers)

    if wait_until_started:
        # Wait at most 90 seconds for initialization. Note that creating the DDB table can take quite a while
        try:
            listener.sync_init.get(block=True, timeout=90)
        except Exception:
            raise Exception('Timeout when waiting for KCL initialization.')
        # wait at most 30 seconds for shard lease notification
        try:
            listener.sync_take_shard.get(block=True, timeout=30)
        except Exception:
            # this merely means that there is no shard available to take. Do nothing.
            pass

    return process
Example #21
def create_lambda_archive(script,
                          get_content=False,
                          libs=[],
                          runtime=None,
                          file_name=None):
    """Utility method to create a Lambda function archive"""
    runtime = runtime or LAMBDA_DEFAULT_RUNTIME
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    TMP_FILES.append(tmp_dir)
    file_name = file_name or get_handler_file_from_name(LAMBDA_DEFAULT_HANDLER,
                                                        runtime=runtime)
    script_file = os.path.join(tmp_dir, file_name)
    if os.path.sep in script_file:
        mkdir(os.path.dirname(script_file))
        # create __init__.py files along the path to allow Python imports
        path = file_name.split(os.path.sep)
        for i in range(1, len(path)):
            save_file(os.path.join(tmp_dir, *(path[:i] + ["__init__.py"])), "")
    save_file(script_file, script)
    chmod_r(script_file, 0o777)
    # copy libs
    for lib in libs:
        paths = [lib, "%s.py" % lib]
        try:
            module = importlib.import_module(lib)
            paths.append(module.__file__)
        except Exception:
            pass
        target_dir = tmp_dir
        root_folder = os.path.join(LOCALSTACK_VENV_FOLDER,
                                   "lib/python*/site-packages")
        if lib == "localstack":
            paths = ["localstack/*.py", "localstack/utils"]
            root_folder = LOCALSTACK_ROOT_FOLDER
            target_dir = os.path.join(tmp_dir, lib)
            mkdir(target_dir)
        for path in paths:
            file_to_copy = path if path.startswith("/") else os.path.join(
                root_folder, path)
            for file_path in glob.glob(file_to_copy):
                name = os.path.join(target_dir,
                                    file_path.split(os.path.sep)[-1])
                if os.path.isdir(file_path):
                    copy_dir(file_path, name)
                else:
                    shutil.copyfile(file_path, name)

    # create zip file
    result = create_zip_file(tmp_dir, get_content=get_content)
    return result
Example #22
 def execute(event, context):
     event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
     save_file(event_file, json.dumps(event))
     TMP_FILES.append(event_file)
     class_name = arn_to_lambda[arn].handler.split('::')[0]
     classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
     cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
     # note: 'async' is a reserved keyword in Python 3.7+, hence 'asynchronous'
     asynchronous = False
     # flip asynchronous flag depending on origin
     if 'Records' in event:
         # TODO: add more events supporting asynchronous lambda execution
         if 'Sns' in event['Records'][0]:
             asynchronous = True
     result, log_output = run_lambda_executor(cmd, asynchronous=asynchronous)
Example #23
 def execute_java_lambda(self, event, context, handler, main_file):
     event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
     save_file(event_file, json.dumps(event))
     TMP_FILES.append(event_file)
     class_name = handler.split('::')[0]
     classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
     cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
     # note: 'async' is a reserved keyword in Python 3.7+, hence 'asynchronous'
     asynchronous = False
     # flip asynchronous flag depending on origin
     if 'Records' in event:
         # TODO: add more events supporting asynchronous lambda execution
         if 'Sns' in event['Records'][0]:
             asynchronous = True
         if 'dynamodb' in event['Records'][0]:
             asynchronous = True
     result, log_output = self.run_lambda_executor(cmd, asynchronous=asynchronous)
Example #24
 def execute_java_lambda(self, event, context, handler, main_file):
     event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
     save_file(event_file, json.dumps(event))
     TMP_FILES.append(event_file)
     class_name = handler.split('::')[0]
     classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
     cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
     asynchronous = False
     # flip asynchronous flag depending on origin
     if 'Records' in event:
         # TODO: add more events supporting asynchronous lambda execution
         if 'Sns' in event['Records'][0]:
             asynchronous = True
         if 'dynamodb' in event['Records'][0]:
             asynchronous = True
     result, log_output = self.run_lambda_executor(cmd, asynchronous=asynchronous)
     LOG.debug('Lambda result / log output:\n%s\n> %s' % (result.strip(), log_output.strip().replace('\n', '\n> ')))
     return result, log_output
Example #25
def create_zip_file(file_path, include='*', get_content=False):
    base_dir = file_path
    if not os.path.isdir(file_path):
        base_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
        run('cp "%s" "%s"' % (file_path, base_dir))
        include = os.path.basename(file_path)
        TMP_FILES.append(base_dir)
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    zip_file_name = 'archive.zip'
    zip_file = '%s/%s' % (tmp_dir, zip_file_name)
    # create zip file
    run('cd "%s" && zip -r "%s" %s' % (base_dir, zip_file, include))
    if not get_content:
        TMP_FILES.append(tmp_dir)
        return zip_file
    zip_file_content = None
    with open(zip_file, 'rb') as file_obj:
        zip_file_content = file_obj.read()
    run('rm -r "%s"' % tmp_dir)
    return zip_file_content
Example #26
def create_zip_file(file_path, include='*', get_content=False):
    base_dir = file_path
    if not os.path.isdir(file_path):
        base_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
        run('cp "%s" "%s"' % (file_path, base_dir))
        include = os.path.basename(file_path)
        TMP_FILES.append(base_dir)
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    zip_file_name = 'archive.zip'
    zip_file = '%s/%s' % (tmp_dir, zip_file_name)
    # create zip file
    run('cd "%s" && zip -r "%s" %s' % (base_dir, zip_file, include))
    if not get_content:
        TMP_FILES.append(tmp_dir)
        return zip_file
    zip_file_content = None
    with open(zip_file, "rb") as file_obj:
        zip_file_content = file_obj.read()
    run('rm -r "%s"' % tmp_dir)
    return zip_file_content
Example #27
def create_zip_file(file_path, get_content=False):
    base_dir = file_path
    if not os.path.isdir(file_path):
        base_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
        shutil.copy(file_path, base_dir)
        TMP_FILES.append(base_dir)
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    zip_file_name = 'archive.zip'
    full_zip_file = os.path.join(tmp_dir, zip_file_name)
    # create zip file
    if is_alpine():
        create_zip_file_cli(file_path, base_dir, zip_file=full_zip_file)
    else:
        create_zip_file_python(file_path, base_dir, zip_file=full_zip_file)
    if not get_content:
        TMP_FILES.append(tmp_dir)
        return full_zip_file
    zip_file_content = None
    with open(full_zip_file, 'rb') as file_obj:
        zip_file_content = file_obj.read()
    rm_dir(tmp_dir)
    return zip_file_content
Example #28
 def execute_java_lambda(self, event, context, main_file, func_details=None):
     handler = func_details.handler
     opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ""
     event_file = EVENT_FILE_PATTERN.replace("*", short_uid())
     save_file(event_file, json.dumps(json_safe(event)))
     TMP_FILES.append(event_file)
     class_name = handler.split("::")[0]
     classpath = "%s:%s:%s" % (
         main_file,
         Util.get_java_classpath(main_file),
         LAMBDA_EXECUTOR_JAR,
     )
     cmd = "java %s -cp %s %s %s %s" % (
         opts,
         classpath,
         LAMBDA_EXECUTOR_CLASS,
         class_name,
         event_file,
     )
     LOG.info(cmd)
     result = self._execute_in_custom_runtime(cmd, func_details=func_details)
     return result
Example #29
def start_sqs_elasticmq(port=None, asynchronous=False, update_listener=None):
    global PORT_SQS_BACKEND

    port = port or config.PORT_SQS
    install_elasticmq()
    PORT_SQS_BACKEND = get_free_tcp_port()
    # create config file
    config_params = """
    include classpath("application.conf")
    node-address {
        protocol = http
        host = "%s"
        port = %s
        context-path = ""
    }
    rest-sqs {
        enabled = true
        bind-port = %s
        bind-hostname = "0.0.0.0"
        sqs-limits = strict
    }
    """ % (
        LOCALSTACK_HOSTNAME,
        port,
        PORT_SQS_BACKEND,
    )
    config_file = os.path.join(TMP_FOLDER, "sqs.%s.conf" % short_uid())
    TMP_FILES.append(config_file)
    save_file(config_file, config_params)
    # start process
    cmd = "java -Dconfig.file=%s -Xmx%s -jar %s/elasticmq-server.jar" % (
        config_file,
        MAX_HEAP_SIZE,
        INSTALL_DIR_ELASTICMQ,
    )
    log_startup_message("SQS")
    start_proxy_for_service("sqs", port, PORT_SQS_BACKEND, update_listener)
    return do_run(cmd, asynchronous)
Example #30
def create_lambda_archive(script, stream=None, get_content=False, libs=[], runtime=None):
    """Utility method to create a Lambda function archive"""
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    TMP_FILES.append(tmp_dir)
    file_name = get_handler_file_from_name(LAMBDA_DEFAULT_HANDLER, runtime=runtime)
    script_file = '%s/%s' % (tmp_dir, file_name)
    save_file(script_file, script)
    # copy libs
    for lib in libs:
        paths = [lib, '%s.py' % lib]
        target_dir = tmp_dir
        root_folder = '%s/lib/python*/site-packages' % LOCALSTACK_VENV_FOLDER
        if lib == 'localstack':
            paths = ['localstack/*.py', 'localstack/utils']
            root_folder = LOCALSTACK_ROOT_FOLDER
            target_dir = '%s/%s/' % (tmp_dir, lib)
            mkdir(target_dir)
        for path in paths:
            file_to_copy = '%s/%s' % (root_folder, path)
            for file_path in glob.glob(file_to_copy):
                run('cp -r %s %s/' % (file_path, target_dir))

    # create zip file
    return create_zip_file(tmp_dir, get_content=True)
Example #32
def set_function_code(code, lambda_name):

    def generic_handler(event, context):
        raise Exception(('Unable to find executor for Lambda function "%s". ' +
            'Note that Node.js and .NET Core Lambdas currently require LAMBDA_EXECUTOR=docker') % lambda_name)

    lambda_handler = generic_handler
    lambda_cwd = None
    arn = func_arn(lambda_name)
    runtime = arn_to_lambda[arn].runtime
    handler_name = arn_to_lambda.get(arn).handler
    lambda_environment = arn_to_lambda.get(arn).envvars
    if not handler_name:
        handler_name = LAMBDA_DEFAULT_HANDLER
    handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
    handler_function = get_handler_function_from_name(handler_name, runtime=runtime)

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)

    if 'S3Bucket' in code:
        s3_client = aws_stack.connect_to_service('s3')
        bytes_io = BytesIO()
        try:
            s3_client.download_fileobj(code['S3Bucket'], code['S3Key'], bytes_io)
            zip_file_content = bytes_io.getvalue()
        except Exception as e:
            return error_response('Unable to fetch Lambda archive from S3: %s' % e, 404)
    elif 'ZipFile' in code:
        zip_file_content = code['ZipFile']
        zip_file_content = base64.b64decode(zip_file_content)
    else:
        return error_response('No valid Lambda archive specified.', 400)

    # save tmp file
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    mkdir(tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_cwd = tmp_dir

    # check if this is a ZIP file
    is_zip = is_zip_file(zip_file_content)
    if is_zip:
        unzip(tmp_file, tmp_dir)
        main_file = '%s/%s' % (tmp_dir, handler_file)
        if not os.path.isfile(main_file):
            # check if this is a zip file that contains a single JAR file
            jar_files = glob.glob('%s/*.jar' % tmp_dir)
            if len(jar_files) == 1:
                main_file = jar_files[0]
        if os.path.isfile(main_file):
            # make sure the file is actually readable, then read contents
            ensure_readable(main_file)
            with open(main_file, 'rb') as file_obj:
                zip_file_content = file_obj.read()
        else:
            file_list = run('ls -la %s' % tmp_dir)
            LOG.debug('Lambda archive content:\n%s' % file_list)
            return error_response('Unable to find handler script in Lambda archive.', 400, error_type='ValidationError')

    # it could be a JAR file (regardless of whether wrapped in a ZIP file or not)
    is_jar = is_jar_archive(zip_file_content)
    if is_jar:

        def execute(event, context):
            result, log_output = lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(event, context,
                handler=arn_to_lambda[arn].handler, main_file=main_file)
            return result

        lambda_handler = execute

    elif runtime.startswith('python') and not use_docker():
        try:
            lambda_handler = exec_lambda_code(zip_file_content,
                handler_function=handler_function, lambda_cwd=lambda_cwd,
                lambda_env=lambda_environment)
        except Exception as e:
            raise Exception('Unable to get handler function from lambda code.', e)

    if not is_zip and not is_jar:
        raise Exception('Uploaded Lambda code is neither a ZIP nor JAR file.')

    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)

    return {'FunctionName': lambda_name}
Example #33
def set_function_code(code, lambda_name):

    def generic_handler(event, context):
        raise Exception(('Unable to find executor for Lambda function "%s". ' +
            'Note that Node.js and .NET Core Lambdas currently require LAMBDA_EXECUTOR=docker') % lambda_name)

    lambda_cwd = None
    arn = func_arn(lambda_name)
    lambda_details = arn_to_lambda[arn]
    runtime = lambda_details.runtime
    handler_name = lambda_details.handler
    lambda_environment = lambda_details.envvars
    if not handler_name:
        handler_name = LAMBDA_DEFAULT_HANDLER

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)
    zip_file_content = None
    is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL

    if is_local_mount:
        # Mount or use a local folder lambda executors can reference
        # WARNING: this means we're pointing lambda_cwd to a local path in the user's
        # file system! We must ensure that there is no data loss (i.e., we must *not* add
        # this folder to TMP_FILES or similar).
        lambda_cwd = code['S3Key']
    else:
        # Save the zip file to a temporary file that the lambda executors can reference
        zip_file_content = get_zip_bytes(code)
        if isinstance(zip_file_content, Response):
            return zip_file_content
        tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
        mkdir(tmp_dir)
        tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
        save_file(tmp_file, zip_file_content)
        TMP_FILES.append(tmp_dir)
        lambda_cwd = tmp_dir

    # Set the appropriate lambda handler.
    lambda_handler = generic_handler
    if runtime == LAMBDA_RUNTIME_JAVA8:
        # The Lambda executors for Docker subclass LambdaExecutorContainers,
        # which runs Lambda in Docker by passing all *.jar files in the function
        # working directory as part of the classpath. Because of this, we need to
        # save the zip_file_content as a .jar here.
        if is_jar_archive(zip_file_content):
            jar_tmp_file = '{working_dir}/{file_name}'.format(
                working_dir=tmp_dir, file_name=LAMBDA_JAR_FILE_NAME)
            save_file(jar_tmp_file, zip_file_content)

        lambda_handler = get_java_handler(zip_file_content, handler_name, tmp_file)
        if isinstance(lambda_handler, Response):
            return lambda_handler
    else:
        handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
        handler_function = get_handler_function_from_name(handler_name, runtime=runtime)

        if not is_local_mount:
            # Lambda code must be uploaded in Zip format
            if not is_zip_file(zip_file_content):
                raise Exception(
                    'Uploaded Lambda code for runtime ({}) is not in Zip format'.format(runtime))
            unzip(tmp_file, lambda_cwd)

        main_file = '%s/%s' % (lambda_cwd, handler_file)
        if os.path.isfile(main_file):
            # make sure the file is actually readable, then read contents
            ensure_readable(main_file)
            with open(main_file, 'rb') as file_obj:
                zip_file_content = file_obj.read()
        else:
            # Raise an error if (1) this is not a local mount lambda, or (2) we're
            # running Lambdas locally (not in Docker), or (3) we're using remote Docker.
            # -> We do *not* want to raise an error if we're using local mount in non-remote Docker
            if not is_local_mount or not use_docker() or config.LAMBDA_REMOTE_DOCKER:
                file_list = run('ls -la %s' % lambda_cwd)
                LOG.debug('Lambda archive content:\n%s' % file_list)
                return error_response(
                    'Unable to find handler script in Lambda archive.', 400,
                    error_type='ValidationError')

        if runtime.startswith('python') and not use_docker():
            try:
                lambda_handler = exec_lambda_code(
                    zip_file_content,
                    handler_function=handler_function,
                    lambda_cwd=lambda_cwd,
                    lambda_env=lambda_environment)
            except Exception as e:
                raise Exception('Unable to get handler function from lambda code.', e)

    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)

    return {'FunctionName': lambda_name}
Example #34
LOGGER = logging.getLogger(__name__)


def start_sqs(port=PORT_SQS, asynchronous=False, update_listener=None):
    install_elasticmq()
    backend_port = DEFAULT_PORT_SQS_BACKEND
    # create config file
    config = """
    include classpath("application.conf")
    node-address {
        protocol = http
        host = "%s"
        port = %s
        context-path = ""
    }
    rest-sqs {
        enabled = true
        bind-port = %s
        bind-hostname = "0.0.0.0"
        sqs-limits = strict
    }
    """ % (LOCALSTACK_HOSTNAME, port, backend_port)
    config_file = os.path.join(TMP_FOLDER, 'sqs.%s.conf' % short_uid())
    TMP_FILES.append(config_file)
    save_file(config_file, config)
    # start process
    cmd = ('java -Dconfig.file=%s -jar %s/elasticmq-server.jar' % (config_file, INSTALL_DIR_ELASTICMQ))
    print('Starting mock SQS (%s port %s)...' % (get_service_protocol(), port))
    start_proxy_for_service('sqs', port, backend_port, update_listener)
    return do_run(cmd, asynchronous)
Example #35
def set_function_code(code, lambda_name):
    def generic_handler(event, context):
        raise Exception((
            'Unable to find executor for Lambda function "%s". ' +
            'Note that Node.js and .NET Core Lambdas currently require LAMBDA_EXECUTOR=docker'
        ) % lambda_name)

    lambda_cwd = None
    arn = func_arn(lambda_name)
    runtime = arn_to_lambda[arn].runtime
    handler_name = arn_to_lambda.get(arn).handler
    lambda_environment = arn_to_lambda.get(arn).envvars
    if not handler_name:
        handler_name = LAMBDA_DEFAULT_HANDLER

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)

    # Save the zip file to a temporary file that the lambda executors can reference.
    zip_file_content = get_zip_bytes(code)
    if isinstance(zip_file_content, Response):
        return zip_file_content
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    mkdir(tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_cwd = tmp_dir

    # Set the appropriate lambda handler.
    lambda_handler = generic_handler
    if runtime == LAMBDA_RUNTIME_JAVA8:
        # The Lambda executors for Docker subclass LambdaExecutorContainers,
        # which runs Lambda in Docker by passing all *.jar files in the function
        # working directory as part of the classpath. Because of this, we need to
        # save the zip_file_content as a .jar here.
        if is_jar_archive(zip_file_content):
            jar_tmp_file = '{working_dir}/{file_name}'.format(
                working_dir=tmp_dir, file_name=LAMBDA_JAR_FILE_NAME)
            save_file(jar_tmp_file, zip_file_content)

        lambda_handler = get_java_handler(zip_file_content, handler_name,
                                          tmp_file)
        if isinstance(lambda_handler, Response):
            return lambda_handler
    else:
        handler_file = get_handler_file_from_name(handler_name,
                                                  runtime=runtime)
        handler_function = get_handler_function_from_name(handler_name,
                                                          runtime=runtime)

        # Lambda code must be uploaded in the Zip format.
        if not is_zip_file(zip_file_content):
            raise Exception(
                'Uploaded Lambda code for runtime ({}) is not in Zip format'.
                format(runtime))

        unzip(tmp_file, tmp_dir)
        main_file = '%s/%s' % (tmp_dir, handler_file)
        if os.path.isfile(main_file):
            # make sure the file is actually readable, then read contents
            ensure_readable(main_file)
            with open(main_file, 'rb') as file_obj:
                zip_file_content = file_obj.read()
        else:
            file_list = run('ls -la %s' % tmp_dir)
            LOG.debug('Lambda archive content:\n%s' % file_list)
            return error_response(
                'Unable to find handler script in Lambda archive.',
                400,
                error_type='ValidationError')

        if runtime.startswith('python') and not use_docker():
            try:
                lambda_handler = exec_lambda_code(
                    zip_file_content,
                    handler_function=handler_function,
                    lambda_cwd=lambda_cwd,
                    lambda_env=lambda_environment)
            except Exception as e:
                raise Exception(
                    'Unable to get handler function from lambda code.', e)

    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)

    return {'FunctionName': lambda_name}
Example #36
def set_function_code(code, lambda_name):
    def generic_handler(event, context):
        raise Exception((
            'Unable to find executor for Lambda function "%s". ' +
            'Note that Node.js Lambdas currently require LAMBDA_EXECUTOR=docker'
        ) % lambda_name)

    lambda_handler = generic_handler
    lambda_cwd = None
    arn = func_arn(lambda_name)
    runtime = arn_to_lambda[arn].runtime
    handler_name = arn_to_lambda.get(arn).handler
    lambda_environment = arn_to_lambda.get(arn).envvars
    if not handler_name:
        handler_name = LAMBDA_DEFAULT_HANDLER
    handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
    handler_function = get_handler_function_from_name(handler_name,
                                                      runtime=runtime)

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)

    if 'S3Bucket' in code:
        s3_client = aws_stack.connect_to_service('s3')
        bytes_io = BytesIO()
        try:
            s3_client.download_fileobj(code['S3Bucket'], code['S3Key'],
                                       bytes_io)
            zip_file_content = bytes_io.getvalue()
        except Exception as e:
            return error_response(
                'Unable to fetch Lambda archive from S3: %s' % e, 404)
    elif 'ZipFile' in code:
        zip_file_content = code['ZipFile']
        zip_file_content = base64.b64decode(zip_file_content)
    else:
        return error_response('No valid Lambda archive specified.', 400)

    # save tmp file
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    run('mkdir -p %s' % tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_cwd = tmp_dir

    # check if this is a ZIP file
    is_zip = is_zip_file(zip_file_content)
    if is_zip:
        unzip(tmp_file, tmp_dir)
        main_file = '%s/%s' % (tmp_dir, handler_file)
        if not os.path.isfile(main_file):
            # check if this is a zip file that contains a single JAR file
            jar_files = glob.glob('%s/*.jar' % tmp_dir)
            if len(jar_files) == 1:
                main_file = jar_files[0]
        if os.path.isfile(main_file):
            # make sure the file is actually readable, then read contents
            ensure_readable(main_file)
            with open(main_file, 'rb') as file_obj:
                zip_file_content = file_obj.read()
        else:
            file_list = run('ls -la %s' % tmp_dir)
            LOG.debug('Lambda archive content:\n%s' % file_list)
            return error_response(
                'Unable to find handler script in Lambda archive.',
                400,
                error_type='ValidationError')

    # it could be a JAR file (regardless of whether wrapped in a ZIP file or not)
    is_jar = is_jar_archive(zip_file_content)
    if is_jar:

        def execute(event, context):
            result, log_output = lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(
                event,
                context,
                handler=arn_to_lambda[arn].handler,
                main_file=main_file)
            return result

        lambda_handler = execute

    elif runtime.startswith('python') and not use_docker():
        try:
            lambda_handler = exec_lambda_code(
                zip_file_content,
                handler_function=handler_function,
                lambda_cwd=lambda_cwd,
                lambda_env=lambda_environment)
        except Exception as e:
            raise Exception('Unable to get handler function from lambda code.',
                            e)

    if not is_zip and not is_jar:
        raise Exception('Uploaded Lambda code is neither a ZIP nor JAR file.')

    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)

    return {'FunctionName': lambda_name}
Example #37
def _create_and_register_temp_dir():
    if EVENTS_TMP_DIR not in TMP_FILES:
        mkdir(EVENTS_TMP_DIR)
        TMP_FILES.append(EVENTS_TMP_DIR)

Example #38
def start_sqs(port=PORT_SQS, asynchronous=False, update_listener=None):
    install_elasticmq()
    backend_port = DEFAULT_PORT_SQS_BACKEND
    # create config file
    config = '''
    include classpath("application.conf")
    node-address {
        protocol = http
        host = "%s"
        port = %s
        context-path = ""
    }
    rest-sqs {
        enabled = true
        bind-port = %s
        bind-hostname = "0.0.0.0"
        sqs-limits = strict
    }
    ''' % (LOCALSTACK_HOSTNAME, port, backend_port)
    config_file = os.path.join(TMP_FOLDER, 'sqs.%s.conf' % short_uid())
    TMP_FILES.append(config_file)
    save_file(config_file, config)
    # start process
    cmd = ('java -Dconfig.file=%s -jar %s/elasticmq-server.jar' %
           (config_file, INSTALL_DIR_ELASTICMQ))
    print("Starting mock SQS (%s port %s)..." % (get_service_protocol(), port))
    start_proxy_for_service('sqs', port, backend_port, update_listener)
    return do_run(cmd, asynchronous)
Example #39
def _create_and_register_temp_dir():
    tmp_dir = _get_events_tmp_dir()
    if tmp_dir not in TMP_FILES:
        mkdir(tmp_dir)
        TMP_FILES.append(tmp_dir)
Example #40
 def do_download(param):
     tmp_file = tmp_file_pattern % param
     TMP_FILES.append(tmp_file)
     download('http://localhost:%s/%s' % (test_port, param), tmp_file)