Example #1
    def destroy_docker_container(self, func_arn):
        """
        Stops and/or removes a docker container for a specific lambda function ARN.
        :param func_arn: The ARN of the lambda function.
        :return: None
        """
        with self.docker_container_lock:
            status = self.get_docker_container_status(func_arn)

            # Get the container name and id.
            container_name = self.get_container_name(func_arn)

            if status == 1:
                LOG.debug('Stopping container: %s' % container_name)
                cmd = (
                    'docker stop -t0 %s'
                ) % (container_name)

                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

                status = self.get_docker_container_status(func_arn)

            if status == -1:
                LOG.debug('Removing container: %s' % container_name)
                cmd = (
                    'docker rm %s'
                ) % (container_name)

                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
Example #2
def start_kcl_client_process(stream_name, listener_script, log_file=None, env=None, configs={},
        endpoint_url=None, ddb_lease_table_suffix=None, env_vars={},
        kcl_log_level=DEFAULT_KCL_LOG_LEVEL, log_subscribers=[]):
    env = aws_stack.get_environment(env)
    # decide which credentials provider to use
    credentialsProvider = None
    if (('AWS_ASSUME_ROLE_ARN' in os.environ or 'AWS_ASSUME_ROLE_ARN' in env_vars) and
            ('AWS_ASSUME_ROLE_SESSION_NAME' in os.environ or 'AWS_ASSUME_ROLE_SESSION_NAME' in env_vars)):
        # use special credentials provider that can assume IAM roles and handle temporary STS auth tokens
        credentialsProvider = 'com.atlassian.DefaultSTSAssumeRoleSessionCredentialsProvider'
        # pass through env variables to child process
        for var_name in ['AWS_ASSUME_ROLE_ARN', 'AWS_ASSUME_ROLE_SESSION_NAME',
                'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN']:
            if var_name in os.environ and var_name not in env_vars:
                env_vars[var_name] = os.environ[var_name]
    if env.region == REGION_LOCAL:
        # need to disable CBOR protocol, enforce use of plain JSON,
        # see https://github.com/mhart/kinesalite/issues/31
        env_vars['AWS_CBOR_DISABLE'] = 'true'
    if kcl_log_level or (len(log_subscribers) > 0):
        if not log_file:
            log_file = LOG_FILE_PATTERN.replace('*', short_uid())
            TMP_FILES.append(log_file)
        run('touch %s' % log_file)
        # start log output reader thread which will read the KCL log
        # file and print each line to stdout of this process...
        reader_thread = OutputReaderThread({'file': log_file, 'level': kcl_log_level,
            'log_prefix': 'KCL', 'log_subscribers': log_subscribers})
        reader_thread.start()

    # construct stream info
    stream_info = get_stream_info(stream_name, log_file, env=env, endpoint_url=endpoint_url,
        ddb_lease_table_suffix=ddb_lease_table_suffix, env_vars=env_vars)
    props_file = stream_info['properties_file']
    # set kcl config options
    kwargs = {
        'metricsLevel': 'NONE',
        'initialPositionInStream': 'LATEST'
    }
    # set parameters for local connection
    if env.region == REGION_LOCAL:
        kwargs['kinesisEndpoint'] = '%s:%s' % (HOSTNAME, config.PORT_KINESIS)
        kwargs['dynamodbEndpoint'] = '%s:%s' % (HOSTNAME, config.PORT_DYNAMODB)
        kwargs['kinesisProtocol'] = 'http%s' % ('s' if USE_SSL else '')
        kwargs['dynamodbProtocol'] = 'http%s' % ('s' if USE_SSL else '')
        kwargs['disableCertChecking'] = 'true'
    kwargs.update(configs)
    # create config file
    kclipy_helper.create_config_file(config_file=props_file, executableName=listener_script,
        streamName=stream_name, applicationName=stream_info['app_name'],
        credentialsProvider=credentialsProvider, **kwargs)
    TMP_FILES.append(props_file)
    # start stream consumer
    stream = KinesisStream(id=stream_name, params=stream_info)
    thread_consumer = KinesisProcessorThread.start_consumer(stream)
    TMP_THREADS.append(thread_consumer)
    return thread_consumer
Example #3
def use_docker():
    global DO_USE_DOCKER
    if DO_USE_DOCKER is None:
        DO_USE_DOCKER = False
        if 'docker' in config.LAMBDA_EXECUTOR:
            try:
                run('docker images', print_error=False)
                # run('ping -c 1 -t 1 %s' % DOCKER_BRIDGE_IP, print_error=False)
                DO_USE_DOCKER = True
            except Exception:
                pass
    return DO_USE_DOCKER
Example #4
def install_elasticsearch():
    if not os.path.exists(INSTALL_DIR_ES):
        LOGGER.info('Downloading and installing local Elasticsearch server. This may take some time.')
        mkdir(INSTALL_DIR_INFRA)
        # download and extract archive
        download_and_extract_with_retry(ELASTICSEARCH_JAR_URL, TMP_ARCHIVE_ES, INSTALL_DIR_INFRA)
        run('cd %s && mv elasticsearch* elasticsearch' % (INSTALL_DIR_INFRA))

        for dir_name in ('data', 'logs', 'modules', 'plugins', 'config/scripts'):
            dir_path = '%s/%s' % (INSTALL_DIR_ES, dir_name)
            mkdir(dir_path)
            chmod_r(dir_path, 0o777)
Example #5
    def destroy_existing_docker_containers(self):
        """
        Stops and/or removes all lambda docker containers for localstack.
        :return: None
        """
        with self.docker_container_lock:
            container_names = self.get_all_container_names()

            LOG.debug('Removing %d containers.' % len(container_names))
            for container_name in container_names:
                cmd = 'docker rm -f %s' % container_name
                LOG.debug(cmd)
                run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
Example #6
def install_amazon_kinesis_client_libs():
    # install KCL/STS JAR files
    if not os.path.exists(INSTALL_DIR_KCL):
        mkdir(INSTALL_DIR_KCL)
        if not os.path.exists(TMP_ARCHIVE_STS):
            download(STS_JAR_URL, TMP_ARCHIVE_STS)
        shutil.copy(TMP_ARCHIVE_STS, INSTALL_DIR_KCL)
    # Compile Java files
    from localstack.utils.kinesis import kclipy_helper
    classpath = kclipy_helper.get_kcl_classpath()
    java_files = '%s/utils/kinesis/java/com/atlassian/*.java' % ROOT_PATH
    class_files = '%s/utils/kinesis/java/com/atlassian/*.class' % ROOT_PATH
    if not glob.glob(class_files):
        run('javac -cp "%s" %s' % (classpath, java_files))
Example #7
def listen_to_kinesis(stream_name, listener_func=None, processor_script=None,
        events_file=None, endpoint_url=None, log_file=None, configs={}, env=None,
        ddb_lease_table_suffix=None, env_vars={}, kcl_log_level=DEFAULT_KCL_LOG_LEVEL,
        log_subscribers=[], wait_until_started=False):
    """
    High-level function that subscribes to a Kinesis stream
    and receives events in a listener function. A KCL client process is
    automatically started in the background.
    """
    env = aws_stack.get_environment(env)
    if not events_file:
        events_file = EVENTS_FILE_PATTERN.replace('*', short_uid())
        TMP_FILES.append(events_file)
    if not processor_script:
        processor_script = generate_processor_script(events_file, log_file=log_file)

    run('rm -f %s' % events_file)
    # start event reader thread (this process)
    ready_mutex = threading.Semaphore(0)
    thread = EventFileReaderThread(events_file, listener_func, ready_mutex=ready_mutex)
    thread.start()
    # Wait until the event reader thread is ready (to avoid 'Connection refused' error on the UNIX socket)
    ready_mutex.acquire()
    # start KCL client (background process)
    if processor_script[-4:] == '.pyc':
        processor_script = processor_script[0:-1]
    # add log listener that notifies when KCL is started
    if wait_until_started:
        listener = KclStartedLogListener()
        log_subscribers.append(listener)

    process = start_kcl_client_process(stream_name, processor_script,
        endpoint_url=endpoint_url, log_file=log_file, configs=configs, env=env,
        ddb_lease_table_suffix=ddb_lease_table_suffix, env_vars=env_vars, kcl_log_level=kcl_log_level,
        log_subscribers=log_subscribers)

    if wait_until_started:
        # Wait at most 90 seconds for initialization. Note that creating the DDB table can take quite a bit of time.
        try:
            listener.sync_init.get(block=True, timeout=90)
        except Exception:
            raise Exception('Timeout when waiting for KCL initialization.')
        # wait at most 30 seconds for shard lease notification
        try:
            listener.sync_take_shard.get(block=True, timeout=30)
        except Exception:
            # this merely means that there is no shard available to take. Do nothing.
            pass

    return process
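The function above is the high-level entry point for consuming a stream; below is a minimal usage sketch. The module path localstack.utils.kinesis.kinesis_connector, the stream name and the shape of the records handed to the listener are assumptions for illustration, not taken from the example.

# Minimal usage sketch (module path, stream name and record shape are assumptions).
from localstack.utils.kinesis import kinesis_connector

def process_records(records):
    # The exact payload passed to the listener is an assumption here.
    for record in records:
        print('Received Kinesis record: %s' % record)

consumer = kinesis_connector.listen_to_kinesis(
    'test-stream', listener_func=process_records, wait_until_started=True)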
Example #8
    def get_docker_container_status(self, func_arn):
        """
        Determine the status of a docker container.
        :param func_arn: The ARN of the lambda function.
        :return: 1 if the container is running,
        -1 if the container exists but is not running,
        0 if the container does not exist.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)

            # Check if the container is already running.
            LOG.debug('Getting container status: %s' % container_name)
            cmd = (
                'docker ps'
                ' -a'
                ' --filter name="%s"'
                ' --format "{{ .Status }}"'
            ) % (container_name)

            LOG.debug(cmd)
            cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

            # Interpret the container status reported by 'docker ps'.
            container_status = cmd_result.strip()

            if len(container_status) == 0:
                return 0

            if container_status.lower().startswith('up '):
                return 1

            return -1
Example #9
def run_cached(cmd, cache_duration_secs=None):
    if cache_duration_secs is None:
        cache_duration_secs = AWS_CACHE_TIMEOUT
    env_vars = os.environ.copy()
    env_vars.update({
        'AWS_ACCESS_KEY_ID': os.environ.get('AWS_ACCESS_KEY_ID') or 'foobar',
        'AWS_SECRET_ACCESS_KEY': os.environ.get('AWS_SECRET_ACCESS_KEY') or 'foobar',
        'AWS_DEFAULT_REGION': os.environ.get('AWS_DEFAULT_REGION') or DEFAULT_REGION,
        'PYTHONWARNINGS': 'ignore:Unverified HTTPS request'
    })
    return run(cmd, cache_duration_secs=cache_duration_secs, env_vars=env_vars)
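A short usage sketch for run_cached; the AWS CLI command is illustrative and assumes the 'aws' binary is on the PATH.

# Minimal sketch: run an AWS CLI command with the dummy credentials injected
# above and cache its output for 60 seconds (the command is an assumption).
streams = run_cached('aws kinesis list-streams', cache_duration_secs=60)
print(streams)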
Example #10
def install_dynamodb_local():
    if not os.path.exists(INSTALL_DIR_DDB):
        LOGGER.info('Downloading and installing local DynamoDB server. This may take some time.')
        mkdir(INSTALL_DIR_DDB)
        # download and extract archive
        download_and_extract_with_retry(DYNAMODB_JAR_URL, TMP_ARCHIVE_DDB, INSTALL_DIR_DDB)

    # fix for Alpine, otherwise DynamoDBLocal fails with:
    # DynamoDBLocal_lib/libsqlite4java-linux-amd64.so: __memcpy_chk: symbol not found
    if is_alpine():
        ddb_libs_dir = '%s/DynamoDBLocal_lib' % INSTALL_DIR_DDB
        patched_marker = '%s/alpine_fix_applied' % ddb_libs_dir
        if not os.path.exists(patched_marker):
            patched_lib = ('https://rawgit.com/bhuisgen/docker-alpine/master/alpine-dynamodb/' +
                'rootfs/usr/local/dynamodb/DynamoDBLocal_lib/libsqlite4java-linux-amd64.so')
            patched_jar = ('https://rawgit.com/bhuisgen/docker-alpine/master/alpine-dynamodb/' +
                'rootfs/usr/local/dynamodb/DynamoDBLocal_lib/sqlite4java.jar')
            run("curl -L -o %s/libsqlite4java-linux-amd64.so '%s'" % (ddb_libs_dir, patched_lib))
            run("curl -L -o %s/sqlite4java.jar '%s'" % (ddb_libs_dir, patched_jar))
            save_file(patched_marker, '')

    # fix logging configuration for DynamoDBLocal
    log4j2_config = """<Configuration status="WARN">
      <Appenders>
        <Console name="Console" target="SYSTEM_OUT">
          <PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/>
        </Console>
      </Appenders>
      <Loggers>
        <Root level="WARN"><AppenderRef ref="Console"/></Root>
      </Loggers>
    </Configuration>"""
    log4j2_file = os.path.join(INSTALL_DIR_DDB, 'log4j2.xml')
    save_file(log4j2_file, log4j2_config)
    run('cd "%s" && zip -u DynamoDBLocal.jar log4j2.xml || true' % INSTALL_DIR_DDB)
Example #11
def create_lambda_archive(script, stream=None, get_content=False, libs=[], runtime=None):
    """Utility method to create a Lambda function archive"""
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    TMP_FILES.append(tmp_dir)
    file_name = get_handler_file_from_name(LAMBDA_DEFAULT_HANDLER, runtime=runtime)
    script_file = '%s/%s' % (tmp_dir, file_name)
    save_file(script_file, script)
    # copy libs
    for lib in libs:
        paths = [lib, '%s.py' % lib]
        target_dir = tmp_dir
        root_folder = '%s/lib/python*/site-packages' % LOCALSTACK_VENV_FOLDER
        if lib == 'localstack':
            paths = ['localstack/*.py', 'localstack/utils']
            root_folder = LOCALSTACK_ROOT_FOLDER
            target_dir = '%s/%s/' % (tmp_dir, lib)
            mkdir(target_dir)
        for path in paths:
            file_to_copy = '%s/%s' % (root_folder, path)
            for file_path in glob.glob(file_to_copy):
                run('cp -r %s %s/' % (file_path, target_dir))

    # create zip file
    return create_zip_file(tmp_dir, get_content=True)
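A minimal sketch of how the helper above might be called; the handler code and the runtime value are illustrative assumptions.

# Minimal sketch: build an in-memory zip archive for a tiny Python handler.
handler_code = """
def handler(event, context):
    return {'echo': event}
"""
zip_bytes = create_lambda_archive(handler_code, get_content=True, runtime='python3.6')
print('Created Lambda archive with %d bytes' % len(zip_bytes))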
Example #12
    def get_all_container_names(self):
        """
        Returns a list of container names for lambda containers.
        :return: A list of localstack docker container names, one per Lambda function.
        """
        with self.docker_container_lock:
            LOG.debug('Getting all lambda container names.')
            cmd = 'docker ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"'
            LOG.debug(cmd)
            cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()

            if len(cmd_result) > 0:
                container_names = cmd_result.split('\n')
            else:
                container_names = []

            return container_names
Example #13
def create_zip_file(file_path, include='*', get_content=False):
    base_dir = file_path
    if not os.path.isdir(file_path):
        base_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
        run('cp "%s" "%s"' % (file_path, base_dir))
        include = os.path.basename(file_path)
        TMP_FILES.append(base_dir)
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    zip_file_name = 'archive.zip'
    zip_file = '%s/%s' % (tmp_dir, zip_file_name)
    # create zip file
    run('cd "%s" && zip -r "%s" %s' % (base_dir, zip_file, include))
    if not get_content:
        TMP_FILES.append(tmp_dir)
        return zip_file
    zip_file_content = None
    with open(zip_file, 'rb') as file_obj:
        zip_file_content = file_obj.read()
    run('rm -r "%s"' % tmp_dir)
    return zip_file_content
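A short usage sketch for create_zip_file; the input path is a made-up example created on the spot so the sketch runs.

# Minimal sketch: zip a single file, then fetch the archive content as bytes.
save_file('/tmp/notes.txt', 'hello')
zip_path = create_zip_file('/tmp/notes.txt')                      # path to archive.zip
zip_bytes = create_zip_file('/tmp/notes.txt', get_content=True)   # raw zip bytes
print(zip_path, len(zip_bytes))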
Example #14
def install_kinesalite():
    if not os.path.exists(INSTALL_PATH_KINESALITE_CLI):
        log_install_msg("Kinesis")
        run('cd "%s" && npm install' % MODULE_MAIN_PATH)
Example #15
def _get_latest_node_agent_version():
    try:
        return common.run("npm view @thundra/core version".split())
    except Exception as e:
        print("Unable to get latest version of Thundra Node agent: %s" % e)
        return None
Example #16
    def setUpClass(cls):
        with INIT_LOCK:
            run('cd %s; terraform apply -input=false tfplan' %
                (cls.get_base_dir()))
Example #17
    """ Base class for Lambda executors. Subclasses must overwrite the execute method """

    def __init__(self):
        pass

    def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
        raise Exception('Not implemented.')

    def startup(self):
        pass

    def cleanup(self, arn=None):
        pass

    def run_lambda_executor(self, cmd, env_vars={}, asynchronous=False):
        process = run(cmd, asynchronous=True, stderr=subprocess.PIPE, outfile=subprocess.PIPE, env_vars=env_vars)
        if asynchronous:
            result = '{"async": "%s"}' % asynchronous
            log_output = 'Lambda executed asynchronously'
        else:
            return_code = process.wait()
            result = to_str(process.stdout.read())
            log_output = to_str(process.stderr.read())

            if return_code != 0:
                raise Exception('Lambda process returned error status code: %s. Output:\n%s' %
                    (return_code, log_output))
        return result, log_output


# holds information about an existing container.
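Given the docstring's note that subclasses must override execute, the following is an illustrative sketch of a trivial subclass. It assumes the base class shown above is named LambdaExecutor; the subclass name and the echo command are made up and do not correspond to one of the real executors.

# Illustrative sketch only - the real executors (local/Docker) are more involved.
class LambdaExecutorEcho(LambdaExecutor):
    def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
        # Build a trivial shell command and delegate to run_lambda_executor,
        # which wraps run(..., asynchronous=True) as shown above.
        cmd = "echo '%s'" % event
        return self.run_lambda_executor(cmd, asynchronous=asynchronous)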
Example #18
    def tearDownClass(cls):
        run('cd %s; terraform destroy -auto-approve' % (cls.get_base_dir()))
Example #19
def is_debug():
    return os.environ.get('DEBUG', '').strip() not in ['', '0', 'false']


def do_run(cmd, asynchronous, print_output=False):
    sys.stdout.flush()
    if asynchronous:
        if is_debug():
            print_output = True
        outfile = subprocess.PIPE if print_output else None
        t = ShellCommandThread(cmd, outfile=outfile)
        t.start()
        TMP_THREADS.append(t)
        return t
    else:
        return run(cmd)


def start_proxy_for_service(service_name, port, default_backend_port, update_listener, quiet=False, params={}):
    # check if we have a custom backend configured
    custom_backend_url = os.environ.get('%s_BACKEND' % service_name.upper())
    backend_url = custom_backend_url or ('http://%s:%s' % (DEFAULT_BACKEND_HOST, default_backend_port))
    return start_proxy(port, backend_url=backend_url, update_listener=update_listener, quiet=quiet, params=params)


def start_proxy(port, backend_url, update_listener, quiet=False, params={}):
    proxy_thread = GenericProxy(port=port, forward_url=backend_url,
        ssl=USE_SSL, update_listener=update_listener, quiet=quiet, params=params)
    proxy_thread.start()
    TMP_THREADS.append(proxy_thread)
    return proxy_thread
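A brief usage sketch for start_proxy_for_service; the service name, the ports and the absence of an update listener are assumptions for illustration.

# Minimal sketch: expose a local proxy on port 4572 that forwards to a
# backend on port 4573, with no update listener (all values are made up).
proxy_thread = start_proxy_for_service('s3', 4572, 4573, update_listener=None)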
Example #20
    def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
        """
        Prepares a persistent docker container for a specific function.
        :param runtime: Lambda runtime environment, e.g. python2.7, nodejs6.10.
        :param func_arn: The ARN of the lambda function.
        :param env_vars: The environment variables for the lambda.
        :param lambda_cwd: The local directory containing the code for the lambda function.
        :return: ContainerInfo class containing the container name and default entry point.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)
            docker_cmd = self._docker_cmd()

            status = self.get_docker_container_status(func_arn)
            LOG.debug('Priming docker container (status "%s"): %s' %
                      (status, container_name))

            docker_image = Util.docker_image_for_runtime(runtime)
            rm_flag = Util.get_docker_remove_flag()

            # Container is not running or doesn't exist.
            if status < 1:
                # Make sure the container does not exist in any form/state.
                self.destroy_docker_container(func_arn)

                env_vars_str = ' '.join([
                    '-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars
                ])

                network = config.LAMBDA_DOCKER_NETWORK
                network_str = '--network="%s"' % network if network else ''

                mount_volume = not config.LAMBDA_REMOTE_DOCKER
                lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(
                    lambda_cwd)
                mount_volume_str = '-v "%s":/var/task' % lambda_cwd_on_host if mount_volume else ''

                # Create and start the container
                LOG.debug('Creating container: %s' % container_name)
                cmd = (
                    '%s create'
                    ' %s'  # --rm flag
                    ' --name "%s"'
                    ' --entrypoint /bin/bash'  # Load bash when it starts.
                    ' %s'
                    ' --interactive'  # Keeps the container running bash.
                    ' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
                    ' -e HOSTNAME="$HOSTNAME"'
                    ' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
                    '  %s'  # env_vars
                    '  %s'  # network
                    ' %s') % (docker_cmd, rm_flag, container_name,
                              mount_volume_str, env_vars_str, network_str,
                              docker_image)
                LOG.debug(cmd)
                run(cmd)

                if not mount_volume:
                    LOG.debug('Copying files to container "%s" from "%s".' %
                              (container_name, lambda_cwd))
                    cmd = ('%s cp'
                           ' "%s/." "%s:/var/task"') % (docker_cmd, lambda_cwd,
                                                        container_name)
                    LOG.debug(cmd)
                    run(cmd)

                LOG.debug('Starting container: %s' % container_name)
                cmd = '%s start %s' % (docker_cmd, container_name)
                LOG.debug(cmd)
                run(cmd)
                # give the container some time to start up
                time.sleep(1)

            # Get the entry point for the image.
            LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
            cmd = ('%s image inspect'
                   ' --format="{{ .ContainerConfig.Entrypoint }}"'
                   ' %s') % (docker_cmd, docker_image)

            LOG.debug(cmd)
            run_result = run(cmd)

            entry_point = run_result.strip('[]\n\r ')

            container_network = self.get_docker_container_network(func_arn)

            LOG.debug(
                'Using entrypoint "%s" for container "%s" on network "%s".' %
                (entry_point, container_name, container_network))

            return ContainerInfo(container_name, entry_point)
Example #21
def start_kcl_client_process(
    stream_name,
    listener_script,
    log_file=None,
    env=None,
    configs=None,
    endpoint_url=None,
    ddb_lease_table_suffix=None,
    env_vars=None,
    region_name=None,
    kcl_log_level=DEFAULT_KCL_LOG_LEVEL,
    log_subscribers=None,
):
    if configs is None:
        configs = {}
    if env_vars is None:
        env_vars = {}
    if log_subscribers is None:
        log_subscribers = []
    env = aws_stack.get_environment(env)
    # make sure to convert stream ARN to stream name
    stream_name = aws_stack.kinesis_stream_name(stream_name)
    # decide which credentials provider to use
    credentialsProvider = None
    if ("AWS_ASSUME_ROLE_ARN" in os.environ or "AWS_ASSUME_ROLE_ARN"
            in env_vars) and ("AWS_ASSUME_ROLE_SESSION_NAME" in os.environ
                              or "AWS_ASSUME_ROLE_SESSION_NAME" in env_vars):
        # use special credentials provider that can assume IAM roles and handle temporary STS auth tokens
        credentialsProvider = "cloud.localstack.DefaultSTSAssumeRoleSessionCredentialsProvider"
        # pass through env variables to child process
        for var_name in [
                "AWS_ASSUME_ROLE_ARN",
                "AWS_ASSUME_ROLE_SESSION_NAME",
                "AWS_ACCESS_KEY_ID",
                "AWS_SECRET_ACCESS_KEY",
                "AWS_SESSION_TOKEN",
        ]:
            if var_name in os.environ and var_name not in env_vars:
                env_vars[var_name] = os.environ[var_name]
    if aws_stack.is_local_env(env):
        # need to disable CBOR protocol, enforce use of plain JSON,
        # see https://github.com/mhart/kinesalite/issues/31
        env_vars["AWS_CBOR_DISABLE"] = "true"
    if kcl_log_level or (len(log_subscribers) > 0):
        if not log_file:
            log_file = LOG_FILE_PATTERN.replace("*", short_uid())
            TMP_FILES.append(log_file)
        run("touch %s" % log_file)
        # start log output reader thread which will read the KCL log
        # file and print each line to stdout of this process...
        reader_thread = OutputReaderThread({
            "file": log_file,
            "level": kcl_log_level,
            "log_prefix": "KCL",
            "log_subscribers": log_subscribers,
        })
        reader_thread.start()

    # construct stream info
    stream_info = get_stream_info(
        stream_name,
        log_file,
        env=env,
        endpoint_url=endpoint_url,
        ddb_lease_table_suffix=ddb_lease_table_suffix,
        env_vars=env_vars,
    )
    props_file = stream_info["properties_file"]
    # set kcl config options
    kwargs = {"metricsLevel": "NONE", "initialPositionInStream": "LATEST"}
    # set parameters for local connection
    if aws_stack.is_local_env(env):
        kwargs[
            "kinesisEndpoint"] = f"{LOCALHOST}:{config.service_port('kinesis')}"
        kwargs[
            "dynamodbEndpoint"] = f"{LOCALHOST}:{config.service_port('dynamodb')}"
        kwargs["kinesisProtocol"] = get_service_protocol()
        kwargs["dynamodbProtocol"] = get_service_protocol()
        kwargs["disableCertChecking"] = "true"
    kwargs.update(configs)
    # create config file
    kclipy_helper.create_config_file(
        config_file=props_file,
        executableName=listener_script,
        streamName=stream_name,
        applicationName=stream_info["app_name"],
        credentialsProvider=credentialsProvider,
        region_name=region_name,
        **kwargs,
    )
    TMP_FILES.append(props_file)
    # start stream consumer
    stream = KinesisStream(id=stream_name, params=stream_info)
    thread_consumer = KinesisProcessorThread.start_consumer(stream)
    TMP_THREADS.append(thread_consumer)
    return thread_consumer
Example #22
def ensure_webapp_installed():
    web_dir = os.path.realpath(os.path.join(os.path.dirname(__file__), 'web'))
    node_modules_dir = os.path.join(web_dir, 'node_modules', 'jquery')
    if not os.path.exists(node_modules_dir):
        print('Initializing installation of Web application (this could take a long time, please be patient)')
        common.run('cd "%s"; npm install' % web_dir)
Example #23
def set_function_code(code, lambda_name, lambda_cwd=None):

    def generic_handler(event, context):
        raise ClientError(('Unable to find executor for Lambda function "%s". Note that ' +
            'Node.js, Golang, and .Net Core Lambdas currently require LAMBDA_EXECUTOR=docker') % lambda_name)

    arn = func_arn(lambda_name)
    lambda_details = arn_to_lambda[arn]
    runtime = lambda_details.runtime
    lambda_environment = lambda_details.envvars
    handler_name = lambda_details.handler or LAMBDA_DEFAULT_HANDLER
    code_passed = code
    code = code or lambda_details.code
    is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL
    zip_file_content = None

    if code_passed:
        lambda_cwd = lambda_cwd or set_archive_code(code_passed, lambda_name)
        if not is_local_mount:
            # Save the zip file to a temporary file that the lambda executors can reference
            zip_file_content = get_zip_bytes(code_passed)
    else:
        lambda_cwd = lambda_cwd or lambda_details.cwd

    # get local lambda working directory
    tmp_file = '%s/%s' % (lambda_cwd, LAMBDA_ZIP_FILE_NAME)

    if not zip_file_content:
        zip_file_content = load_file(tmp_file, mode='rb')

    # Set the appropriate lambda handler.
    lambda_handler = generic_handler

    if is_java_lambda(runtime):
        # The Lambda executors for Docker subclass LambdaExecutorContainers, which
        # runs Lambda in Docker by passing all *.jar files in the function working
        # directory as part of the classpath. Obtain a Java handler function below.
        lambda_handler = get_java_handler(zip_file_content, handler_name, tmp_file)

    if not is_local_mount:
        # Lambda code must be uploaded in Zip format
        if not is_zip_file(zip_file_content):
            raise ClientError(
                'Uploaded Lambda code for runtime ({}) is not in Zip format'.format(runtime))
        # Unzipping should only be required for (1) non-Java Lambdas, or (2) zip files containing JAR files
        if not is_java_lambda(runtime) or zip_contains_jar_entries(zip_file_content, 'lib/'):
            unzip(tmp_file, lambda_cwd)

    # Obtain handler details for any non-Java Lambda function
    if not is_java_lambda(runtime):

        handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
        handler_function = get_handler_function_from_name(handler_name, runtime=runtime)

        main_file = '%s/%s' % (lambda_cwd, handler_file)
        if not os.path.exists(main_file):
            # Raise an error if (1) this is not a local mount lambda, or (2) we're
            # running Lambdas locally (not in Docker), or (3) we're using remote Docker.
            # -> We do *not* want to raise an error if we're using local mount in non-remote Docker
            if not is_local_mount or not use_docker() or config.LAMBDA_REMOTE_DOCKER:
                file_list = run('cd "%s"; du -d 3 .' % lambda_cwd)
                config_debug = ('Config for local mount, docker, remote: "%s", "%s", "%s"' %
                    (is_local_mount, use_docker(), config.LAMBDA_REMOTE_DOCKER))
                LOG.debug('Lambda archive content:\n%s' % file_list)
                raise ClientError(error_response(
                    'Unable to find handler script (%s) in Lambda archive. %s' % (main_file, config_debug),
                    400, error_type='ValidationError'))

        if runtime.startswith('python') and not use_docker():
            try:
                # make sure the file is actually readable, then read contents
                ensure_readable(main_file)
                zip_file_content = load_file(main_file, mode='rb')
                # extract handler
                lambda_handler = exec_lambda_code(
                    zip_file_content,
                    handler_function=handler_function,
                    lambda_cwd=lambda_cwd,
                    lambda_env=lambda_environment)
            except Exception as e:
                raise ClientError('Unable to get handler function from lambda code.', e)

    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)

    return {'FunctionName': lambda_name}
Example #24
def set_function_code(code, lambda_name):
    def generic_handler(event, context):
        raise Exception((
            'Unable to find executor for Lambda function "%s". ' +
            'Note that Node.js Lambdas currently require LAMBDA_EXECUTOR=docker'
        ) % lambda_name)

    lambda_handler = generic_handler
    lambda_cwd = None
    arn = func_arn(lambda_name)
    runtime = arn_to_lambda[arn].runtime
    handler_name = arn_to_lambda.get(arn).handler
    lambda_environment = arn_to_lambda.get(arn).envvars
    if not handler_name:
        handler_name = LAMBDA_DEFAULT_HANDLER
    handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
    handler_function = get_handler_function_from_name(handler_name,
                                                      runtime=runtime)

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)

    if 'S3Bucket' in code:
        s3_client = aws_stack.connect_to_service('s3')
        bytes_io = BytesIO()
        try:
            s3_client.download_fileobj(code['S3Bucket'], code['S3Key'],
                                       bytes_io)
            zip_file_content = bytes_io.getvalue()
        except Exception as e:
            return error_response(
                'Unable to fetch Lambda archive from S3: %s' % e, 404)
    elif 'ZipFile' in code:
        zip_file_content = code['ZipFile']
        zip_file_content = base64.b64decode(zip_file_content)
    else:
        return error_response('No valid Lambda archive specified.', 400)

    # save tmp file
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    run('mkdir -p %s' % tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_cwd = tmp_dir

    # check if this is a ZIP file
    is_zip = is_zip_file(zip_file_content)
    if is_zip:
        unzip(tmp_file, tmp_dir)
        main_file = '%s/%s' % (tmp_dir, handler_file)
        if not os.path.isfile(main_file):
            # check if this is a zip file that contains a single JAR file
            jar_files = glob.glob('%s/*.jar' % tmp_dir)
            if len(jar_files) == 1:
                main_file = jar_files[0]
        if os.path.isfile(main_file):
            with open(main_file, 'rb') as file_obj:
                zip_file_content = file_obj.read()
        else:
            file_list = run('ls -la %s' % tmp_dir)
            LOG.debug('Lambda archive content:\n%s' % file_list)
            return error_response(
                'Unable to find handler script in Lambda archive.',
                400,
                error_type='ValidationError')

    # it could be a JAR file (regardless of whether wrapped in a ZIP file or not)
    is_jar = is_jar_archive(zip_file_content)
    if is_jar:

        def execute(event, context):
            result, log_output = lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(
                event,
                context,
                handler=arn_to_lambda[arn].handler,
                main_file=main_file)
            return result

        lambda_handler = execute

    elif runtime.startswith('python') and not use_docker():
        try:
            lambda_handler = exec_lambda_code(
                zip_file_content,
                handler_function=handler_function,
                lambda_cwd=lambda_cwd,
                lambda_env=lambda_environment)
        except Exception as e:
            raise Exception('Unable to get handler function from lambda code.',
                            e)

    if not is_zip and not is_jar:
        raise Exception('Uploaded Lambda code is neither a ZIP nor JAR file.')

    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)

    return {'FunctionName': lambda_name}
Example #25
    def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
        """
        Prepares a persistent docker container for a specific function.
        :param runtime: Lambda runtime environment, e.g. python2.7, nodejs6.10.
        :param func_arn: The ARN of the lambda function.
        :param env_vars: The environment variables for the lambda.
        :param lambda_cwd: The local directory containing the code for the lambda function.
        :return: ContainerInfo class containing the container name and default entry point.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)

            LOG.debug('Priming docker container: %s' % container_name)

            status = self.get_docker_container_status(func_arn)
            # Container is not running or doesn't exist.
            if status < 1:
                # Make sure the container does not exist in any form/state.
                self.destroy_docker_container(func_arn)

                env_vars_str = ' '.join([
                    '-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars
                ])

                network = config.LAMBDA_DOCKER_NETWORK
                network_str = ' --network="%s" ' % network if network else ''

                docker_options = config.LAMBDA_DOCKER_OPTIONS if config.LAMBDA_DOCKER_OPTIONS else ''

                # Create and start the container
                LOG.debug('Creating container: %s' % container_name)
                cmd = (
                    'docker create'
                    ' --name "%s"'
                    ' --entrypoint /bin/bash'  # Load bash when it starts.
                    ' --interactive'  # Keeps the container running bash.
                    ' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
                    ' -e HOSTNAME="$HOSTNAME"'
                    ' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
                    '  %s'  # env_vars
                    '  %s'  # network
                    '  %s'  # docker options
                    ' lambci/lambda:%s') % (container_name, env_vars_str,
                                            network_str, docker_options,
                                            runtime)
                LOG.debug(cmd)
                run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

                LOG.debug('Copying files to container "%s" from "%s".' %
                          (container_name, lambda_cwd))
                cmd = ('docker cp'
                       ' "%s/." "%s:/var/task"') % (lambda_cwd, container_name)
                LOG.debug(cmd)
                run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

                LOG.debug('Starting container: %s' % container_name)
                cmd = 'docker start %s' % (container_name)
                LOG.debug(cmd)
                run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
                # give the container some time to start up
                time.sleep(1)

            # Get the entry point for the image.
            LOG.debug('Getting the entrypoint for image: lambci/lambda:%s' %
                      runtime)
            cmd = ('docker image inspect'
                   ' --format="{{ .ContainerConfig.Entrypoint }}"'
                   ' lambci/lambda:%s') % (runtime)

            LOG.debug(cmd)
            run_result = run(cmd,
                             asynchronous=False,
                             stderr=subprocess.PIPE,
                             outfile=subprocess.PIPE)

            entry_point = run_result.strip('[]\n\r ')

            container_network = self.get_docker_container_network(func_arn)

            LOG.debug(
                'Using entrypoint "%s" for container "%s" on network "%s".' %
                (entry_point, container_name, container_network))

            return ContainerInfo(container_name, entry_point)
Example #26
def install_kinesalite():
    if not os.path.exists(INSTALL_PATH_KINESALITE_CLI):
        log_install_msg('Kinesis')
        run('cd "%s" && npm install' % ROOT_PATH)
Example #27
def start_infra_in_docker():

    container_name = 'localstack_main'

    if docker_container_running(container_name):
        raise Exception('LocalStack container named "%s" is already running' %
                        container_name)

    # load plugins before starting the docker container
    plugin_configs = load_plugins()
    plugin_run_params = ' '.join([
        entry.get('docker', {}).get('run_flags', '')
        for entry in plugin_configs
    ])

    # prepare APIs
    canonicalize_api_names()

    services = os.environ.get('SERVICES', '')
    entrypoint = os.environ.get('ENTRYPOINT', '')
    cmd = os.environ.get('CMD', '')
    user_flags = config.DOCKER_FLAGS
    image_name = os.environ.get('IMAGE_NAME', constants.DOCKER_IMAGE_NAME)
    service_ports = config.SERVICE_PORTS
    force_noninteractive = os.environ.get('FORCE_NONINTERACTIVE', '')

    # construct port mappings
    ports_list = sorted(service_ports.values())
    start_port = 0
    last_port = 0
    port_ranges = []
    for i in range(0, len(ports_list)):
        if not start_port:
            start_port = ports_list[i]
        if not last_port:
            last_port = ports_list[i]
        if ports_list[i] > last_port + 1:
            port_ranges.append([start_port, last_port])
            start_port = ports_list[i]
        elif i >= len(ports_list) - 1:
            port_ranges.append([start_port, ports_list[i]])
        last_port = ports_list[i]
    port_mappings = ' '.join(
        '-p {start}-{end}:{start}-{end}'.format(start=entry[0], end=entry[1])
        if entry[0] < entry[1] else '-p {port}:{port}'.format(port=entry[0])
        for entry in port_ranges)

    if services:
        port_mappings = ''
        for service, port in service_ports.items():
            port_mappings += ' -p {port}:{port}'.format(port=port)

    env_str = ''
    for env_var in config.CONFIG_ENV_VARS:
        value = os.environ.get(env_var, None)
        if value is not None:
            env_str += '-e %s="%s" ' % (env_var, value)

    data_dir_mount = ''
    data_dir = os.environ.get('DATA_DIR', None)
    if data_dir is not None:
        container_data_dir = '/tmp/localstack_data'
        data_dir_mount = '-v "%s:%s" ' % (data_dir, container_data_dir)
        env_str += '-e DATA_DIR="%s" ' % container_data_dir

    interactive = '' if force_noninteractive or in_ci() else '-it '

    # append space if parameter is set
    user_flags = '%s ' % user_flags if user_flags else user_flags
    entrypoint = '%s ' % entrypoint if entrypoint else entrypoint
    plugin_run_params = '%s ' % plugin_run_params if plugin_run_params else plugin_run_params

    container_name = 'localstack_main'

    docker_cmd = (
        '%s run %s%s%s%s%s' + '--rm --privileged ' + '--name %s ' +
        '-p 8080:8080 %s %s' + '-v "%s:/tmp/localstack" -v "%s:%s" ' +
        '-e DOCKER_HOST="unix://%s" ' + '-e HOST_TMP_FOLDER="%s" "%s" %s') % (
            config.DOCKER_CMD, interactive, entrypoint, env_str, user_flags,
            plugin_run_params, container_name, port_mappings, data_dir_mount,
            config.TMP_FOLDER, config.DOCKER_SOCK, config.DOCKER_SOCK,
            config.DOCKER_SOCK, config.HOST_TMP_FOLDER, image_name, cmd)

    mkdir(config.TMP_FOLDER)
    run_cmd_safe(cmd='chmod -R 777 "%s"' % config.TMP_FOLDER)

    print(docker_cmd)
    t = ShellCommandThread(docker_cmd, outfile=subprocess.PIPE)
    t.start()
    time.sleep(2)

    if DO_CHMOD_DOCKER_SOCK:
        # fix permissions on /var/run/docker.sock
        for i in range(0, 100):
            if docker_container_running(container_name):
                break
            time.sleep(2)
        run('%s exec -u root "%s" chmod 777 /var/run/docker.sock' %
            (config.DOCKER_CMD, container_name))

    t.process.wait()
    sys.exit(t.process.returncode)
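The least obvious part of the example above is how consecutive service ports are collapsed into '-p start-end:start-end' ranges. The following standalone sketch reproduces that loop with made-up port numbers to show the resulting ranges.

# Standalone check of the port-range collapsing logic, with made-up ports.
ports_list = sorted([4567, 4568, 4569, 4575, 4580, 4581])
start_port = 0
last_port = 0
port_ranges = []
for i in range(0, len(ports_list)):
    if not start_port:
        start_port = ports_list[i]
    if not last_port:
        last_port = ports_list[i]
    if ports_list[i] > last_port + 1:
        port_ranges.append([start_port, last_port])
        start_port = ports_list[i]
    elif i >= len(ports_list) - 1:
        port_ranges.append([start_port, ports_list[i]])
    last_port = ports_list[i]
print(port_ranges)  # [[4567, 4569], [4575, 4575], [4580, 4581]]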
Example #28
    def test_run(self):
        cmd = "echo 'foobar'"
        result = common.run(cmd)
        assert result.strip() == "foobar"
Example #29
def ensure_can_use_sudo():
    if not is_root() and not can_use_sudo():
        print('Please enter your sudo password (required to configure local network):')
        run('sudo echo', stdin=True)
Example #30
    def run_command(*_):
        run(cmd, outfile=subprocess.PIPE, print_error=False)
Example #31
def can_use_sudo():
    try:
        run('echo | sudo -S echo', print_error=False)
        return True
    except Exception:
        return False
Example #32
    def tearDownClass(cls):
        run("cd %s; %s destroy -auto-approve" %
            (cls.get_base_dir(), TERRAFORM_BIN))
Example #33
def is_debug():
    return os.environ.get('DEBUG', '').strip() not in ['', '0', 'false']


def do_run(cmd, asynchronous, print_output=False):
    sys.stdout.flush()
    if asynchronous:
        if is_debug():
            print_output = True
        outfile = subprocess.PIPE if print_output else None
        t = ShellCommandThread(cmd, outfile=outfile)
        t.start()
        TMP_THREADS.append(t)
        return t
    else:
        return run(cmd)


def start_proxy_for_service(service_name,
                            port,
                            default_backend_port,
                            update_listener,
                            quiet=False,
                            params={}):
    # check if we have a custom backend configured
    custom_backend_url = os.environ.get('%s_BACKEND' % service_name.upper())
    backend_url = custom_backend_url or (
        'http://%s:%s' % (DEFAULT_BACKEND_HOST, default_backend_port))
    return start_proxy(port,
                       backend_url=backend_url,
                       update_listener=update_listener,
                       quiet=quiet,
                       params=params)
Example #34
def start_moto_server_separate(key, port, name=None, backend_port=None, asynchronous=False):
    moto_server_cmd = '%s/bin/moto_server' % LOCALSTACK_VENV_FOLDER
    if not os.path.exists(moto_server_cmd):
        moto_server_cmd = run('which moto_server').strip()
    cmd = 'VALIDATE_LAMBDA_S3=0 %s %s -p %s -H %s' % (moto_server_cmd, key, backend_port or port, constants.BIND_HOST)
    return do_run(cmd, asynchronous)
Example #35
def install_elasticsearch(version=None):
    version = get_elasticsearch_install_version(version)
    install_dir = get_elasticsearch_install_dir(version)
    installed_executable = os.path.join(install_dir, 'bin', 'elasticsearch')
    if not os.path.exists(installed_executable):
        log_install_msg('Elasticsearch (%s)' % version)
        es_url = ELASTICSEARCH_URLS.get(version)
        if not es_url:
            raise Exception(
                'Unable to find download URL for Elasticsearch version "%s"' %
                version)
        install_dir_parent = os.path.dirname(install_dir)
        mkdir(install_dir_parent)
        # download and extract archive
        tmp_archive = os.path.join(config.TMP_FOLDER,
                                   'localstack.%s' % os.path.basename(es_url))
        download_and_extract_with_retry(es_url, tmp_archive,
                                        install_dir_parent)
        elasticsearch_dir = glob.glob(
            os.path.join(install_dir_parent, 'elasticsearch*'))
        if not elasticsearch_dir:
            raise Exception('Unable to find Elasticsearch folder in %s' %
                            install_dir_parent)
        shutil.move(elasticsearch_dir[0], install_dir)

        for dir_name in ('data', 'logs', 'modules', 'plugins',
                         'config/scripts'):
            dir_path = os.path.join(install_dir, dir_name)
            mkdir(dir_path)
            chmod_r(dir_path, 0o777)

        # install default plugins
        for plugin in ELASTICSEARCH_PLUGIN_LIST:
            if is_alpine():
                # https://github.com/pires/docker-elasticsearch/issues/56
                os.environ['ES_TMPDIR'] = '/tmp'
            plugin_binary = os.path.join(install_dir, 'bin',
                                         'elasticsearch-plugin')
            plugin_dir = os.path.join(install_dir, 'plugins', plugin)
            if not os.path.exists(plugin_dir):
                LOG.info('Installing Elasticsearch plugin %s' % (plugin))
                run('%s install -b %s' % (plugin_binary, plugin))

    # delete some plugins to free up space
    for plugin in ELASTICSEARCH_DELETE_MODULES:
        module_dir = os.path.join(install_dir, 'modules', plugin)
        rm_rf(module_dir)

    # disable x-pack-ml plugin (not working on Alpine)
    xpack_dir = os.path.join(install_dir, 'modules', 'x-pack-ml', 'platform')
    rm_rf(xpack_dir)

    # patch JVM options file - replace hardcoded heap size settings
    jvm_options_file = os.path.join(install_dir, 'config', 'jvm.options')
    if os.path.exists(jvm_options_file):
        jvm_options = load_file(jvm_options_file)
        jvm_options_replaced = re.sub(r'(^-Xm[sx][a-zA-Z0-9\.]+$)',
                                      r'# \1',
                                      jvm_options,
                                      flags=re.MULTILINE)
        if jvm_options != jvm_options_replaced:
            save_file(jvm_options_file, jvm_options_replaced)
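The JVM options patching at the end of the example relies on a single regular expression that comments out hardcoded heap-size settings. The following standalone sketch shows its effect on a few made-up jvm.options lines.

# Standalone check of the heap-size regex used above (sample lines are made up).
import re
sample = '-Xms2g\n-Xmx2g\n-XX:+UseConcMarkSweepGC'
patched = re.sub(r'(^-Xm[sx][a-zA-Z0-9\.]+$)', r'# \1', sample, flags=re.MULTILINE)
print(patched)  # '-Xms2g' and '-Xmx2g' are commented out; the GC flag is kept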
Example #36
def delete_all_elasticsearch_data():
    """ This function drops ALL data in the local Elasticsearch data folder. Use with caution! """
    data_dir = os.path.join(LOCALSTACK_ROOT_FOLDER, 'infra', 'elasticsearch',
                            'data', 'elasticsearch', 'nodes')
    run('rm -rf "%s"' % data_dir)
Example #37
    def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
        """
        Prepares a persistent docker container for a specific function.
        :param runtime: Lambda runtime environment, e.g. python2.7, nodejs6.10.
        :param func_arn: The ARN of the lambda function.
        :param env_vars: The environment variables for the lambda.
        :param lambda_cwd: The local directory containing the code for the lambda function.
        :return: ContainerInfo class containing the container name and default entry point.
        """
        with self.docker_container_lock:
            # Get the container name and id.
            container_name = self.get_container_name(func_arn)

            LOG.debug('Priming docker container: %s' % container_name)

            status = self.get_docker_container_status(func_arn)
            # Container is not running or doesn't exist.
            if status < 1:
                # Make sure the container does not exist in any form/state.
                self.destroy_docker_container(func_arn)

                env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])

                # Create and start the container
                LOG.debug('Creating container: %s' % container_name)
                cmd = (
                    'docker create'
                    ' --name "%s"'
                    ' --entrypoint /bin/bash'  # Load bash when it starts.
                    ' --interactive'  # Keeps the container running bash.
                    ' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
                    ' -e HOSTNAME="$HOSTNAME"'
                    ' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
                    '  %s'  # env_vars
                    ' lambci/lambda:%s'
                ) % (container_name, env_vars_str, runtime)
                LOG.debug(cmd)
                run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

                LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
                cmd = (
                    'docker cp'
                    ' "%s/." "%s:/var/task"'
                ) % (lambda_cwd, container_name)
                LOG.debug(cmd)
                run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

                LOG.debug('Starting container: %s' % container_name)
                cmd = 'docker start %s' % (container_name)
                LOG.debug(cmd)
                run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
                # give the container some time to start up
                time.sleep(1)

            # Get the entry point for the image.
            LOG.debug('Getting the entrypoint for image: lambci/lambda:%s' % runtime)
            cmd = (
                'docker image inspect'
                ' --format="{{ .ContainerConfig.Entrypoint }}"'
                ' lambci/lambda:%s'
            ) % (runtime)

            LOG.debug(cmd)
            run_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)

            entry_point = run_result.strip('[]\n\r ')

            LOG.debug('Using entrypoint "%s" for container "%s".' % (entry_point, container_name))
            return ContainerInfo(container_name, entry_point)
Пример #38
0
def set_function_code(code, lambda_name):
    def generic_handler(event, context):
        raise Exception((
            'Unable to find executor for Lambda function "%s". ' +
            'Note that Node.js and .NET Core Lambdas currently require LAMBDA_EXECUTOR=docker'
        ) % lambda_name)

    lambda_cwd = None
    arn = func_arn(lambda_name)
    lambda_details = arn_to_lambda[arn]
    runtime = lambda_details.runtime
    handler_name = lambda_details.handler
    lambda_environment = lambda_details.envvars
    if not handler_name:
        handler_name = LAMBDA_DEFAULT_HANDLER

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)
    zip_file_content = None
    is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL

    if is_local_mount:
        # Mount or use a local folder lambda executors can reference
        # WARNING: this means we're pointing lambda_cwd to a local path in the user's
        # file system! We must ensure that there is no data loss (i.e., we must *not* add
        # this folder to TMP_FILES or similar).
        lambda_cwd = code['S3Key']
    else:
        # Save the zip file to a temporary file that the lambda executors can reference
        zip_file_content = get_zip_bytes(code)
        if isinstance(zip_file_content, Response):
            return zip_file_content
        code_sha_256 = base64.standard_b64encode(
            hashlib.sha256(zip_file_content).digest())
        lambda_details.get_version('$LATEST')['CodeSize'] = len(
            zip_file_content)
        lambda_details.get_version(
            '$LATEST')['CodeSha256'] = code_sha_256.decode('utf-8')
        tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
        mkdir(tmp_dir)
        tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
        save_file(tmp_file, zip_file_content)
        TMP_FILES.append(tmp_dir)
        lambda_cwd = tmp_dir

    # Set the appropriate lambda handler.
    lambda_handler = generic_handler
    if runtime == LAMBDA_RUNTIME_JAVA8:
        # The Lambda executors for Docker subclass LambdaExecutorContainers,
        # which runs Lambda in Docker by passing all *.jar files in the function
        # working directory as part of the classpath. Because of this, we need to
        # save the zip_file_content as a .jar here.
        if is_jar_archive(zip_file_content):
            jar_tmp_file = '{working_dir}/{file_name}'.format(
                working_dir=tmp_dir, file_name=LAMBDA_JAR_FILE_NAME)
            save_file(jar_tmp_file, zip_file_content)

        lambda_handler = get_java_handler(zip_file_content, handler_name,
                                          tmp_file)
        if isinstance(lambda_handler, Response):
            return lambda_handler
    else:
        handler_file = get_handler_file_from_name(handler_name,
                                                  runtime=runtime)
        handler_function = get_handler_function_from_name(handler_name,
                                                          runtime=runtime)

        if not is_local_mount:
            # Lambda code must be uploaded in Zip format
            if not is_zip_file(zip_file_content):
                raise Exception(
                    'Uploaded Lambda code for runtime ({}) is not in Zip format'
                    .format(runtime))
            unzip(tmp_file, lambda_cwd)

        main_file = '%s/%s' % (lambda_cwd, handler_file)
        if os.path.isfile(main_file):
            # make sure the file is actually readable, then read contents
            ensure_readable(main_file)
            with open(main_file, 'rb') as file_obj:
                zip_file_content = file_obj.read()
        else:
            # Raise an error if (1) this is not a local mount lambda, or (2) we're
            # running Lambdas locally (not in Docker), or (3) we're using remote Docker.
            # -> We do *not* want to raise an error if we're using local mount in non-remote Docker
            if not is_local_mount or not use_docker(
            ) or config.LAMBDA_REMOTE_DOCKER:
                file_list = run('ls -la %s' % lambda_cwd)
                LOG.debug('Lambda archive content:\n%s' % file_list)
                return error_response(
                    'Unable to find handler script in Lambda archive.',
                    400,
                    error_type='ValidationError')

        if runtime.startswith('python') and not use_docker():
            try:
                lambda_handler = exec_lambda_code(
                    zip_file_content,
                    handler_function=handler_function,
                    lambda_cwd=lambda_cwd,
                    lambda_env=lambda_environment)
            except Exception as e:
                raise Exception(
                    'Unable to get handler function from lambda code.', e)

    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)

    return {'FunctionName': lambda_name}
Example #39
def is_alpine():
    try:
        run('cat /etc/issue | grep Alpine', print_error=False)
        return True
    except Exception:
        return False
Example #40
def set_function_code(code, lambda_name):

    def generic_handler(event, context):
        raise Exception(('Unable to find executor for Lambda function "%s". ' +
            'Note that Node.js and .NET Core Lambdas currently require LAMBDA_EXECUTOR=docker') % lambda_name)

    lambda_cwd = None
    arn = func_arn(lambda_name)
    runtime = arn_to_lambda[arn].runtime
    handler_name = arn_to_lambda.get(arn).handler
    lambda_environment = arn_to_lambda.get(arn).envvars
    if not handler_name:
        handler_name = LAMBDA_DEFAULT_HANDLER

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)

    # Save the zip file to a temporary file that the lambda executors can reference.
    zip_file_content = get_zip_bytes(code)
    if isinstance(zip_file_content, Response):
        return zip_file_content
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    mkdir(tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_cwd = tmp_dir

    # Set the appropriate lambda handler.
    lambda_handler = generic_handler
    if runtime == LAMBDA_RUNTIME_JAVA8:
        # The Lambda executors for Docker subclass LambdaExecutorContainers,
        # which runs Lambda in Docker by passing all *.jar files in the function
        # working directory as part of the classpath. Because of this, we need to
        # save the zip_file_content as a .jar here.
        if is_jar_archive(zip_file_content):
            jar_tmp_file = '{working_dir}/{file_name}'.format(
                working_dir=tmp_dir, file_name=LAMBDA_JAR_FILE_NAME)
            save_file(jar_tmp_file, zip_file_content)

        lambda_handler = get_java_handler(zip_file_content, handler_name, tmp_file)
        if isinstance(lambda_handler, Response):
            return lambda_handler
    else:
        handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
        handler_function = get_handler_function_from_name(handler_name, runtime=runtime)

        # Lambda code must be uploaded in the Zip format.
        if not is_zip_file(zip_file_content):
            raise Exception(
                'Uploaded Lambda code for runtime ({}) is not in Zip format'.format(runtime))

        unzip(tmp_file, tmp_dir)
        main_file = '%s/%s' % (tmp_dir, handler_file)
        if os.path.isfile(main_file):
            # make sure the file is actually readable, then read contents
            ensure_readable(main_file)
            with open(main_file, 'rb') as file_obj:
                zip_file_content = file_obj.read()
        else:
            file_list = run('ls -la %s' % tmp_dir)
            LOG.debug('Lambda archive content:\n%s' % file_list)
            return error_response(
                'Unable to find handler script in Lambda archive.', 400,
                error_type='ValidationError')

        if runtime.startswith('python') and not use_docker():
            try:
                lambda_handler = exec_lambda_code(
                    zip_file_content,
                    handler_function=handler_function,
                    lambda_cwd=lambda_cwd,
                    lambda_env=lambda_environment)
            except Exception as e:
                raise Exception('Unable to get handler function from lambda code.', e)

    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)

    return {'FunctionName': lambda_name}
Example #41
def install_kinesalite():
    target_dir = '%s/kinesalite' % INSTALL_DIR_NPM
    if not os.path.exists(target_dir):
        log_install_msg('Kinesis')
        run('cd "%s" && npm install' % ROOT_PATH)
Example #42
def create_zip_file_cli(source_path, base_dir, zip_file):
    # Using the native zip command can be an order of magnitude faster on Travis-CI
    source = "*" if source_path == base_dir else os.path.basename(source_path)
    command = "cd %s; zip -r %s %s" % (base_dir, zip_file, source)
    run(command)
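The comment notes that the native zip binary is much faster on CI. For environments where it is unavailable, a pure-Python fallback along these lines could be substituted; this is a sketch that assumes the same relative-path layout as running `zip -r` from inside base_dir.

import os
import zipfile

def create_zip_file_python(base_dir, zip_file):
    # Walk base_dir and store each entry relative to it, mirroring the
    # archive layout produced by `cd base_dir && zip -r ...`.
    with zipfile.ZipFile(zip_file, 'w', zipfile.ZIP_DEFLATED) as zf:
        for root, _, files in os.walk(base_dir):
            for name in files:
                full_path = os.path.join(root, name)
                zf.write(full_path, os.path.relpath(full_path, base_dir))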
Example #43
def set_function_code(code, lambda_name):

    def generic_handler(event, context):
        raise Exception(('Unable to find executor for Lambda function "%s". ' +
            'Note that Node.js and .NET Core Lambdas currently require LAMBDA_EXECUTOR=docker') % lambda_name)

    lambda_handler = generic_handler
    lambda_cwd = None
    arn = func_arn(lambda_name)
    runtime = arn_to_lambda[arn].runtime
    handler_name = arn_to_lambda.get(arn).handler
    lambda_environment = arn_to_lambda.get(arn).envvars
    if not handler_name:
        handler_name = LAMBDA_DEFAULT_HANDLER
    handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
    handler_function = get_handler_function_from_name(handler_name, runtime=runtime)

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)

    if 'S3Bucket' in code:
        s3_client = aws_stack.connect_to_service('s3')
        bytes_io = BytesIO()
        try:
            s3_client.download_fileobj(code['S3Bucket'], code['S3Key'], bytes_io)
            zip_file_content = bytes_io.getvalue()
        except Exception as e:
            return error_response('Unable to fetch Lambda archive from S3: %s' % e, 404)
    elif 'ZipFile' in code:
        zip_file_content = code['ZipFile']
        zip_file_content = base64.b64decode(zip_file_content)
    else:
        return error_response('No valid Lambda archive specified.', 400)

    # save tmp file
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    mkdir(tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_cwd = tmp_dir

    # check if this is a ZIP file
    is_zip = is_zip_file(zip_file_content)
    if is_zip:
        unzip(tmp_file, tmp_dir)
        main_file = '%s/%s' % (tmp_dir, handler_file)
        if not os.path.isfile(main_file):
            # check if this is a zip file that contains a single JAR file
            jar_files = glob.glob('%s/*.jar' % tmp_dir)
            if len(jar_files) == 1:
                main_file = jar_files[0]
        if os.path.isfile(main_file):
            # make sure the file is actually readable, then read contents
            ensure_readable(main_file)
            with open(main_file, 'rb') as file_obj:
                zip_file_content = file_obj.read()
        else:
            file_list = run('ls -la %s' % tmp_dir)
            LOG.debug('Lambda archive content:\n%s' % file_list)
            return error_response('Unable to find handler script in Lambda archive.', 400, error_type='ValidationError')

    # it could be a JAR file (regardless of whether wrapped in a ZIP file or not)
    is_jar = is_jar_archive(zip_file_content)
    if is_jar:

        def execute(event, context):
            result, log_output = lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(event, context,
                handler=arn_to_lambda[arn].handler, main_file=main_file)
            return result

        lambda_handler = execute

    elif runtime.startswith('python') and not use_docker():
        try:
            lambda_handler = exec_lambda_code(zip_file_content,
                handler_function=handler_function, lambda_cwd=lambda_cwd,
                lambda_env=lambda_environment)
        except Exception as e:
            raise Exception('Unable to get handler function from lambda code.', e)

    if not is_zip and not is_jar:
        raise Exception('Uploaded Lambda code is neither a ZIP nor JAR file.')

    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)

    return {'FunctionName': lambda_name}
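This variant leans on is_zip_file and is_jar_archive, which are defined elsewhere in the codebase. A plausible minimal sketch of such checks, offered only as an assumption about their behavior, could look like this:

import io
import zipfile

def looks_like_zip(content):
    # ZIP archives begin with the 'PK' local-file-header signature.
    return isinstance(content, bytes) and content[:2] == b'PK'

def looks_like_jar(content):
    # A JAR is a ZIP archive that normally carries a META-INF/MANIFEST.MF entry.
    if not looks_like_zip(content):
        return False
    with zipfile.ZipFile(io.BytesIO(content)) as zf:
        return any(name.upper() == 'META-INF/MANIFEST.MF' for name in zf.namelist())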
Example #44
def copy_dir(source, target):
    if is_alpine():
        # Using the native command can be an order of magnitude faster on Travis-CI
        return run("cp -r %s %s" % (source, target))
    shutil.copytree(source, target)
Example #45
def is_alpine():
    try:
        run('cat /etc/issue | grep Alpine', print_error=False)
        return True
    except Exception:
        return False
Example #46
def rm_dir(dir):
    if is_alpine():
        # Using the native command can be an order of magnitude faster on Travis-CI
        return run("rm -r %s" % dir)
    shutil.rmtree(dir)
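Like copy_dir above, rm_dir shells out on Alpine for speed. A slightly more defensive variant, sketched here and reusing is_alpine and run from the surrounding snippets, tolerates a directory that is already gone.

import shutil

def rm_dir_safe(path):
    # Same fast path as rm_dir, but ignore an already-missing directory.
    if is_alpine():
        return run('rm -rf %s' % path)
    shutil.rmtree(path, ignore_errors=True)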
Example #47
def install_kinesalite():
    target_dir = '%s/kinesalite' % INSTALL_DIR_NPM
    if not os.path.exists(target_dir):
        LOGGER.info('Downloading and installing local Kinesis server. This may take some time.')
        run('cd "%s" && npm install' % ROOT_PATH)
Example #48
def set_function_code(code, lambda_name):
    def generic_handler(event, context):
        raise Exception((
            'Unable to find executor for Lambda function "%s". ' +
            'Note that Node.js Lambdas currently require LAMBDA_EXECUTOR=docker'
        ) % lambda_name)

    lambda_handler = generic_handler
    lambda_cwd = None
    arn = func_arn(lambda_name)
    runtime = arn_to_lambda[arn].runtime
    handler_name = arn_to_lambda.get(arn).handler
    lambda_environment = arn_to_lambda.get(arn).envvars
    if not handler_name:
        handler_name = LAMBDA_DEFAULT_HANDLER
    handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
    handler_function = get_handler_function_from_name(handler_name,
                                                      runtime=runtime)

    if 'S3Bucket' in code:
        s3_client = aws_stack.connect_to_service('s3')
        bytes_io = BytesIO()
        try:
            s3_client.download_fileobj(code['S3Bucket'], code['S3Key'],
                                       bytes_io)
            zip_file_content = bytes_io.getvalue()
        except Exception as e:
            return error_response(
                'Unable to fetch Lambda archive from S3: %s' % e, 404)
    elif 'ZipFile' in code:
        zip_file_content = code['ZipFile']
        zip_file_content = base64.b64decode(zip_file_content)
    else:
        return error_response('No valid Lambda archive specified.', 400)

    # save tmp file
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    run('mkdir -p %s' % tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_cwd = tmp_dir

    # check if this is a ZIP file
    is_zip = is_zip_file(zip_file_content)
    if is_zip:
        unzip(tmp_file, tmp_dir)
        main_file = '%s/%s' % (tmp_dir, handler_file)
        if not os.path.isfile(main_file):
            # check if this is a zip file that contains a single JAR file
            jar_files = glob.glob('%s/*.jar' % tmp_dir)
            if len(jar_files) == 1:
                main_file = jar_files[0]
        if os.path.isfile(main_file):
            with open(main_file, 'rb') as file_obj:
                zip_file_content = file_obj.read()
        else:
            file_list = run('ls -la %s' % tmp_dir)
            LOG.debug('Lambda archive content:\n%s' % file_list)
            return error_response(
                'Unable to find handler script in Lambda archive.',
                400,
                error_type='ValidationError')

    # it could be a JAR file (regardless of whether wrapped in a ZIP file or not)
    is_jar = is_jar_archive(zip_file_content)
    if is_jar:

        def execute(event, context):
            event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
            save_file(event_file, json.dumps(event))
            TMP_FILES.append(event_file)
            class_name = arn_to_lambda[arn].handler.split('::')[0]
            classpath = '%s:%s' % (LAMBDA_EXECUTOR_JAR, main_file)
            cmd = 'java -cp %s %s %s %s' % (classpath, LAMBDA_EXECUTOR_CLASS,
                                            class_name, event_file)
            async = False
            # flip async flag depending on origin
            if 'Records' in event:
                # TODO: add more event supporting async lambda execution
                if 'Sns' in event['Records'][0]:
                    async = True
            result, log_output = run_lambda_executor(cmd, async=async)
            LOG.info('Lambda output: %s' % log_output.replace('\n', '\n> '))
            return result
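The asynchronous-execution decision in the truncated execute() above only inspects the first record for an SNS payload. Restated as a standalone helper (the name is illustrative, not from the original code):

def is_async_event(event):
    # Mirrors the check above: SNS-triggered invocations run asynchronously;
    # every other event source defaults to a synchronous call.
    records = event.get('Records') or []
    return bool(records) and 'Sns' in records[0]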
Example #49
def ensure_webapp_installed():
    web_dir = os.path.realpath(os.path.join(os.path.dirname(__file__), 'web'))
    node_modules_dir = os.path.join(web_dir, 'node_modules', 'jquery')
    if not os.path.exists(node_modules_dir):
        print('Initializing installation of Web application (this could take a long time, please be patient)')
        common.run('cd "%s"; npm install' % web_dir)
Example #50
    install.install_elasticsearch()
    backend_port = DEFAULT_PORT_ELASTICSEARCH_BACKEND
    es_data_dir = '%s/infra/elasticsearch/data' % (ROOT_PATH)
    if DATA_DIR:
        es_data_dir = '%s/elasticsearch' % DATA_DIR
    # Elasticsearch 5.x cannot be bound to 0.0.0.0 in some Docker environments,
    # hence we use the default bind address 127.0.0.1 and put a proxy in front of it
    cmd = ((
        'ES_JAVA_OPTS=\"$ES_JAVA_OPTS -Xms200m -Xmx500m\" %s/infra/elasticsearch/bin/elasticsearch '
        '-E http.port=%s -E http.publish_port=%s -E http.compression=false -E path.data=%s'
    ) % (ROOT_PATH, backend_port, backend_port, es_data_dir))
    print("Starting local Elasticsearch (%s port %s)..." %
          (get_service_protocol(), port))
    if delete_data:
        run('rm -rf %s' % es_data_dir)
    # fix permissions
    run('chmod -R 777 %s/infra/elasticsearch' % ROOT_PATH)
    run('mkdir -p "%s"; chmod -R 777 "%s"' % (es_data_dir, es_data_dir))
    # start proxy and ES process
    start_proxy_for_service('elasticsearch',
                            port,
                            backend_port,
                            update_listener,
                            quiet=True,
                            params={'protocol_version': 'HTTP/1.0'})
    if is_root():
        cmd = "su -c '%s' localstack" % cmd
    thread = do_run(cmd, async)
    return thread
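Because do_run returns the thread immediately, callers that need Elasticsearch to be reachable may want to poll the proxy port before proceeding. A minimal polling sketch using only the standard library (a hypothetical helper, not part of the snippet above):

import socket
import time

def wait_for_port(host, port, timeout=60):
    # Poll until the given TCP port accepts connections or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=2):
                return True
        except OSError:
            time.sleep(1)
    return False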