示例#1
0
def _create_kinesis_stream_trigger_from_meta(lambda_name, lambda_arn,
                                             role_name, trigger_meta):
    """Subscribe a lambda to the Kinesis stream described in trigger_meta.

    Validates the trigger meta, waits for the stream to become active,
    attaches an inline IAM policy letting the role read the stream and
    invoke the lambda, then registers the event source mapping.
    """
    validate_params(lambda_name, trigger_meta,
                    ['target_stream', 'batch_size', 'starting_position'])

    stream_name = trigger_meta['target_stream']
    description = CONN.kinesis().get_stream(stream_name)['StreamDescription']
    stream_arn = description['StreamARN']
    # additional waiting for stream
    if description['StreamStatus'] != 'ACTIVE':
        _LOG.debug('Kinesis stream %s is not in active state,'
                   ' waiting for activation...', stream_name)
        time.sleep(120)

    # TODO policy should be moved to meta
    policy_name = '{0}KinesisTo{1}Lambda'.format(stream_name, lambda_name)
    invoke_statement = {
        "Effect": "Allow",
        "Action": [
            "lambda:InvokeFunction"
        ],
        "Resource": [
            lambda_arn
        ]
    }
    read_stream_statement = {
        "Action": [
            "kinesis:DescribeStreams",
            "kinesis:DescribeStream",
            "kinesis:ListStreams",
            "kinesis:GetShardIterator",
            "Kinesis:GetRecords"
        ],
        "Effect": "Allow",
        "Resource": stream_arn
    }
    policy_document = {
        "Statement": [invoke_statement, read_stream_statement],
        "Version": "2012-10-17"
    }
    CONN.iam().attach_inline_policy(role_name=role_name,
                                    policy_name=policy_name,
                                    policy_document=policy_document)
    _LOG.debug('Inline policy %s is attached to role %s',
               policy_name, role_name)
    _LOG.debug('Waiting for activation policy %s...', policy_name)
    time.sleep(10)

    _add_kinesis_event_source(lambda_arn, stream_arn, trigger_meta)
    _LOG.info('Lambda %s subscribed to kinesis stream %s', lambda_name,
              stream_name)
def _enable_autoscaling(autoscaling_config, name):
    """Register application autoscaling targets and policies for a resource.

    Iterates over the autoscaling configuration items, registers a scalable
    target for each item whose IAM role exists, and attaches the optional
    scaling policy from the item's ``config`` section.

    :return: dict with ``targets`` and ``policies`` lists of what was set up
    """
    targets = []
    policies = []
    required = [
        'resource_name', 'dimension', 'min_capacity', 'max_capacity',
        'role_name'
    ]
    for item in autoscaling_config:
        validate_params(name, item, required)
        role_name = item['role_name']
        role_arn = CONN.iam().check_if_role_exists(role_name)
        if not role_arn:
            # cannot register a target without an existing role
            _LOG.warn('Role %s is not found, skip autoscaling config',
                      role_name)
            continue
        dimension = item['dimension']
        resource_id, sc_targets = register_autoscaling_target(
            dimension, item, role_arn, name)
        targets.extend(sc_targets)
        _LOG.debug('Autoscaling %s is set up for %s', dimension, resource_id)
        autoscaling_policy = item.get('config')
        if autoscaling_policy:
            policy_name = autoscaling_policy['policy_name']
            _LOG.debug('Going to set up autoscaling with '
                       'policy %s', policy_name)
            policies.append(
                put_autoscaling_policy(autoscaling_policy, dimension,
                                       policy_name, resource_id))
            _LOG.debug('Policy %s is set up', policy_name)
    return {'targets': targets, 'policies': policies}
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""
from botocore.exceptions import ClientError

from syndicate.commons.log_helper import get_logger
from syndicate.connection.helper import retry
from syndicate.core import CONFIG, CONN
from syndicate.core.helper import create_pool, prettify_json, unpack_kwargs
from syndicate.core.resources.helper import (build_description_obj,
                                             resolve_dynamic_identifier)

_IAM_CONN = CONN.iam()

_LOG = get_logger('syndicate.core.resources.iam_resource')


def remove_policies(args):
    """Remove the IAM policies described by ``args`` in parallel.

    Fans each element of ``args`` out to :func:`_remove_policy` via the
    shared worker pool (each element is a kwargs dict — see the
    ``@unpack_kwargs`` decorator on ``_remove_policy``).
    """
    create_pool(_remove_policy, args)


@unpack_kwargs
def _remove_policy(arn, config):
    policy_name = config['resource_name']
    try:
        _IAM_CONN.remove_policy(arn)
        _LOG.info('IAM policy %s was removed.', policy_name)
    except ClientError as e:
示例#4
0
def _create_lambda_from_meta(name, meta):
    """Create lambda ``name`` from its deployment meta.

    Validates required parameters, verifies that the deployment package is
    present in the deploy bucket, resolves the execution role, optional
    dead-letter target and layers, creates the function, then configures
    concurrency, an optional alias, event-source triggers and provisioned
    concurrency.

    :param name: logical lambda name
    :param meta: resource meta describing the lambda configuration
    :return: description object of the (possibly pre-existing) lambda
    :raises AssertionError: if the deployment package, the IAM role or a
        referenced layer does not exist
    """
    _LOG.debug('Creating lambda %s', name)
    req_params = ['iam_role_name', 'runtime', 'memory', 'timeout', 'func_name']
    # Lambda configuration
    validate_params(name, meta, req_params)

    key = meta[S3_PATH_NAME]
    if not _S3_CONN.is_file_exists(CONFIG.deploy_target_bucket, key):
        # str.format, not %-args: exception arguments are never interpolated
        raise AssertionError(
            'Error while creating lambda: {0};'
            'Deployment package {1} does not exist '
            'in {2} bucket'.format(name, key, CONFIG.deploy_target_bucket))

    lambda_def = _LAMBDA_CONN.get_function(name)
    if lambda_def:
        # idempotency: an existing lambda is described, not recreated
        _LOG.warn('%s lambda exists.', name)
        return describe_lambda(name, meta, lambda_def)

    role_name = meta['iam_role_name']
    role_arn = CONN.iam().check_if_role_exists(role_name)
    if not role_arn:
        raise AssertionError('Role {} does not exist; '
                             'Lambda {} failed to be configured.'.format(
                                 role_name, name))

    dl_type = meta.get('dl_resource_type')
    if dl_type:
        dl_type = dl_type.lower()
    dl_name = meta.get('dl_resource_name')
    # dead-letter target ARN is built only when both type and name are given
    dl_target_arn = 'arn:aws:{0}:{1}:{2}:{3}'.format(
        dl_type, CONFIG.region, CONFIG.account_id,
        dl_name) if dl_type and dl_name else None

    publish_version = meta.get('publish_version', False)
    lambda_layers_arns = []
    layer_meta = meta.get('layers')
    if layer_meta:
        for layer_name in layer_meta:
            layer_arn = _LAMBDA_CONN.get_lambda_layer_arn(layer_name)
            if not layer_arn:
                raise AssertionError(
                    'Could not link lambda layer {} to lambda {} '
                    'due to layer absence!'.format(layer_name, name))
            lambda_layers_arns.append(layer_arn)

    _LAMBDA_CONN.create_lambda(
        lambda_name=name,
        func_name=meta['func_name'],
        role=role_arn,
        runtime=meta['runtime'].lower(),
        memory=meta['memory'],
        timeout=meta['timeout'],
        s3_bucket=CONFIG.deploy_target_bucket,
        s3_key=key,
        env_vars=meta.get('env_variables'),
        vpc_sub_nets=meta.get('subnet_ids'),
        vpc_security_group=meta.get('security_group_ids'),
        dl_target_arn=dl_target_arn,
        tracing_mode=meta.get('tracing_mode'),
        publish_version=publish_version,
        layers=lambda_layers_arns)
    _LOG.debug('Lambda created %s', name)
    # AWS sometimes returns None after function creation, needs for stability
    time.sleep(10)
    lambda_def = __describe_lambda_by_version(
        name) if publish_version else _LAMBDA_CONN.get_function(name)
    version = lambda_def['Configuration']['Version']
    _setup_function_concurrency(name=name, meta=meta)

    # enabling aliases
    # aliases can be enabled only and for $LATEST
    alias = meta.get('alias')
    if alias:
        _LOG.debug('Creating alias')
        _LOG.debug(
            _LAMBDA_CONN.create_alias(function_name=name,
                                      name=alias,
                                      version=version))

    arn = build_lambda_arn_with_alias(lambda_def, alias) \
        if publish_version or alias else \
        lambda_def['Configuration']['FunctionArn']
    _LOG.debug('arn value: ' + str(arn))

    event_sources = meta.get('event_sources')
    if event_sources:
        for trigger_meta in event_sources:
            trigger_type = trigger_meta['resource_type']
            func = CREATE_TRIGGER[trigger_type]
            func(name, arn, role_name, trigger_meta)
    # concurrency configuration
    _manage_provisioned_concurrency_configuration(function_name=name,
                                                  meta=meta,
                                                  lambda_def=lambda_def)
    return describe_lambda(name, meta, lambda_def)
示例#5
0
def _create_lambda_from_meta(name, meta):
    """Create lambda ``name`` from its deployment meta.

    Validates required parameters, verifies that the deployment package is
    present in the deploy bucket, resolves the execution role and optional
    dead-letter target, creates the function, then configures concurrency,
    an optional alias and event-source triggers.

    :param name: logical lambda name
    :param meta: resource meta describing the lambda configuration
    :return: description object of the (possibly pre-existing) lambda
    :raises AssertionError: if the deployment package or the IAM role
        does not exist
    """
    req_params = ['iam_role_name', 'runtime', 'memory', 'timeout', 'func_name']

    # Lambda configuration
    validate_params(name, meta, req_params)

    key = meta[S3_PATH_NAME]
    if not _S3_CONN.is_file_exists(CONFIG.deploy_target_bucket, key):
        # str.format, not %-args: exception arguments are never interpolated
        raise AssertionError(
            'Deployment package {0} does not exist '
            'in {1} bucket'.format(key, CONFIG.deploy_target_bucket))

    response = _LAMBDA_CONN.get_function(name)
    if response:
        # idempotency: an existing lambda is described, not recreated
        _LOG.warn('%s lambda exists.', name)
        return describe_lambda(name, meta, response)

    role_name = meta['iam_role_name']
    role_arn = CONN.iam().check_if_role_exists(role_name)
    if not role_arn:
        raise AssertionError('Role {0} does not exist.'.format(role_name))

    dl_type = meta.get('dl_resource_type')
    if dl_type:
        dl_type = dl_type.lower()
    dl_name = meta.get('dl_resource_name')
    # dead-letter target ARN is built only when both type and name are given
    dl_target_arn = 'arn:aws:{0}:{1}:{2}:{3}'.format(dl_type,
                                                     CONFIG.region,
                                                     CONFIG.account_id,
                                                     dl_name) if dl_type and dl_name else None

    publish_version = meta.get('publish_version', False)

    _LAMBDA_CONN.create_lambda(
        lambda_name=name,
        func_name=meta['func_name'],
        role=role_arn,
        runtime=meta['runtime'].lower(),
        memory=meta['memory'],
        timeout=meta['timeout'],
        s3_bucket=CONFIG.deploy_target_bucket,
        s3_key=key,
        env_vars=meta.get('env_variables'),
        vpc_sub_nets=meta.get('subnet_ids'),
        vpc_security_group=meta.get('security_group_ids'),
        dl_target_arn=dl_target_arn,
        tracing_mode=meta.get('tracing_mode'),
        publish_version=publish_version
    )

    # AWS sometimes returns None after function creation, needs for stability
    time.sleep(10)
    response = __describe_lambda_by_version(
        name) if publish_version else _LAMBDA_CONN.get_function(name)
    version = response['Configuration']['Version']
    con_exec = meta.get('concurrent_executions')
    if con_exec:
        _LOG.debug('Going to set up concurrency executions')
        unresolved_exec = _LAMBDA_CONN.get_unresolved_concurrent_executions()
        # can only reserve up to the account's remaining unreserved quota
        if con_exec <= unresolved_exec:
            _LAMBDA_CONN.put_function_concurrency(
                function_name=name,
                concurrent_executions=con_exec)
            _LOG.debug('Concurrency is enabled for %s lambda', name)
        else:
            _LOG.warn(
                'Account does not have any unresolved executions.'
                ' Current size - %s', unresolved_exec)

    # enabling aliases
    # aliases can be enabled only and for $LATEST
    alias = meta.get('alias')
    if alias:
        _LOG.debug('Creating alias')
        _LOG.debug(_LAMBDA_CONN.create_alias(function_name=name,
                                             name=alias, version=version))

    arn = build_lambda_arn_with_alias(response,
                                      alias) if publish_version or alias else \
        response['Configuration']['FunctionArn']
    _LOG.debug('arn value: ' + str(arn))

    if meta.get('event_sources'):
        for trigger_meta in meta.get('event_sources'):
            trigger_type = trigger_meta['resource_type']
            func = CREATE_TRIGGER[trigger_type]
            func(name, arn, role_name, trigger_meta)
    _LOG.info('Created lambda %s.', name)
    return describe_lambda(name, meta, response)