Example #1
def _create_cloud_watch_rule_from_meta(name, meta, region):
    # validation depends on rule type
    required_parameters = ['rule_type']
    rule_type = meta.get('rule_type')
    if rule_type:
        if rule_type == 'schedule':
            required_parameters.append('expression')
    validate_params(name, meta, required_parameters)

    event_buses = meta.get('event_bus_accounts')
    response = CONN.cw_events(region).get_rule(name)
    if response:
        _LOG.warn('%s rule exists in %s.', name, region)
        return describe_rule(name=name, meta=meta, region=region,
                             response=response)
    try:
        func = RULE_TYPES[rule_type]
        func(name, meta, CONN.cw_events(region))
        if event_buses:
            time.sleep(5)
            _attach_tenant_rule_targets(name, region, event_buses)
        _LOG.info('Created cloud watch rule %s in %s.', name, region)
        response = CONN.cw_events(region).get_rule(name)
        time.sleep(5)
        return describe_rule(name=name, meta=meta, region=region,
                             response=response)
    except KeyError:
        raise AssertionError(
            'Invalid rule type: {0} for resource {1}. '
            'Please use one of the supported rule types: '
            'schedule|ec2|api_call.'.format(rule_type, name))
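
Every example on this page calls validate_params before touching AWS, but the helper itself is never shown. A minimal sketch of the behavior the examples rely on (a hypothetical implementation, assuming it simply fails fast on missing keys):

def validate_params(name, meta, required_params):
    """Sketch: raise if the resource meta lacks any of the required keys."""
    missing = [param for param in required_params if meta.get(param) is None]
    if missing:
        raise AssertionError(
            'Resource {0} is missing required parameters: {1}'.format(
                name, ', '.join(missing)))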
Example #2

def _create_api_gateway_from_meta(name, meta):
    """ Create API Gateway with all specified meta.

    :type name: str
    :type meta: dict
    """
    required_parameters = ['resources', 'deploy_stage']
    validate_params(name, meta, required_parameters)

    api_resources = meta['resources']

    api_id = _API_GATEWAY_CONN.create_rest_api(name)['id']
    if api_resources:
        args = __prepare_api_resources_args(api_id, api_resources)
        create_pool(_create_resource_from_metadata, args, 1)
    else:
        _LOG.info('There are no resources in %s API Gateway description.', name)
    # add headers
    # wait before customization
    time.sleep(10)
    _LOG.debug('Customizing API Gateway responses...')
    # _customize_gateway_responses call is commented out due to a botocore
    # InternalFailure raised during the call; will be fixed later
    #_customize_gateway_responses(api_id)
    # deploy api
    __deploy_api_gateway(api_id, meta, api_resources)
    return describe_api_resources(api_id=api_id, meta=meta, name=name)
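
For reference, a hypothetical meta dict that would pass validation here; everything inside 'resources' is illustrative, since this page does not show how __prepare_api_resources_args consumes it:

api_gateway_meta = {
    'deploy_stage': 'dev',
    'resources': {
        '/users': {
            'GET': {'integration_type': 'lambda', 'lambda_name': 'list-users'}
        }
    }
}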
Example #3
    def _create_platform_application_from_meta(self, name, meta, region):
        required_parameters = ['platform', 'attributes']
        validate_params(name, meta, required_parameters)
        arn = self.connection_provider.sns(region).get_platform_application(
            name)
        if arn:
            _LOG.warn(
                '{0} SNS platform application exists in region {1}.'.format(
                    name, region))
            return self.describe_sns_application(name, meta, region, arn)
        platform = meta['platform']
        attributes = meta['attributes']
        try:
            arn = self.connection_provider.sns(
                region).create_platform_application(
                name=name,
                platform=platform,
                attributes=attributes)
        except ClientError as e:
            exception_type = e.response['Error']['Code']
            if exception_type == 'InvalidParameterException':
                _LOG.warn('SNS application %s already exists.', name)
            else:
                raise e
        _LOG.info('SNS platform application %s in region %s has been created.',
                  name, region)
        return self.describe_sns_application(name, meta, region, arn)
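
A sample meta with the two required keys. The attribute names follow the SNS CreatePlatformApplication API; the credential value is a placeholder:

sns_app_meta = {
    'platform': 'GCM',
    'attributes': {'PlatformCredential': '<fcm-server-key>'}
}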
Example #4
def _enable_autoscaling(autoscaling_config, name):
    targets = []
    policies = []
    for item in autoscaling_config:
        autoscaling_required_parameters = [
            'resource_name', 'dimension', 'min_capacity', 'max_capacity',
            'role_name'
        ]
        validate_params(name, item, autoscaling_required_parameters)
        role_name = item['role_name']
        role_arn = CONN.iam().check_if_role_exists(role_name)
        if role_arn:
            dimension = item['dimension']
            resource_id, sc_targets = register_autoscaling_target(
                dimension, item, role_arn, name)
            targets.extend(sc_targets)
            _LOG.debug('Autoscaling %s is set up for %s', dimension,
                       resource_id)
            autoscaling_policy = item.get('config')
            if autoscaling_policy:
                policy_name = autoscaling_policy['policy_name']
                _LOG.debug('Going to set up autoscaling with '
                           'policy %s', policy_name)
                sc_policies = put_autoscaling_policy(autoscaling_policy,
                                                     dimension, policy_name,
                                                     resource_id)
                policies.append(sc_policies)
                _LOG.debug('Policy %s is set up', policy_name)
        else:
            _LOG.warn('Role %s is not found, skip autoscaling config',
                      role_name)
    return {'targets': targets, 'policies': policies}
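
A sample autoscaling_config entry with every required parameter plus the optional policy block; the dimension value follows the Application Auto Scaling convention for DynamoDB tables, while the names are illustrative:

autoscaling_config = [{
    'resource_name': 'Orders',
    'dimension': 'dynamodb:table:ReadCapacityUnits',
    'min_capacity': 5,
    'max_capacity': 100,
    'role_name': 'autoscaling-role',
    'config': {'policy_name': 'orders-read-scaling'}   # optional
}]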
Example #5
    def _create_api_gateway_from_meta(self, name, meta):
        """ Create API Gateway with all specified meta.
    
        :type name: str
        :type meta: dict
        """
        required_parameters = ['resources', 'deploy_stage']
        validate_params(name, meta, required_parameters)

        api_resources = meta['resources']

        api_item = self.connection.create_rest_api(
            api_name=name,
            binary_media_types=meta.get('binary_media_types'))
        api_id = api_item['id']

        # deploy authorizers
        authorizers = meta.get('authorizers', {})
        for key, val in authorizers.items():
            lambda_version = val.get('lambda_version')
            lambda_name = val.get('lambda_name')
            lambda_alias = val.get('lambda_alias')
            lambda_arn = self.lambda_res. \
                resolve_lambda_arn_by_version_and_alias(lambda_name,
                                                        lambda_version,
                                                        lambda_alias)
            uri = 'arn:aws:apigateway:{0}:lambda:path/2015-03-31/' \
                  'functions/{1}/invocations'.format(self.region, lambda_arn)
            self.connection.create_authorizer(api_id=api_id, name=key,
                                              type=val['type'],
                                              authorizer_uri=uri,
                                              identity_source=val.get(
                                                  'identity_source'),
                                              ttl=val.get('ttl'))

            self.lambda_res.add_invocation_permission(
                statement_id=api_id,
                name=lambda_arn,
                principal='apigateway.amazonaws.com')
        if api_resources:
            api_resp = meta.get('api_method_responses')
            api_integration_resp = meta.get('api_method_integration_responses')
            args = self.__prepare_api_resources_args(api_id, api_resources,
                                                     api_resp,
                                                     api_integration_resp)
            self.create_pool(self._create_resource_from_metadata, args, 1)
        else:
            _LOG.info('There are no resources in %s API Gateway description.',
                      name)
        # add headers
        # wait before customization
        time.sleep(10)
        _LOG.debug('Customizing API Gateway responses...')
        # _customize_gateway_responses call is commented out due to a botocore
        # InternalFailure raised during the call; will be fixed later
        # _customize_gateway_responses(api_id)
        # deploy api
        self.__deploy_api_gateway(api_id, meta, api_resources)
        return self.describe_api_resources(api_id=api_id, meta=meta, name=name)
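
A hypothetical authorizers block consumed by the loop above; identity_source uses the standard API Gateway mapping expression for a header-based TOKEN authorizer:

authorizers_meta = {
    'token-authorizer': {
        'type': 'TOKEN',
        'lambda_name': 'authorizer-lambda',   # resolved to an ARN above
        'lambda_version': '1',
        'lambda_alias': None,
        'identity_source': 'method.request.header.Authorization',
        'ttl': 300
    }
}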
Example #6
def _create_dynamodb_table_from_meta(name, meta):
    """ Create Dynamo DB table from meta description after parameter
    validation.

    :type name: str
    :type meta: dict
    """
    required_parameters = [
        'hash_key_name', 'hash_key_type', 'read_capacity', 'write_capacity'
    ]
    validate_params(name, meta, required_parameters)

    res = _DYNAMO_DB_CONN.describe_table(name)
    autoscaling_config = meta.get('autoscaling')
    if res:
        _LOG.warn('%s table exists.', name)
        if autoscaling_config:
            res['Autoscaling'] = _describe_autoscaling(autoscaling_config,
                                                       name)
        return describe_table(name, meta, res)

    _DYNAMO_DB_CONN.create_table(name,
                                 meta['hash_key_name'],
                                 meta['hash_key_type'],
                                 meta.get('sort_key_name'),
                                 meta.get('sort_key_type'),
                                 meta['read_capacity'],
                                 meta['write_capacity'],
                                 global_indexes=meta.get('global_indexes'),
                                 local_indexes=meta.get('local_indexes'),
                                 wait=False)
    response = _DYNAMO_DB_CONN.describe_table(name)
    if not response:
        raise AssertionError(
            'Table with name {0} has not been created!'.format(name))
    # enabling stream if present
    stream_view_type = meta.get('stream_view_type')
    if stream_view_type:
        stream = _DYNAMO_DB_CONN.get_table_stream_arn(name)
        if stream:
            _LOG.warn('Stream %s exists.', name)
        else:
            try:
                _DYNAMO_DB_CONN.enable_table_stream(name, stream_view_type)
            except ClientError as e:
                # handle specific case of phantom stream enabling
                if 'ResourceInUseException' in str(e):
                    _LOG.warn(
                        'Stream enabling currently in progress,'
                        ' table: %s', name)
                else:
                    raise e
    if autoscaling_config:
        _LOG.debug('Found autoscaling configuration for resource %s', name)
        sc_res = _enable_autoscaling(autoscaling_config, name)
        response['Autoscaling'] = sc_res
    _LOG.info('Created table %s.', name)
    return describe_table(name, meta, response)
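
A sample table meta covering the required keys and the optional ones handled above; type codes and the stream view type are standard DynamoDB API values:

table_meta = {
    'hash_key_name': 'id',
    'hash_key_type': 'S',                        # S | N | B
    'read_capacity': 5,
    'write_capacity': 5,
    'sort_key_name': 'created_at',               # optional
    'sort_key_type': 'N',
    'stream_view_type': 'NEW_AND_OLD_IMAGES',    # optional, enables the stream
    'autoscaling': []                            # optional, see Example #4
}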
Example #7
def _create_sns_topic_trigger_from_meta(lambda_name, lambda_arn, role_name,
                                        trigger_meta):
    required_params = ['target_topic']
    validate_params(lambda_name, trigger_meta, required_params)
    topic_name = trigger_meta['target_topic']

    region = trigger_meta.get('region')
    create_sns_subscription_for_lambda(lambda_arn, topic_name, region)
    _LOG.info('Lambda %s subscribed to sns topic %s', lambda_name,
              topic_name)
Example #8
def _create_cloud_watch_trigger_from_meta(topic_name, trigger_meta, region):
    required_parameters = ['target_rule']
    validate_params(topic_name, trigger_meta, required_parameters)
    rule_name = trigger_meta['target_rule']

    topic_arn = CONN.sns(region).get_topic_arn(topic_name)
    CONN.cw_events(region).add_rule_target(rule_name, topic_arn)
    CONN.sns(region).allow_service_invoke(topic_arn, 'events.amazonaws.com')
    _LOG.info('SNS topic %s subscribed to cloudwatch rule %s', topic_name,
              rule_name)
Example #9
def _update_lambda(name, meta):
    _LOG.info('Updating lambda: {0}'.format(name))
    req_params = ['runtime', 'memory', 'timeout', 'func_name']

    validate_params(name, meta, req_params)

    key = meta[S3_PATH_NAME]
    if not _S3_CONN.is_file_exists(CONFIG.deploy_target_bucket, key):
        raise AssertionError(
            'Deployment package {0} does not exist '
            'in {1} bucket'.format(key, CONFIG.deploy_target_bucket))

    response = _LAMBDA_CONN.get_function(name)
    if not response:
        raise AssertionError('{0} lambda does not exist.'.format(name))

    publish_version = meta.get('publish_version', False)

    _LAMBDA_CONN.update_code_source(
        lambda_name=name,
        s3_bucket=CONFIG.deploy_target_bucket,
        s3_key=key,
        publish_version=publish_version)

    # AWS sometimes returns None right after function creation; sleep for stability
    time.sleep(10)
    response = _LAMBDA_CONN.get_function(name)
    _LOG.debug('Lambda describe result: {0}'.format(response))
    code_sha_256 = response['Configuration']['CodeSha256']
    publish_ver_response = _LAMBDA_CONN.publish_version(
        function_name=name,
        code_sha_256=code_sha_256)
    updated_version = publish_ver_response['Version']
    _LOG.info(
        'Version {0} for lambda {1} published'.format(updated_version, name))

    alias_name = meta.get('alias')
    if alias_name:
        alias = _LAMBDA_CONN.get_alias(function_name=name, name=alias_name)
        if not alias:
            _LAMBDA_CONN.create_alias(
                function_name=name,
                name=alias_name,
                version=updated_version)
            _LOG.info(
                'Alias {0} has been created for lambda {1}'.format(alias_name,
                                                                   name))
        else:
            _LAMBDA_CONN.update_alias(
                function_name=name,
                alias_name=alias_name,
                function_version=updated_version)
            _LOG.info(
                'Alias {0} has been updated for lambda {1}'.format(alias_name,
                                                                   name))
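
A sample meta for the update flow; the S3 key field assumes S3_PATH_NAME == 's3_path', which this page does not show:

update_meta = {
    'runtime': 'python3.8',
    'memory': 256,
    'timeout': 100,
    'func_name': 'handler.lambda_handler',
    's3_path': 'lambdas/my-lambda.zip',
    'publish_version': True,
    'alias': 'prod'          # optional; created or updated above
}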
Example #10
def _create_kinesis_stream_trigger_from_meta(lambda_name, lambda_arn,
                                             role_name, trigger_meta):
    required_parameters = ['target_stream', 'batch_size', 'starting_position']
    validate_params(lambda_name, trigger_meta, required_parameters)

    stream_name = trigger_meta['target_stream']

    stream = CONN.kinesis().get_stream(stream_name)
    stream_arn = stream['StreamDescription']['StreamARN']
    stream_status = stream['StreamDescription']['StreamStatus']
    # additional waiting for stream
    if stream_status != 'ACTIVE':
        _LOG.debug('Kinesis stream %s is not in active state,'
                   ' waiting for activation...', stream_name)
        time.sleep(120)

    # TODO policy should be moved to meta
    policy_name = '{0}KinesisTo{1}Lambda'.format(stream_name, lambda_name)
    policy_document = {
        "Statement": [
            {
                "Effect": "Allow",
                "Action": [
                    "lambda:InvokeFunction"
                ],
                "Resource": [
                    lambda_arn
                ]
            },
            {
                "Action": [
                    "kinesis:DescribeStreams",
                    "kinesis:DescribeStream",
                    "kinesis:ListStreams",
                    "kinesis:GetShardIterator",
                    "Kinesis:GetRecords"
                ],
                "Effect": "Allow",
                "Resource": stream_arn
            }
        ],
        "Version": "2012-10-17"
    }
    CONN.iam().attach_inline_policy(role_name=role_name,
                                    policy_name=policy_name,
                                    policy_document=policy_document)
    _LOG.debug('Inline policy %s is attached to role %s',
               policy_name, role_name)
    _LOG.debug('Waiting for activation policy %s...', policy_name)
    time.sleep(10)

    _add_kinesis_event_source(lambda_arn, stream_arn, trigger_meta)
    _LOG.info('Lambda %s subscribed to kinesis stream %s', lambda_name,
              stream_name)
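
The fixed 120-second sleep above is a crude readiness check; a sketch of a replacement using a plain boto3 waiter (this assumes direct boto3 access rather than the CONN wrapper used in the example):

import boto3

def wait_for_stream_active(stream_name, region):
    """Block until the Kinesis stream reports ACTIVE instead of sleeping blindly."""
    client = boto3.client('kinesis', region_name=region)
    # the stream_exists waiter polls DescribeStream until the stream is ACTIVE
    client.get_waiter('stream_exists').wait(StreamName=stream_name)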
Example #11
    def create_lambda_layer_from_meta(self, name, meta, context=None):
        """
        :param name:
        :param meta:
        :param context: because of usage in 'update' flow
        :return:
        """
        req_params = ['runtimes', 'deployment_package']

        validate_params(name, meta, req_params)

        key = meta[S3_PATH_NAME]
        file_name = key.split('/')[-1]
        self.s3_conn.download_file(self.deploy_target_bucket, key, file_name)
        import hashlib  # local import kept as in the original module
        with open(file_name, 'rb') as file_data:
            file_body = file_data.read()
        hash_object = hashlib.sha256()
        hash_object.update(file_body)
        existing_version = self._is_equal_lambda_layer(hash_object.digest(),
                                                       name)
        if existing_version:
            existing_layer_arn = existing_version['LayerVersionArn']
            _LOG.info('Layer {} with same content already '
                      'exists in layer version {}.'.format(
                          name, existing_layer_arn))
            return {
                existing_layer_arn:
                build_description_obj(response=existing_version,
                                      name=name,
                                      meta=meta)
            }

        _LOG.debug('Creating lambda layer %s', name)

        args = {
            'layer_name': name,
            'runtimes': meta['runtimes'],
            's3_bucket': self.deploy_target_bucket,
            's3_key': meta[S3_PATH_NAME]
        }
        if meta.get('description'):
            args['description'] = meta['description']
        if meta.get('license'):
            args['layer_license'] = meta['license']
        response = self.lambda_conn.create_layer(**args)

        _LOG.info(
            'Lambda Layer {0} version {1} was successfully created'.format(
                name, response['Version']))
        layer_arn = response['LayerArn'] + ':' + str(response['Version'])
        del response['LayerArn']
        return {layer_arn: build_description_obj(response, name, meta)}
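
A sample layer meta with the required and optional keys handled above; as before, the S3 key field assumes S3_PATH_NAME == 's3_path':

layer_meta = {
    'runtimes': ['python3.8', 'python3.9'],
    'deployment_package': 'shared-libs.zip',
    's3_path': 'layers/shared-libs.zip',
    'description': 'Shared third-party libraries',   # optional
    'license': 'MIT'                                 # optional
}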
Example #12
def _create_cloud_watch_trigger_from_meta(lambda_name, lambda_arn, role_name,
                                          trigger_meta):
    required_parameters = ['target_rule']
    validate_params(lambda_name, trigger_meta, required_parameters)
    rule_name = trigger_meta['target_rule']

    rule_arn = CONN.cw_events().get_rule_arn(rule_name)
    CONN.cw_events().add_rule_target(rule_name, lambda_arn)
    _LAMBDA_CONN.add_invocation_permission(lambda_arn, 'events.amazonaws.com',
                                           rule_arn)
    _LOG.info('Lambda %s subscribed to cloudwatch rule %s', lambda_name,
              rule_name)
Example #13

def _build_python_artifact(item, project_base_folder, project_path, root,
                           target_folder):
    _LOG.debug('Building artifact in {0}'.format(target_folder))
    with open(build_path(root, item)) as config_file:
        lambda_config_dict = json.load(config_file)
    req_params = ['lambda_path', 'name', 'version']
    validate_params(root, lambda_config_dict, req_params)
    lambda_path = path_resolver(lambda_config_dict['lambda_path'])
    lambda_name = lambda_config_dict['name']
    lambda_version = lambda_config_dict['version']
    artifact_name = lambda_name + '-' + lambda_version
    # create folder to store artifacts
    artifact_path = build_path(target_folder, artifact_name)
    _LOG.debug('Artifacts path: {0}'.format(artifact_path))
    os.makedirs(artifact_path)
    _LOG.debug('Folders are created')
    # install requirements.txt content
    # getting file content
    req_path = build_path(root, REQ_FILE_NAME)
    if os.path.exists(req_path):
        _LOG.debug('Going to install 3-rd party dependencies')
        with open(req_path) as f:
            req_list = f.readlines()
        req_list = [path_resolver(r.strip()) for r in req_list]
        _LOG.debug(str(req_list))
        # install dependencies
        for lib in req_list:
            command = 'pip install {0} -t {1}'.format(lib, artifact_path)
            execute_command(command=command)
        _LOG.debug('3-rd party dependencies were installed successfully')

    # install local requirements
    local_req_path = build_path(root, LOCAL_REQ_FILE_NAME)
    if os.path.exists(local_req_path):
        _LOG.debug('Going to install local dependencies')
        _install_local_req(artifact_path, local_req_path, project_base_folder,
                           project_path)
        _LOG.debug('Local dependencies were installed successfully')

    src_path = build_path(CONFIG.project_path, project_path, lambda_path)
    _copy_py_files(src_path, artifact_path)
    package_name = build_py_package_name(lambda_name, lambda_version)
    _zip_dir(artifact_path, build_path(target_folder, package_name))
    # remove the temporary artifact folder; note the lock is local to this
    # call, so it does not actually serialize concurrent builds
    lock = threading.RLock()
    with lock:
        shutil.rmtree(artifact_path)
    _LOG.info('Package {0} was created successfully'.format(package_name))
Example #14
def _build_node_artifact(item, root, target_folder):
    _check_npm_is_installed()
    _LOG.debug('Building artifact in {0}'.format(target_folder))
    with open(build_path(root, item)) as config_file:
        lambda_config_dict = json.load(config_file)
    _LOG.debug('Root path: {}'.format(root))
    req_params = ['lambda_path', 'name', 'version']
    validate_params(root, lambda_config_dict, req_params)
    lambda_name = lambda_config_dict['name']
    lambda_version = lambda_config_dict['version']
    artifact_name = lambda_name + '-' + lambda_version
    # create folder to store artifacts
    artifact_path = build_path(target_folder, artifact_name)
    _LOG.debug('Artifacts path: {0}'.format(artifact_path))
    if not os.path.exists(artifact_path):
        os.makedirs(artifact_path)
    _LOG.debug('Folders are created')
    # getting file content
    req_path = build_path(root, NODE_REQ_FILE_NAME)
    try:
        if os.path.exists(req_path):
            command = 'npm install --prefix {0}'.format(root)
            execute_command(command=command)
            _LOG.debug('3-rd party dependencies were installed successfully')

        package_name = build_py_package_name(lambda_name, lambda_version)
        zip_dir(root, build_path(target_folder, package_name))
        lock = threading.RLock()
        with lock:
            try:
                # remove unused folder/files
                node_modules_path = os.path.join(root, 'node_modules')
                if os.path.exists(node_modules_path):
                    shutil.rmtree(node_modules_path)
                # todo Investigate deleting package_lock file
                # shutil.rmtree(os.path.join(root, 'package_lock.json'))
                shutil.rmtree(artifact_path)
            except FileNotFoundError:
                _LOG.exception('Error occurred while removing temp files.')
        return 'Lambda package {0} was created successfully'.format(
            package_name)
    except Exception:
        _LOG.exception(
            'Error occurred during the \'{0}\' lambda deployment package '
            'assembling'.format(lambda_name))
        return 'Error occurred during the \'{0}\' lambda deployment package ' \
               'assembling'.format(lambda_name)
Example #15
    def _create_cloud_watch_trigger_from_meta(self, name, trigger_meta):
        required_parameters = ['target_rule', 'input', 'iam_role']
        validate_params(name, trigger_meta, required_parameters)
        rule_name = trigger_meta['target_rule']
        sf_input = trigger_meta['input']  # renamed: 'input' shadows a built-in
        sf_role = trigger_meta['iam_role']

        sf_arn = self._build_sm_arn(name, self.region)
        sf_description = self.sf_conn.describe_state_machine(arn=sf_arn)
        if sf_description.get('status') == 'ACTIVE':
            sf_role_arn = self.iam_conn.check_if_role_exists(sf_role)
            if sf_role_arn:
                self.cw_events_conn.add_rule_sf_target(rule_name, sf_arn,
                                                       sf_input, sf_role_arn)
                _LOG.info('State machine %s subscribed to cloudwatch rule %s',
                          name, rule_name)
Example #16
def _create_dynamodb_trigger_from_meta(lambda_name, lambda_arn, role_name,
                                       trigger_meta):
    required_parameters = ['target_table', 'batch_size']
    validate_params(lambda_name, trigger_meta, required_parameters)
    table_name = trigger_meta['target_table']

    if not CONN.dynamodb().is_stream_enabled(table_name):
        CONN.dynamodb().enable_table_stream(table_name)

    stream = CONN.dynamodb().get_table_stream_arn(table_name)
    # TODO support another sub type
    _LAMBDA_CONN.add_event_source(lambda_arn, stream,
                                  trigger_meta['batch_size'],
                                  start_position='LATEST')
    # start_position='LATEST' - in case we did not remove tables before
    _LOG.info('Lambda %s subscribed to dynamodb table %s', lambda_name,
              table_name)
Example #17
    def _create_sqs_trigger_from_meta(self, lambda_name, lambda_arn, role_name,
                                      trigger_meta):
        required_parameters = ['target_queue', 'batch_size']
        validate_params(lambda_name, trigger_meta, required_parameters)
        target_queue = trigger_meta['target_queue']

        if not self.sqs_conn.get_queue_url(target_queue, self.account_id):
            _LOG.debug('Queue %s does not exist', target_queue)
            return

        queue_arn = 'arn:aws:sqs:{0}:{1}:{2}'.format(self.region,
                                                     self.account_id,
                                                     target_queue)

        self.lambda_conn.add_event_source(lambda_arn, queue_arn,
                                          trigger_meta['batch_size'])
        _LOG.info('Lambda %s subscribed to SQS queue %s', lambda_name,
                  target_queue)
Example #18

def _create_s3_trigger_from_meta(lambda_name, lambda_arn, role_name,
                                 trigger_meta):
    required_parameters = ['target_bucket', 's3_events']
    validate_params(lambda_name, trigger_meta, required_parameters)
    target_bucket = trigger_meta['target_bucket']

    if not _S3_CONN.is_bucket_exists(target_bucket):
        _LOG.error(
            'S3 bucket {0} does not exist; event source for lambda {1} '
            'was not created.'.format(target_bucket, lambda_name))
        return
    _LAMBDA_CONN.add_invocation_permission(
        lambda_arn, 's3.amazonaws.com',
        'arn:aws:s3:::{0}'.format(target_bucket))
    _S3_CONN.configure_event_source_for_lambda(target_bucket, lambda_arn,
                                               trigger_meta['s3_events'])
    _LOG.info('Lambda %s subscribed to S3 bucket %s', lambda_name,
              target_bucket)
Example #19

    def _create_alarm_from_meta(self, name, meta):
        """ Create alarm resource in AWS Cloud via meta description.

        :type name: str
        :type meta: dict
        """
        required_parameters = [
            'metric_name', 'namespace', 'period', 'threshold',
            'evaluation_periods', 'comparison_operator', 'statistic'
        ]
        validate_params(name, meta, required_parameters)

        if self.client.is_alarm_exists(name):
            _LOG.warn('%s alarm exists.', name)
            return self.describe_alarm(name, meta)

        params = dict(alarm_name=name,
                      metric_name=meta['metric_name'],
                      namespace=meta['namespace'],
                      period=meta['period'],
                      evaluation_periods=meta['evaluation_periods'],
                      threshold=meta['threshold'],
                      statistic=meta['statistic'],
                      comparison_operator=meta['comparison_operator'])

        sns_topics = meta.get('sns_topics')
        sns_topic_arns = []
        if sns_topics:
            for each in sns_topics:
                arn = self.sns_conn.get_topic_arn(each)
                sns_topic_arns.append(arn)
            if sns_topic_arns:
                params['alarm_actions'] = sns_topic_arns

        self.client.put_metric_alarm(**params)
        _LOG.info('Created alarm {0}.'.format(name))
        return self.describe_alarm(name, meta)
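
A sample alarm meta with all required parameters; the metric, operator, and statistic values are standard CloudWatch identifiers:

alarm_meta = {
    'metric_name': 'Errors',
    'namespace': 'AWS/Lambda',
    'period': 60,                        # seconds
    'threshold': 1,
    'evaluation_periods': 1,
    'comparison_operator': 'GreaterThanOrEqualToThreshold',
    'statistic': 'Sum',
    'sns_topics': ['alarm-notifications']    # optional alarm actions
}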
Example #20
    def _create_lambda_from_meta(self, name, meta):
        _LOG.debug('Creating lambda %s', name)
        req_params = [
            'iam_role_name', 'runtime', 'memory', 'timeout', 'func_name'
        ]
        # Lambda configuration
        validate_params(name, meta, req_params)

        key = meta[S3_PATH_NAME]
        if not self.s3_conn.is_file_exists(self.deploy_target_bucket, key):
            raise AssertionError(
                'Error while creating lambda: {0}; '
                'deployment package {1} does not exist '
                'in {2} bucket'.format(name, key, self.deploy_target_bucket))

        lambda_def = self.lambda_conn.get_function(name)
        if lambda_def:
            _LOG.warn('%s lambda exists.', name)
            return self.describe_lambda(name, meta, lambda_def)

        role_name = meta['iam_role_name']
        role_arn = self.iam_conn.check_if_role_exists(role_name)
        if not role_arn:
            raise AssertionError('Role {} does not exist; '
                                 'Lambda {} failed to be configured.'.format(
                                     role_name, name))

        dl_type = meta.get('dl_resource_type')
        if dl_type:
            dl_type = dl_type.lower()
        dl_name = meta.get('dl_resource_name')

        dl_target_arn = 'arn:aws:{0}:{1}:{2}:{3}'.format(
            dl_type, self.region, self.account_id,
            dl_name) if dl_type and dl_name else None

        publish_version = meta.get('publish_version', False)
        lambda_layers_arns = []
        layer_meta = meta.get('layers')
        if layer_meta:
            for layer_name in layer_meta:
                layer_arn = self.lambda_conn.get_lambda_layer_arn(layer_name)
                if not layer_arn:
                    raise AssertionError(
                        'Could not link lambda layer {} to lambda {} '
                        'due to layer absence!'.format(layer_name, name))
                lambda_layers_arns.append(layer_arn)

        self.lambda_conn.create_lambda(
            lambda_name=name,
            func_name=meta['func_name'],
            role=role_arn,
            runtime=meta['runtime'].lower(),
            memory=meta['memory'],
            timeout=meta['timeout'],
            s3_bucket=self.deploy_target_bucket,
            s3_key=key,
            env_vars=meta.get('env_variables'),
            vpc_sub_nets=meta.get('subnet_ids'),
            vpc_security_group=meta.get('security_group_ids'),
            dl_target_arn=dl_target_arn,
            tracing_mode=meta.get('tracing_mode'),
            publish_version=publish_version,
            layers=lambda_layers_arns)
        _LOG.debug('Lambda created %s', name)
        # AWS sometimes returns None right after function creation; sleep for stability
        time.sleep(10)

        log_group_name = name
        retention = meta.get('logs_expiration')
        if retention:
            self.cw_logs_conn.create_log_group_with_retention_days(
                group_name=log_group_name, retention_in_days=retention)

        lambda_def = self.__describe_lambda_by_version(
            name) if publish_version else self.lambda_conn.get_function(name)
        version = lambda_def['Configuration']['Version']
        self._setup_function_concurrency(name=name, meta=meta)

        # enabling aliases
        # aliases can be enabled only for $LATEST
        alias = meta.get('alias')
        if alias:
            _LOG.debug('Creating alias')
            _LOG.debug(
                self.lambda_conn.create_alias(function_name=name,
                                              name=alias,
                                              version=version))

        arn = self.build_lambda_arn_with_alias(lambda_def, alias) \
            if publish_version or alias else \
            lambda_def['Configuration']['FunctionArn']
        _LOG.debug('arn value: ' + str(arn))

        if meta.get('event_sources'):
            for trigger_meta in meta.get('event_sources'):
                trigger_type = trigger_meta['resource_type']
                func = self.CREATE_TRIGGER[trigger_type]
                func(self, name, arn, role_name, trigger_meta)
        # concurrency configuration
        self._manage_provisioned_concurrency_configuration(
            function_name=name, meta=meta, lambda_def=lambda_def)
        return self.describe_lambda(name, meta, lambda_def)
Example #21
def _create_lambda_from_meta(name, meta):
    req_params = ['iam_role_name', 'runtime', 'memory', 'timeout', 'func_name']

    # Lambda configuration
    validate_params(name, meta, req_params)

    key = meta[S3_PATH_NAME]
    if not _S3_CONN.is_file_exists(CONFIG.deploy_target_bucket, key):
        raise AssertionError('Deployment package {0} does not exist '
                             'in {1} bucket'.format(
                                 key, CONFIG.deploy_target_bucket))

    response = _LAMBDA_CONN.get_function(name)
    if response:
        _LOG.warn('%s lambda exists.', name)
        return describe_lambda(name, meta, response)

    role_name = meta['iam_role_name']
    role_arn = CONN.iam().check_if_role_exists(role_name)
    if not role_arn:
        raise AssertionError('Role {0} does not exist.'.format(role_name))

    dl_type = meta.get('dl_resource_type')
    if dl_type:
        dl_type = dl_type.lower()
    dl_name = meta.get('dl_resource_name')

    dl_target_arn = 'arn:aws:{0}:{1}:{2}:{3}'.format(dl_type,
                                                     CONFIG.region,
                                                     CONFIG.account_id,
                                                     dl_name) if dl_type and dl_name else None

    publish_version = meta.get('publish_version', False)

    _LAMBDA_CONN.create_lambda(
        lambda_name=name,
        func_name=meta['func_name'],
        role=role_arn,
        runtime=meta['runtime'].lower(),
        memory=meta['memory'],
        timeout=meta['timeout'],
        s3_bucket=CONFIG.deploy_target_bucket,
        s3_key=key,
        env_vars=meta.get('env_variables'),
        vpc_sub_nets=meta.get('subnet_ids'),
        vpc_security_group=meta.get('security_group_ids'),
        dl_target_arn=dl_target_arn,
        tracing_mode=meta.get('tracing_mode'),
        publish_version=publish_version
    )

    # AWS sometimes returns None right after function creation; sleep for stability
    time.sleep(10)
    response = __describe_lambda_by_version(
        name) if publish_version else _LAMBDA_CONN.get_function(name)
    version = response['Configuration']['Version']
    con_exec = meta.get('concurrent_executions')
    if con_exec:
        _LOG.debug('Going to set up concurrency executions')
        unresolved_exec = _LAMBDA_CONN.get_unresolved_concurrent_executions()
        if con_exec <= unresolved_exec:
            _LAMBDA_CONN.put_function_concurrency(
                function_name=name,
                concurrent_executions=con_exec)
            _LOG.debug('Concurrency is enabled for %s lambda', name)
        else:
        _LOG.warn(
            'Account does not have enough unresolved concurrent executions.'
            ' Currently available: %s', unresolved_exec)

    # enabling aliases
    # aliases can be enabled only for $LATEST
    alias = meta.get('alias')
    if alias:
        _LOG.debug('Creating alias')
        _LOG.debug(_LAMBDA_CONN.create_alias(function_name=name,
                                             name=alias, version=version))

    arn = build_lambda_arn_with_alias(response,
                                      alias) if publish_version or alias else \
        response['Configuration']['FunctionArn']
    _LOG.debug('arn value: ' + str(arn))

    if meta.get('event_sources'):
        for trigger_meta in meta.get('event_sources'):
            trigger_type = trigger_meta['resource_type']
            func = CREATE_TRIGGER[trigger_type]
            func(name, arn, role_name, trigger_meta)
    _LOG.info('Created lambda %s.', name)
    return describe_lambda(name, meta, response)
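
CREATE_TRIGGER itself is not shown on this page; it is presumably a dispatch table mapping resource_type to the trigger factories from the earlier examples, along these lines (the key names are assumptions):

CREATE_TRIGGER = {
    'cloudwatch_rule_trigger': _create_cloud_watch_trigger_from_meta,
    'dynamodb_trigger': _create_dynamodb_trigger_from_meta,
    'kinesis_trigger': _create_kinesis_stream_trigger_from_meta,
    's3_trigger': _create_s3_trigger_from_meta,
    'sns_topic_trigger': _create_sns_topic_trigger_from_meta,
}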
Example #22
    def _update_lambda(self, name, meta, context):
        _LOG.info('Updating lambda: {0}'.format(name))
        req_params = ['runtime', 'memory', 'timeout', 'func_name']

        validate_params(name, meta, req_params)

        key = meta[S3_PATH_NAME]
        if not self.s3_conn.is_file_exists(self.deploy_target_bucket, key):
            raise AssertionError('Deployment package {0} does not exist '
                                 'in {1} bucket'.format(
                                     key, self.deploy_target_bucket))

        response = self.lambda_conn.get_function(name)
        if not response:
            raise AssertionError('{0} lambda does not exist.'.format(name))

        publish_version = meta.get('publish_version', False)

        self.lambda_conn.update_code_source(
            lambda_name=name,
            s3_bucket=self.deploy_target_bucket,
            s3_key=key,
            publish_version=publish_version)

        # update lambda layers version
        if meta.get('layers'):
            layers = meta.get('layers')
            updated_layers_arns = [
                layer_arn for layer_arn, body in context.items()
                if body.get('resource_name') in layers
            ]
            self.lambda_conn.update_lambda_configuration(
                lambda_name=name, layers=updated_layers_arns)

        # AWS sometimes returns None right after function creation; sleep for stability
        time.sleep(10)
        response = self.lambda_conn.get_function(name)
        _LOG.debug('Lambda describe result: {0}'.format(response))
        code_sha_256 = response['Configuration']['CodeSha256']
        publish_ver_response = self.lambda_conn.publish_version(
            function_name=name, code_sha_256=code_sha_256)
        updated_version = publish_ver_response['Version']
        _LOG.info('Version {0} for lambda {1} published'.format(
            updated_version, name))

        alias_name = meta.get('alias')
        if alias_name:
            alias = self.lambda_conn.get_alias(function_name=name,
                                               name=alias_name)
            if not alias:
                self.lambda_conn.create_alias(function_name=name,
                                              name=alias_name,
                                              version=updated_version)
                _LOG.info('Alias {0} has been created for lambda {1}'.format(
                    alias_name, name))
            else:
                self.lambda_conn.update_alias(function_name=name,
                                              alias_name=alias_name,
                                              function_version=updated_version)
                _LOG.info('Alias {0} has been updated for lambda {1}'.format(
                    alias_name, name))
        req_max_concurrency = meta.get(LAMBDA_MAX_CONCURRENCY)
        existing_max_concurrency = self.lambda_conn.describe_function_concurrency(
            name=name)
        if req_max_concurrency and existing_max_concurrency:
            if existing_max_concurrency != req_max_concurrency:
                self._set_function_concurrency(name=name, meta=meta)
        elif not req_max_concurrency and existing_max_concurrency:
            self.lambda_conn.delete_function_concurrency_config(name=name)
        elif req_max_concurrency and not existing_max_concurrency:
            self._set_function_concurrency(name=name, meta=meta)

        self._manage_provisioned_concurrency_configuration(function_name=name,
                                                           meta=meta,
                                                           lambda_def=context)
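
The concurrency reconciliation at the end covers all four cases: set when new, update when changed, delete when removed from meta, and leave untouched otherwise. A sample meta that would set a limit, assuming LAMBDA_MAX_CONCURRENCY == 'max_concurrency':

update_meta = {
    'runtime': 'python3.8',
    'memory': 128,
    'timeout': 100,
    'func_name': 'handler.lambda_handler',
    's3_path': 'lambdas/my-lambda.zip',
    'max_concurrency': 10    # reserved concurrent executions for the function
}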