def register_resources_template(self, template):
    """Register one ``EventSourceMapping`` into the resources template.

    Note: We prepend a 30s Sleep before the creation of this resource
    because the IAM role of the lambda is not propagated fast enough
    upon creation, and CloudFormation checks at creation time whether
    the referenced lambda has permission to consume this stream.

    Because the ``Lambda`` and the ``EventSourceMapping`` are created
    in the same stack, we need to introduce this as a palliative
    measure, sorry!
    """
    sleep_lambda = 'lambda:contrib_helpers:sleep:current'
    sleep = Sleep.create_with(
        utils.valid_cloudformation_name(self.name, "Sleep"),
        DependsOn=[self.project.reference(sleep_lambda)],
        lambda_arn=troposphere.Ref(self.project.reference(sleep_lambda)),
        Time=30)
    template.add_resource(sleep)

    template.add_resource(
        awslambda.EventSourceMapping(
            self.in_project_cf_name,
            DependsOn=[sleep.name, self.get_function_name()],
            BatchSize=self.get_batch_size(),
            Enabled=self.get_enabled(),
            EventSourceArn=self.settings.get('stream'),
            FunctionName=troposphere.Ref(self.get_function_name()),
            StartingPosition=self.get_starting_position()))
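The Sleep workaround above exists only because IAM role propagation is eventually consistent; stripped of the project-specific helpers, the core pattern is an EventSourceMapping plus DependsOn ordering. The following is a minimal, self-contained sketch of that pattern, with placeholder resource names and a placeholder stream ARN that are not part of the original project:

import troposphere
from troposphere import awslambda

template = troposphere.Template()

template.add_resource(
    awslambda.EventSourceMapping(
        'ExampleKinesisMapping',
        # DependsOn defers creation until the function (and, in the
        # original code, the Sleep custom resource) exists.
        DependsOn=['ExampleFunction'],
        EventSourceArn='arn:aws:kinesis:us-east-1:123456789012:stream/example',
        FunctionName=troposphere.Ref('ExampleFunction'),
        BatchSize=100,
        Enabled=True,
        StartingPosition='TRIM_HORIZON'))

print(template.to_json())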
def __init__(self, key):
    # Resources: build one EventSourceMapping per entry in the config
    for n, v in getattr(cfg, key).items():
        resname = f'{key}{n}'

        r_EventSourceMapping = lbd.EventSourceMapping(resname)

        auto_get_props(r_EventSourceMapping, v, recurse=True)

        add_obj([
            r_EventSourceMapping,
        ])
    ),
    Runtime='python3.6',
    Timeout=cfg['lambda_timeout'],
    MemorySize=cfg['lambda_memory_size'],
    Environment=awslambda.Environment(
        'LambdaVars',
        Variables={
            'DELIVERY_STREAM': cfg['kinesis_delivery_stream_name'],
            'ADD_NEWLINE': 'True'
        })))

add_kinesis_trigger_for_lambda = template.add_resource(
    awslambda.EventSourceMapping(
        'KinesisLambdaTrigger',
        BatchSize=cfg['lambda_batch_size'],
        Enabled=cfg['lambda_enabled'],
        FunctionName=Ref(lambda_stream_to_firehose),
        StartingPosition=cfg['lambda_starting_position'],
        EventSourceArn=GetAtt(kinesis_stream, 'Arn')))

template_json = template.to_json(indent=4)
print(template_json)

stack_args = {
    'StackName': STACK_NAME,
    'TemplateBody': template_json,
    'Capabilities': [
        'CAPABILITY_IAM',
    ],
    'Tags': [{
        'Key': 'Purpose',
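The stack_args dict is cut off in the excerpt, but its keys match the keyword arguments of boto3's CloudFormation create_stack call, so presumably it is unpacked into that call. A minimal sketch of that deployment step, assuming boto3 (the actual call is not shown in the original):

import boto3

# StackName, TemplateBody, Capabilities and Tags are all accepted
# keyword arguments of create_stack, so stack_args unpacks directly.
cfn = boto3.client('cloudformation')
cfn.create_stack(**stack_args)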
def build_template(self):
    deploy_alias = False
    t = self._init_template()

    self.jinja_env_vars()

    role_param = t.add_parameter(
        Parameter(self.role.output_role_arn(), Type='String'))

    bucket_ref = t.add_parameter(
        Parameter(self.s3_bucket.output_bucket_name(), Type='String'))

    if self._deploying:
        if not self.uploaded and self._bucket_name():
            deploy_alias = True
            self._determine_code_versions()
            logger.info("S3 Key: {}".format(self.zip_name))

    func = t.add_resource(
        awslambda.Function(
            '{}Function'.format(self.get_stack_name()),
            FunctionName=self.get_stack_name(),
            Handler=self.handler,
            MemorySize=self.memory,
            Timeout=self.timeout,
            Runtime=self.runtime,
            Role=Ref(role_param),
            Environment=awslambda.Environment(Variables=self.vars),
            Code=awslambda.Code(S3Bucket=Ref(bucket_ref),
                                S3Key=self.zip_name)))

    if self.s3_version:
        func.Code.S3ObjectVersion = self.s3_version

    # vpc mode
    if self.vpc_stack is not None:
        if self.public_subnet:
            subnets = self.vpc_stack.output_public_subnets()
        else:
            subnets = self.vpc_stack.output_private_subnets()

        subnet_refs = [
            Ref(utils.ensure_param(t, val, 'String')) for val in subnets
        ]

        func.VpcConfig = awslambda.VPCConfig(SubnetIds=subnet_refs,
                                             SecurityGroupIds=[])

        for sg in self.security_groups:
            sg_ref = Ref(
                utils.ensure_param(t, sg.output_security_group(), 'String'))
            func.VpcConfig.SecurityGroupIds.append(sg_ref)

    if deploy_alias:
        for v in self.aliases:
            t.add_resource(
                awslambda.Alias('{}Alias'.format(v['name']),
                                FunctionName=Ref(func),
                                Name=v['name'],
                                FunctionVersion=v['version']))

    if self.event_sources:
        for s in self.event_sources:
            src = s['src']
            args = s['args']
            if isinstance(src, dynamodb.DynamoTable):
                p = t.add_parameter(
                    Parameter(src.output_stream(), Type='String'))
                t.add_resource(
                    awslambda.EventSourceMapping(
                        'LambdaDynamo{}'.format(src.name),
                        FunctionName=Ref(func),
                        EventSourceArn=Ref(p),
                        StartingPosition='LATEST'))
            if isinstance(src, sqs.Queue):
                p = t.add_parameter(
                    Parameter(src.output_queue_arn(), Type='String'))
                t.add_resource(
                    awslambda.EventSourceMapping(
                        'LambdaSQS{}'.format(src.name),
                        FunctionName=Ref(func),
                        EventSourceArn=Ref(p),
                        BatchSize=args.get('BatchSize', 1)))

    for k, v in enumerate(self.perms):
        v.build(t, func, k)

    t.add_output([
        Output('FunctionName', Value=Ref(func)),
        Output('FunctionArn', Value=GetAtt(func, "Arn"))
    ])

    return t
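Note the asymmetry between the two event-source branches above: stream-based sources (DynamoDB Streams, Kinesis) require a StartingPosition, while SQS sources reject it, since messages are always consumed from the queue head, and are tuned via BatchSize instead. A self-contained sketch of both flavors, with placeholder function name and ARNs:

from troposphere import awslambda, Template

t = Template()

# Stream-based source: must say where to begin reading the shard.
t.add_resource(
    awslambda.EventSourceMapping(
        'DynamoMapping',
        FunctionName='example-function',
        EventSourceArn='arn:aws:dynamodb:us-east-1:123456789012:'
                       'table/example/stream/2020-01-01T00:00:00.000',
        StartingPosition='LATEST'))

# SQS source: StartingPosition is invalid here; only BatchSize applies.
t.add_resource(
    awslambda.EventSourceMapping(
        'SqsMapping',
        FunctionName='example-function',
        EventSourceArn='arn:aws:sqs:us-east-1:123456789012:example-queue',
        BatchSize=1))

print(t.to_json())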
        Timeout=cfg['deliver_to_firehose_lambda']['timeout'],
        MemorySize=cfg['deliver_to_firehose_lambda']['memory_size'],
        Environment=awslambda.Environment(
            'LambdaVars',
            Variables={
                'DELIVERY_STREAM': cfg['firehose_delivery_stream'],
                'CROSS_ACCOUNT_ROLE_ARN': cfg['cross_account_role'].format(
                    os.environ['CROSS_ACCOUNT'])
            })))

kinesis_trigger_lambda = template.add_resource(
    awslambda.EventSourceMapping(
        'KinesisLambdaTrigger',
        BatchSize=cfg['deliver_to_firehose_lambda']['batch_size'],
        Enabled=cfg['deliver_to_firehose_lambda']['enabled'],
        FunctionName=Ref(deliver_to_firehose_lambda),
        StartingPosition=cfg['deliver_to_firehose_lambda']['kinesis_shard_iterator'],
        EventSourceArn=GetAtt(kinesis_stream, 'Arn')))

template.add_output([
    Output('KinesisStream',
           Description='Kinesis stream',
           Value=Ref(kinesis_stream))
])
template.add_output([
    Output('KinesisStreamArn',
           Description='Kinesis stream Arn',
           Value=GetAtt(kinesis_stream, 'Arn'))
])
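Once the stack is deployed, the two outputs declared above can be read back from CloudFormation. A short sketch assuming boto3 and a placeholder stack name:

import boto3

cfn = boto3.client('cloudformation')
stack = cfn.describe_stacks(StackName='example-stack')['Stacks'][0]

# Outputs come back as a list of {'OutputKey', 'OutputValue'} dicts.
outputs = {o['OutputKey']: o['OutputValue'] for o in stack['Outputs']}
print(outputs['KinesisStream'], outputs['KinesisStreamArn'])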