def define_redrive_policy(target_queue, retries=None, mono_template=True):
    """Build the properties dict carrying an SQS RedrivePolicy for *target_queue*.

    :param target_queue: queue object exposing ``cfn_resource`` and
        ``attributes_outputs`` (project type — see callers).
    :param retries: value used for ``maxReceiveCount``.
    :param mono_template: when True and the queue is defined in this template,
        point at it directly via GetAtt; otherwise reference the DLQ_ARN
        parameter.
    :return: dict with a single ``"RedrivePolicy"`` entry.
    """
    if target_queue.cfn_resource:
        # Queue lives in (or is parameterized into) the current template.
        dead_letter_arn = (
            GetAtt(target_queue.cfn_resource, "Arn")
            if mono_template
            else Ref(DLQ_ARN)
        )
    else:
        # Queue defined in another stack — use its exported ARN.
        dead_letter_arn = target_queue.attributes_outputs[SQS_ARN]["ImportValue"]
    return {
        "RedrivePolicy": RedrivePolicy(
            deadLetterTargetArn=dead_letter_arn,
            maxReceiveCount=retries,
        )
    }
def define_redrive_policy(target_queue, retries=None, mono_template=True):
    """Return a dict with the SQS RedrivePolicy properties for *target_queue*.

    :param target_queue: the dead-letter queue (template resource or title).
    :param retries: value used for ``maxReceiveCount``.
    :param mono_template: when True, resolve the DLQ ARN with GetAtt on
        *target_queue*; otherwise reference the DLQ_ARN parameter.
    """
    if mono_template:
        dead_letter_arn = GetAtt(target_queue, "Arn")
    else:
        dead_letter_arn = Ref(DLQ_ARN)
    return {
        "RedrivePolicy": RedrivePolicy(
            deadLetterTargetArn=dead_letter_arn,
            maxReceiveCount=retries,
        )
    }
from troposphere import GetAtt, Output, Ref, Template from troposphere.sqs import Queue, RedrivePolicy t = Template() t.set_description( "AWS CloudFormation Sample Template SQS: Sample template showing how to " "create an SQS queue with a dead letter queue. **WARNING** This template " "creates Amazon SQS Queues. You will be billed for the AWS resources used " "if you create a stack from this template.") mysourcequeue = t.add_resource( Queue( "MySourceQueue", RedrivePolicy=RedrivePolicy( deadLetterTargetArn=GetAtt("MyDeadLetterQueue", "Arn"), maxReceiveCount="5", ), )) mydeadletterqueue = t.add_resource(Queue("MyDeadLetterQueue")) t.add_output([ Output( "SourceQueueURL", Description="URL of the source queue", Value=Ref(mysourcequeue), ), Output( "SourceQueueARN", Description="ARN of the source queue", Value=GetAtt(mysourcequeue, "Arn"),
Action="lambda:InvokeFunction", SourceArn=GetAtt("JsonNotificationReceiveQueue", "Arn"), Principal="sqs.amazonaws.com")) t.add_resource( Alias("GalileoBabelLambdaAlias", Description="Alias for the galileo babel lambda", FunctionName=Ref("LambdaFunction"), FunctionVersion="$LATEST", Name=Ref("LambdaEnv"))) t.add_resource( Queue("JsonNotificationReceiveQueue", QueueName=Sub("${LambdaEnv}-json-notification-inbound-queue"), RedrivePolicy=RedrivePolicy(deadLetterTargetArn=GetAtt( "JsonNotificationDLQ", "Arn"), maxReceiveCount=3))) t.add_resource( Queue( "JsonNotificationDLQ", QueueName=Sub("${LambdaEnv}-json-notification-inbound-dlq"), )) t.add_resource( QueuePolicy( "JsonNotificationReceiveQueuePolicy", Queues=[Ref("JsonNotificationReceiveQueue")], PolicyDocument=Policy(Statement=[ Statement(Effect=Allow, Action=[SendMessage],
def generate_template(self, template):
    """Build the go-lambda-hello-world stack resources into *template*.

    Adds the lambda configuration parameters, the function itself plus an
    alias, a work queue with its dead-letter queue, the execution role, an
    SQS invoke permission, and outputs exporting the queue and function
    references.

    :param template: troposphere ``Template`` to populate (mutated in place).
    """
    alias = Parameter("LambdaEnvAlias",
                      Default="int",
                      Description="Alias used to reference the lambda",
                      Type="String")
    # Fix: previously passed the undefined name `aliasP`, which raised a
    # NameError at runtime; the parameter constructed above is what was
    # intended.
    template.add_parameter(alias)
    lambda_bucket = template.add_parameter(
        Parameter("LambdaBucket",
                  Type="String",
                  Default="go-lambda-hello-world",
                  Description=
                  "The S3 Bucket that contains the zip to bootstrap your "
                  "lambda function"))
    s3_key = template.add_parameter(
        Parameter("S3Key",
                  Type="String",
                  Default="main.zip",
                  Description=
                  "The S3 key that references the zip to bootstrap your "
                  "lambda function"))
    handler = template.add_parameter(
        Parameter(
            "LambdaHandler",
            Type="String",
            Default="event_handler.handler",
            Description="The name of the function (within your source code) "
            "that Lambda calls to start running your code."))
    memory_size = template.add_parameter(
        Parameter(
            "LambdaMemorySize",
            Type="Number",
            Default="128",
            Description="The amount of memory, in MB, that is allocated to "
            "your Lambda function.",
            MinValue="128"))
    timeout = template.add_parameter(
        Parameter("LambdaTimeout",
                  Type="Number",
                  Default="300",
                  Description=
                  "The function execution time (in seconds) after which "
                  "Lambda terminates the function. "))
    # NOTE(review): Code below hardcodes the bucket name instead of using
    # Ref(lambda_bucket); kept as-is to preserve the emitted template —
    # confirm whether the parameter should be wired in.
    lambda_function = template.add_resource(
        Function(
            "LambdaGoHelloWorld",
            Code=Code(S3Bucket="go-lambda-hello-world", S3Key=Ref(s3_key)),
            Description="Go function used to demonstate sqs integration",
            Handler=Ref(handler),
            Role=GetAtt("LambdaExecutionRole", "Arn"),
            Runtime="go1.x",
            MemorySize=Ref(memory_size),
            FunctionName="go-lambda-hello-world",
            Timeout=Ref(timeout)))
    alias = template.add_resource(
        Alias("GolLambdaAlias",
              Description="Alias for the go lambda",
              FunctionName=Ref(lambda_function),
              FunctionVersion="$LATEST",
              Name=Ref(alias)))
    dead_letter_queue = template.add_resource(
        Queue("GoLambdaDeadLetterQueue",
              QueueName=("golambdaqueue-dlq"),
              VisibilityTimeout=30,
              MessageRetentionPeriod=1209600,  # 14 days
              MaximumMessageSize=262144,       # 256 KiB
              DelaySeconds=0,
              ReceiveMessageWaitTimeSeconds=0))
    # Messages failing 3 receives are redirected to the DLQ above.
    go_helloworld_queue = template.add_resource(
        Queue("GoLambdaQueue",
              QueueName=("golambdaqueue"),
              VisibilityTimeout=1800,
              RedrivePolicy=RedrivePolicy(
                  deadLetterTargetArn=GetAtt(dead_letter_queue, "Arn"),
                  maxReceiveCount=3)))
    # Execution role: CloudWatch logging plus read/ack access to the queue.
    template.add_resource(
        Role("LambdaExecutionRole",
             Policies=[
                 iam.Policy(
                     PolicyName="GoFunctionRolePolicy",
                     PolicyDocument=Policy(Statement=[
                         Statement(Effect=Allow,
                                   Action=[
                                       Action("logs", "CreateLogGroup"),
                                       Action("logs", "CreateLogStream"),
                                       Action("logs", "PutLogEvents")
                                   ],
                                   Resource=["arn:aws:logs:*:*:*"]),
                         Statement(
                             Effect=Allow,
                             Action=[
                                 Action("sqs", "ChangeMessageVisibility"),
                                 Action("sqs", "DeleteMessage"),
                                 Action("sqs", "GetQueueAttributes"),
                                 Action("sqs", "ReceiveMessage")
                             ],
                             Resource=[GetAtt(go_helloworld_queue, "Arn")])
                     ]))
             ],
             AssumeRolePolicyDocument=Policy(Statement=[
                 Statement(Effect=Allow,
                           Action=[AssumeRole],
                           Principal=Principal("Service",
                                               ["lambda.amazonaws.com"]))
             ])))
    # Allow SQS to invoke the function.
    template.add_resource(
        awslambda.Permission(
            'QueueInvokePermission',
            FunctionName=GetAtt(lambda_function, 'Arn'),
            Action="lambda:InvokeFunction",
            Principal="sqs.amazonaws.com",
            SourceArn=GetAtt(go_helloworld_queue, 'Arn'),
        ))
    template.add_output(
        Output("LambdaHelloWorldQueue",
               Value=GetAtt(go_helloworld_queue, "Arn"),
               Export=Export("lambda-go-hello-world-queue"),
               Description="Arn of the queue"))
    template.add_output(
        Output("LambdaHelloWorlFunction",
               Value=Ref(alias),
               Export=Export("lambda-go-hello-world-function"),
               Description="Arn of the function"))
def _deploy_service(self, service: ff.Service):
    """Build and deploy the CloudFormation stack for one service.

    Generates a template holding the sync (API gateway) and async
    (queue-driven) lambda functions, timer rules, HTTP API routes, error
    alarm subscriptions, the service queue/topic pair with DLQ, and the
    DynamoDB document table; uploads the template to S3 and then creates
    or updates the stack.

    Fixes vs previous revision:
    - catching ``TypeError`` as well as ``ValueError`` around
      ``int(self._aws_config.get('memory_async'))`` — ``int(None)`` raises
      ``TypeError`` when the config key is absent;
    - removed the dead ternary ``'3008' if not self._adaptive_memory else
      '256'`` (evaluated inside ``if self._adaptive_memory:`` so the first
      arm was unreachable — always ``'256'``).
    """
    context = self._context_map.get_context(service.name)
    # Zip-based deployments need the code packaged and uploaded first;
    # image-based deployments pull the image directly.
    if self._aws_config.get('image_uri') is None:
        self._package_and_deploy_code(context)

    template = Template()
    template.set_version('2010-09-09')

    # --- Parameters -----------------------------------------------------
    memory_size = template.add_parameter(
        Parameter(f'{self._lambda_resource_name(service.name)}MemorySize',
                  Type=NUMBER,
                  Default=self._aws_config.get('memory_sync', '3008')))
    timeout_gateway = template.add_parameter(
        Parameter(
            f'{self._lambda_resource_name(service.name)}GatewayTimeout',
            Type=NUMBER,
            Default='30'))
    timeout_async = template.add_parameter(
        Parameter(
            f'{self._lambda_resource_name(service.name)}AsyncTimeout',
            Type=NUMBER,
            Default='900'))

    role_title = f'{self._lambda_resource_name(service.name)}ExecutionRole'
    self._add_role(role_title, template)

    # --- Sync (API gateway) lambda -------------------------------------
    params = {
        'FunctionName': f'{self._service_name(service.name)}Sync',
        'Role': GetAtt(role_title, 'Arn'),
        'MemorySize': Ref(memory_size),
        'Timeout': Ref(timeout_gateway),
        'Environment': self._lambda_environment(context)
    }
    image_uri = self._aws_config.get('image_uri')
    if image_uri is not None:
        params.update({
            'Code': Code(ImageUri=image_uri),
            'PackageType': 'Image',
        })
    else:
        params.update({
            'Code': Code(S3Bucket=self._bucket, S3Key=self._code_key),
            'Runtime': 'python3.7',
            'Handler': 'handlers.main',
        })
    if self._security_group_ids and self._subnet_ids:
        params['VpcConfig'] = VPCConfig(
            SecurityGroupIds=self._security_group_ids,
            SubnetIds=self._subnet_ids)
    api_lambda = template.add_resource(
        Function(f'{self._lambda_resource_name(service.name)}Sync',
                 **params))

    route = inflection.dasherize(context.name)
    proxy_route = f'{route}/{{proxy+}}'
    template.add_resource(
        Permission(
            f'{self._lambda_resource_name(service.name)}SyncPermission',
            Action='lambda:InvokeFunction',
            FunctionName=f'{self._service_name(service.name)}Sync',
            Principal='apigateway.amazonaws.com',
            SourceArn=Join('', [
                'arn:aws:execute-api:', self._region, ':',
                self._account_id, ':',
                ImportValue(self._rest_api_reference()), '/*/*/', route, '*'
            ]),
            DependsOn=api_lambda))

    # --- Async (queue-driven) lambda ------------------------------------
    if self._adaptive_memory:
        # Adaptive-memory deployments start small; memory_async config may
        # override the default.
        value = '256'
        try:
            value = int(self._aws_config.get('memory_async'))
        except (TypeError, ValueError):
            # Key absent (int(None) -> TypeError) or not numeric — keep
            # the default.
            pass
        memory_size = template.add_parameter(
            Parameter(
                f'{self._lambda_resource_name(service.name)}MemorySizeAsync',
                Type=NUMBER,
                Default=value))
    params = {
        'FunctionName': self._lambda_function_name(service.name, 'Async'),
        'Role': GetAtt(role_title, 'Arn'),
        'MemorySize': Ref(memory_size),
        'Timeout': Ref(timeout_async),
        'Environment': self._lambda_environment(context)
    }
    if image_uri is not None:
        params.update({
            'Code': Code(ImageUri=image_uri),
            'PackageType': 'Image',
        })
    else:
        params.update({
            'Code': Code(S3Bucket=self._bucket, S3Key=self._code_key),
            'Runtime': 'python3.7',
            'Handler': 'handlers.main',
        })
    if self._security_group_ids and self._subnet_ids:
        params['VpcConfig'] = VPCConfig(
            SecurityGroupIds=self._security_group_ids,
            SubnetIds=self._subnet_ids)
    async_lambda = template.add_resource(
        Function(self._lambda_resource_name(service.name, type_='Async'),
                 **params))
    if self._adaptive_memory:
        self._add_adaptive_memory_functions(template, context, timeout_async,
                                            role_title, async_lambda)

    # --- Timers ---------------------------------------------------------
    # One EventBridge rule + invoke permission per timer-decorated command
    # handler that applies to the current environment.
    for cls, _ in context.command_handlers.items():
        if cls.has_timer():
            timer = cls.get_timer()
            if timer.environment is not None and timer.environment != self._env:
                continue
            if isinstance(timer.command, str):
                timer_name = timer.command
            else:
                timer_name = timer.command.__name__

            target = Target(
                f'{self._service_name(service.name)}AsyncTarget',
                Arn=GetAtt(
                    self._lambda_resource_name(service.name, type_='Async'),
                    'Arn'),
                Id=self._lambda_resource_name(service.name, type_='Async'),
                Input=
                f'{{"_context": "{context.name}", "_type": "command", "_name": "{cls.__name__}"}}'
            )
            rule = template.add_resource(
                Rule(f'{timer_name}TimerRule',
                     ScheduleExpression=f'cron({timer.cron})',
                     State='ENABLED',
                     Targets=[target]))
            template.add_resource(
                Permission(f'{timer_name}TimerPermission',
                           Action='lambda:invokeFunction',
                           Principal='events.amazonaws.com',
                           FunctionName=Ref(async_lambda),
                           SourceArn=GetAtt(rule, 'Arn')))

    # --- HTTP API integration & routes ----------------------------------
    integration = template.add_resource(
        Integration(
            self._integration_name(context.name),
            ApiId=ImportValue(self._rest_api_reference()),
            PayloadFormatVersion='2.0',
            IntegrationType='AWS_PROXY',
            IntegrationUri=Join('', [
                'arn:aws:lambda:', self._region, ':', self._account_id,
                ':function:', Ref(api_lambda),
            ]),
        ))
    template.add_resource(
        Route(f'{self._route_name(context.name)}Base',
              ApiId=ImportValue(self._rest_api_reference()),
              RouteKey=f'ANY /{route}',
              AuthorizationType='NONE',
              Target=Join('/', ['integrations', Ref(integration)]),
              DependsOn=integration))
    template.add_resource(
        Route(f'{self._route_name(context.name)}Proxy',
              ApiId=ImportValue(self._rest_api_reference()),
              RouteKey=f'ANY /{proxy_route}',
              AuthorizationType='NONE',
              Target=Join('/', ['integrations', Ref(integration)]),
              DependsOn=integration))

    # --- Error alarms / subscriptions -----------------------------------
    if 'errors' in self._aws_config:
        alerts_topic = template.add_resource(
            Topic(self._alert_topic_name(service.name),
                  TopicName=self._alert_topic_name(service.name)))
        if 'email' in self._aws_config.get('errors'):
            # One email subscription per comma-separated recipient.
            for address in self._aws_config.get('errors').get('email').get(
                    'recipients').split(','):
                template.add_resource(
                    SubscriptionResource(
                        self._alarm_subscription_name(context.name),
                        Protocol='email',
                        Endpoint=address,
                        TopicArn=self._alert_topic_arn(context.name),
                        DependsOn=[alerts_topic]))

    # --- Queues / Topics -------------------------------------------------
    # Group subscriptions by originating context.
    subscriptions = {}
    for subscription in self._get_subscriptions(context):
        if subscription['context'] not in subscriptions:
            subscriptions[subscription['context']] = []
        subscriptions[subscription['context']].append(subscription)

    dlq = template.add_resource(
        Queue(f'{self._queue_name(context.name)}Dlq',
              QueueName=f'{self._queue_name(context.name)}Dlq',
              VisibilityTimeout=905,
              ReceiveMessageWaitTimeSeconds=20,
              MessageRetentionPeriod=1209600))
    self._queue_policy(template, dlq,
                       f'{self._queue_name(context.name)}Dlq', subscriptions)

    queue = template.add_resource(
        Queue(self._queue_name(context.name),
              QueueName=self._queue_name(context.name),
              VisibilityTimeout=905,
              ReceiveMessageWaitTimeSeconds=20,
              MessageRetentionPeriod=1209600,
              RedrivePolicy=RedrivePolicy(
                  deadLetterTargetArn=GetAtt(dlq, 'Arn'),
                  maxReceiveCount=1000),
              DependsOn=dlq))
    self._queue_policy(template, queue, self._queue_name(context.name),
                       subscriptions)

    # Drive the async lambda from the service queue.
    template.add_resource(
        EventSourceMapping(
            f'{self._lambda_resource_name(context.name)}AsyncMapping',
            BatchSize=1,
            Enabled=True,
            EventSourceArn=GetAtt(queue, 'Arn'),
            FunctionName=self._lambda_function_name(service.name, 'Async'),
            DependsOn=[queue, async_lambda]))

    topic = template.add_resource(
        Topic(self._topic_name(context.name),
              TopicName=self._topic_name(context.name)))

    for context_name, list_ in subscriptions.items():
        if context_name == context.name and len(list_) > 0:
            # Subscribe our queue to our own topic, filtered by event name.
            template.add_resource(
                SubscriptionResource(
                    self._subscription_name(context_name),
                    Protocol='sqs',
                    Endpoint=GetAtt(queue, 'Arn'),
                    TopicArn=self._topic_arn(context.name),
                    FilterPolicy={
                        '_name': [x['name'] for x in list_],
                    },
                    RedrivePolicy={
                        'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                    },
                    DependsOn=[queue, dlq, topic]))
        elif len(list_) > 0:
            # Subscribe our queue to another context's topic, creating that
            # topic if it is not managed by this deployment.
            if context_name not in self._context_map.contexts:
                self._find_or_create_topic(context_name)
            template.add_resource(
                SubscriptionResource(
                    self._subscription_name(context.name, context_name),
                    Protocol='sqs',
                    Endpoint=GetAtt(queue, 'Arn'),
                    TopicArn=self._topic_arn(context_name),
                    FilterPolicy={'_name': [x['name'] for x in list_]},
                    RedrivePolicy={
                        'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                    },
                    DependsOn=[queue, dlq]))

    # --- DynamoDB Table --------------------------------------------------
    ddb_table = template.add_resource(
        Table(self._ddb_resource_name(context.name),
              TableName=self._ddb_table_name(context.name),
              AttributeDefinitions=[
                  AttributeDefinition(AttributeName='pk',
                                      AttributeType='S'),
                  AttributeDefinition(AttributeName='sk',
                                      AttributeType='S'),
              ],
              BillingMode='PAY_PER_REQUEST',
              KeySchema=[
                  KeySchema(AttributeName='pk', KeyType='HASH'),
                  KeySchema(AttributeName='sk', KeyType='RANGE'),
              ],
              TimeToLiveSpecification=TimeToLiveSpecification(
                  AttributeName='TimeToLive', Enabled=True)))
    template.add_output(
        Output("DDBTable",
               Value=Ref(ddb_table),
               Description="Document table"))

    for cb in self._pre_deployment_hooks:
        cb(template=template, context=context, env=self._env)

    # --- Deploy ----------------------------------------------------------
    self.info('Deploying stack')
    # Template can exceed the inline size limit, so stage it in S3 and hand
    # CloudFormation a presigned URL.
    self._s3_client.put_object(Body=template.to_json(),
                               Bucket=self._bucket,
                               Key=self._template_key)
    url = self._s3_client.generate_presigned_url(ClientMethod='get_object',
                                                 Params={
                                                     'Bucket': self._bucket,
                                                     'Key': self._template_key
                                                 })
    stack_name = self._stack_name(context.name)
    try:
        self._cloudformation_client.describe_stacks(StackName=stack_name)
        self._update_stack(self._stack_name(context.name), url)
    except ClientError as e:
        # describe_stacks raises when the stack is missing — create it then.
        if f'Stack with id {stack_name} does not exist' in str(e):
            self._create_stack(self._stack_name(context.name), url)
        else:
            raise

    for cb in self._post_deployment_hooks:
        cb(template=template, context=context, env=self._env)

    self._migrate_schema(context)
    self.info('Done')
from troposphere.sns import Subscription, Topic from troposphere.sqs import Queue, RedrivePolicy, QueuePolicy template = Template() template.add_description( "AWS CloudFormation Sample Template SNS_AND_SQS_: Sample") dead_letter_queue = template.add_resource( Queue("deadSQS", QueueName="dead__letter_queue__iac_sqs_sample")) sqs_aws = template.add_resource( Queue("iacSQS", QueueName="iac_sqs_sample", RedrivePolicy=RedrivePolicy( deadLetterTargetArn=GetAtt(dead_letter_queue, "Arn"), maxReceiveCount="5", ))) sns_aws = template.add_resource( Topic("iacSNS", TopicName="iac_sns_sample", Subscription=[ Subscription(Protocol="sqs", Endpoint=GetAtt(sqs_aws, "Arn")) ])) template.add_output([ Output("SourceQueueURL", Description="URL of the source queue", Value=Ref(sqs_aws)), Output("SourceQueueARN", Description="ARN of the source queue",
def _deploy_service(self, service: ff.Service):
    """Build and deploy the CloudFormation stack for one service.

    Packages the service's code, generates a template containing the sync
    (API gateway) and async (queue-driven) lambdas, HTTP API routes, error
    alarms, and the service queue/topic pair with DLQ, then creates or
    updates the stack via CloudFormation.
    """
    context = self._context_map.get_context(service.name)
    # Zip-based deploy: code must be packaged and uploaded before the
    # lambda resources reference it.
    self._package_and_deploy_code(context)
    template = Template()
    template.set_version('2010-09-09')
    # Parameters for lambda sizing/timeouts.
    memory_size = template.add_parameter(Parameter(
        f'{self._lambda_resource_name(service.name)}MemorySize',
        Type=NUMBER,
        Default='3008'
    ))
    timeout_gateway = template.add_parameter(Parameter(
        f'{self._lambda_resource_name(service.name)}GatewayTimeout',
        Type=NUMBER,
        Default='30'
    ))
    timeout_async = template.add_parameter(Parameter(
        f'{self._lambda_resource_name(service.name)}AsyncTimeout',
        Type=NUMBER,
        Default='900'
    ))
    role_title = f'{self._lambda_resource_name(service.name)}ExecutionRole'
    self._add_role(role_title, template)
    # Sync (API gateway facing) lambda.
    params = {
        'FunctionName': f'{self._service_name(service.name)}Sync',
        'Code': Code(
            S3Bucket=self._bucket,
            S3Key=self._code_key
        ),
        'Handler': 'handlers.main',
        'Role': GetAtt(role_title, 'Arn'),
        'Runtime': 'python3.7',
        'MemorySize': Ref(memory_size),
        'Timeout': Ref(timeout_gateway),
        'Environment': self._lambda_environment(context)
    }
    if self._security_group_ids and self._subnet_ids:
        params['VpcConfig'] = VPCConfig(
            SecurityGroupIds=self._security_group_ids,
            SubnetIds=self._subnet_ids
        )
    api_lambda = template.add_resource(Function(
        f'{self._lambda_resource_name(service.name)}Sync',
        **params
    ))
    route = inflection.dasherize(context.name)
    proxy_route = f'{route}/{{proxy+}}'
    # Allow the HTTP API to invoke the sync lambda.
    template.add_resource(Permission(
        f'{self._lambda_resource_name(service.name)}SyncPermission',
        Action='lambda:InvokeFunction',
        FunctionName=f'{self._service_name(service.name)}Sync',
        Principal='apigateway.amazonaws.com',
        SourceArn=Join('', [
            'arn:aws:execute-api:', self._region, ':', self._account_id, ':',
            ImportValue(self._rest_api_reference()), '/*/*/', route, '*'
        ]),
        DependsOn=api_lambda
    ))
    # Async (queue-driven) lambda — same code/role, async timeout.
    # NOTE(review): reuses the sync MemorySize parameter for the async
    # lambda — confirm that is intended.
    params = {
        'FunctionName': f'{self._service_name(service.name)}Async',
        'Code': Code(
            S3Bucket=self._bucket,
            S3Key=self._code_key
        ),
        'Handler': 'handlers.main',
        'Role': GetAtt(role_title, 'Arn'),
        'Runtime': 'python3.7',
        'MemorySize': Ref(memory_size),
        'Timeout': Ref(timeout_async),
        'Environment': self._lambda_environment(context)
    }
    if self._security_group_ids and self._subnet_ids:
        params['VpcConfig'] = VPCConfig(
            SecurityGroupIds=self._security_group_ids,
            SubnetIds=self._subnet_ids
        )
    async_lambda = template.add_resource(Function(
        f'{self._lambda_resource_name(service.name)}Async',
        **params
    ))
    # HTTP API integration plus base and greedy-proxy routes.
    integration = template.add_resource(Integration(
        self._integration_name(context.name),
        ApiId=ImportValue(self._rest_api_reference()),
        PayloadFormatVersion='2.0',
        IntegrationType='AWS_PROXY',
        IntegrationUri=Join('', [
            'arn:aws:lambda:', self._region, ':', self._account_id,
            ':function:', Ref(api_lambda),
        ]),
    ))
    template.add_resource(Route(
        f'{self._route_name(context.name)}Base',
        ApiId=ImportValue(self._rest_api_reference()),
        RouteKey=f'ANY /{route}',
        AuthorizationType='NONE',
        Target=Join('/', ['integrations', Ref(integration)]),
        DependsOn=integration
    ))
    template.add_resource(Route(
        f'{self._route_name(context.name)}Proxy',
        ApiId=ImportValue(self._rest_api_reference()),
        RouteKey=f'ANY /{proxy_route}',
        AuthorizationType='NONE',
        Target=Join('/', ['integrations', Ref(integration)]),
        DependsOn=integration
    ))

    # Error alarms / subscriptions
    if 'errors' in self._aws_config:
        alerts_topic = template.add_resource(Topic(
            self._alert_topic_name(service.name),
            TopicName=self._alert_topic_name(service.name)
        ))
        self._add_error_alarm(template, f'{self._service_name(context.name)}Sync',
                              context.name, alerts_topic)
        self._add_error_alarm(template, f'{self._service_name(context.name)}Async',
                              context.name, alerts_topic)
        if 'email' in self._aws_config.get('errors'):
            # NOTE(review): a single subscription gets the whole
            # 'recipients' string as Endpoint — if it is comma-separated,
            # only one (invalid) endpoint results; confirm config shape.
            template.add_resource(SubscriptionResource(
                self._alarm_subscription_name(context.name),
                Protocol='email',
                Endpoint=self._aws_config.get('errors').get('email').get('recipients'),
                TopicArn=self._alert_topic_arn(context.name),
                DependsOn=[alerts_topic]
            ))

    # Queues / Topics
    # Group subscriptions by originating context name.
    subscriptions = {}
    for subscription in self._get_subscriptions(context):
        if subscription['context'] not in subscriptions:
            subscriptions[subscription['context']] = []
        subscriptions[subscription['context']].append(subscription)
    dlq = template.add_resource(Queue(
        f'{self._queue_name(context.name)}Dlq',
        QueueName=f'{self._queue_name(context.name)}Dlq',
        VisibilityTimeout=905,
        ReceiveMessageWaitTimeSeconds=20,
        MessageRetentionPeriod=1209600
    ))
    self._queue_policy(template, dlq, f'{self._queue_name(context.name)}Dlq', subscriptions)
    queue = template.add_resource(Queue(
        self._queue_name(context.name),
        QueueName=self._queue_name(context.name),
        VisibilityTimeout=905,
        ReceiveMessageWaitTimeSeconds=20,
        MessageRetentionPeriod=1209600,
        RedrivePolicy=RedrivePolicy(
            deadLetterTargetArn=GetAtt(dlq, 'Arn'),
            maxReceiveCount=1000
        ),
        DependsOn=dlq
    ))
    self._queue_policy(template, queue, self._queue_name(context.name), subscriptions)
    # Drive the async lambda from the service queue, one message at a time.
    template.add_resource(EventSourceMapping(
        f'{self._lambda_resource_name(context.name)}AsyncMapping',
        BatchSize=1,
        Enabled=True,
        EventSourceArn=GetAtt(queue, 'Arn'),
        FunctionName=f'{self._service_name(service.name)}Async',
        DependsOn=[queue, async_lambda]
    ))
    topic = template.add_resource(Topic(
        self._topic_name(context.name),
        TopicName=self._topic_name(context.name)
    ))
    for context_name, list_ in subscriptions.items():
        if context_name == context.name and len(list_) > 0:
            # Subscribe our queue to our own topic, filtered by event name.
            template.add_resource(SubscriptionResource(
                self._subscription_name(context_name),
                Protocol='sqs',
                Endpoint=GetAtt(queue, 'Arn'),
                TopicArn=self._topic_arn(context.name),
                FilterPolicy={
                    '_name': [x['name'] for x in list_],
                },
                RedrivePolicy={
                    'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                },
                DependsOn=[queue, dlq, topic]
            ))
        elif len(list_) > 0:
            # Subscribe our queue to another context's topic, creating the
            # topic if this deployment does not manage that context.
            if context_name not in self._context_map.contexts:
                self._find_or_create_topic(context_name)
            template.add_resource(SubscriptionResource(
                self._subscription_name(context.name, context_name),
                Protocol='sqs',
                Endpoint=GetAtt(queue, 'Arn'),
                TopicArn=self._topic_arn(context_name),
                FilterPolicy={
                    '_name': [x['name'] for x in list_]
                },
                RedrivePolicy={
                    'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                },
                DependsOn=[queue, dlq]
            ))

    self.info('Deploying stack')
    stack_name = self._stack_name(context.name)
    try:
        # describe_stacks raises ClientError when the stack is missing;
        # that distinguishes update from create below.
        self._cloudformation_client.describe_stacks(StackName=stack_name)
        self._update_stack(self._stack_name(context.name), template)
    except ClientError as e:
        if f'Stack with id {stack_name} does not exist' in str(e):
            self._create_stack(self._stack_name(context.name), template)
        else:
            raise e
    self._execute_ddl(context)
    self.info('Done')
print(' adding sqs')


def hours_in_seconds(n):
    """Convert a number of hours to the equivalent number of seconds."""
    return n * 60 * 60


# Dead-letter target for start events that fail processing.
failed_start_events = t.add_resource(
    Queue(
        "FailedStartEvents",
        FifoQueue=True,
        ContentBasedDeduplication=True,
    ))

# Main FIFO start-event queue; a message is attempted once before being
# redirected to the dead-letter queue.
start_events = t.add_resource(
    Queue(
        "HyP3StartEvents",
        FifoQueue=True,
        ContentBasedDeduplication=True,
        RedrivePolicy=RedrivePolicy(
            deadLetterTargetArn=GetAtt(failed_start_events, "Arn"),
            maxReceiveCount=1,
        ),
        VisibilityTimeout=hours_in_seconds(3),
    ))

# Publish the generated queue name to SSM so other stacks can look it up.
ssm_queue_name = t.add_resource(
    Parameter(
        "HyP3SSMParameterStartEventQueueName",
        Name=Sub("/${StackName}/StartEventQueueName",
                 StackName=Ref("AWS::StackName")),
        Type="String",
        Value=GetAtt(start_events, "QueueName"),
    ))
def generate_queues_template(QueueNamePrefix, Environment):
    """Build a CloudFormation template for a KMS-encrypted primary queue
    plus its dead-letter queue, with key alias and exported outputs."""
    queue_name = f'{QueueNamePrefix}-{Environment}'
    dlq_name = f'{QueueNamePrefix}DLQ-{Environment}'

    template = Template(Description='A template for a messaging queue')
    template.version = '2010-09-09'

    # Administrative KMS actions granted to the key administrators.
    admin_actions = [
        KmsAction(name) for name in (
            'Create*', 'Describe*', 'Enable*', 'List*', 'Put*', 'Update*',
            'Revoke*', 'Disable*', 'Get*', 'Delete*', 'ScheduleKeyDeletion',
            'CancelKeyDeletion',
        )
    ]
    kms_key = template.add_resource(
        Key('KMSKey',
            Description=f'KMS Key for encrypting {queue_name}',
            Enabled=True,
            EnableKeyRotation=True,
            KeyPolicy=Policy(
                Version='2012-10-17',
                Statement=[
                    Statement(
                        Sid='Enable IAM User Permissions',
                        Effect=Allow,
                        Principal=AWSPrincipal(
                            Sub('arn:aws:iam::${AWS::AccountId}:root')),
                        Action=[KmsAction(All)],
                        Resource=AllResources),
                    Statement(
                        Sid='Allow access for Key Administrators',
                        Effect=Allow,
                        Principal=AWSPrincipal(
                            [Sub(f'{USER}/frank'),
                             Sub(f'{USER}/moonunit')]),
                        Action=admin_actions,
                        Resource=AllResources),
                ])))
    template.add_resource(
        Alias('KMSKeyAlias',
              AliasName=f'alias/{queue_name}',
              TargetKeyId=Ref(kms_key)))

    dead_letter_queue = template.add_resource(
        Queue('DeadLetterQueue',
              QueueName=dlq_name,
              MaximumMessageSize=262144,       # 256KiB
              MessageRetentionPeriod=1209600,  # 14 days
              VisibilityTimeout=30))
    template.add_resource(
        Queue('PrimaryQueue',
              QueueName=queue_name,
              MaximumMessageSize=262144,       # 256KiB
              MessageRetentionPeriod=1209600,  # 14 days
              VisibilityTimeout=30,
              RedrivePolicy=RedrivePolicy(
                  deadLetterTargetArn=GetAtt(dead_letter_queue.title, 'Arn'),
                  maxReceiveCount=10),
              KmsMasterKeyId=Ref(kms_key),
              KmsDataKeyReusePeriodSeconds=300))

    template.add_output([
        Output('QueueArn',
               Description=f'ARN of {queue_name} Queue',
               Value=GetAtt('PrimaryQueue', 'Arn'),
               Export=Export(Name(Sub('${AWS::StackName}:PrimaryQueueArn')))),
        Output('KmsKeyArn',
               Description=f'KMS Key ARN for {queue_name} Queue',
               Value=GetAtt('KMSKey', 'Arn'),
               Export=Export(Name(Sub('${AWS::StackName}:KmsKeyArn')))),
    ])
    return template
def generate(env='pilot'):
    """Generate the spider pipeline CloudFormation template as a JSON string.

    Resources: a spider-tasks queue with DLQ, the spider lambda plus its
    role, an S3-fed SNS topic (with topic and queue policies), source and
    results buckets, and an SQS->lambda event source mapping.

    :param env: environment prefix used in resource names (default 'pilot').
    :return: the rendered template JSON (``template.to_json()``).
    """
    template = Template()
    template.set_version("2010-09-09")

    param_spider_lambda_memory_size = template.add_parameter(
        Parameter(
            'SpiderLambdaMemorySize',
            Type=NUMBER,
            Description='Amount of memory to allocate to the Lambda Function',
            Default='128',
            AllowedValues=MEMORY_VALUES
        )
    )
    param_spider_lambda_timeout = template.add_parameter(
        Parameter(
            'SpiderLambdaTimeout',
            Type=NUMBER,
            Description='Timeout in seconds for the Lambda function',
            Default='60'
        )
    )

    # Task queue with a dead-letter queue; messages fail over after 2 receives.
    spider_tasks_queue_dlq_name = f'{env}-spider-tasks-dlq'
    spider_tasks_queue_dlq = template.add_resource(
        Queue(
            "SpiderTasksDLQ",
            QueueName=spider_tasks_queue_dlq_name,
            MessageRetentionPeriod=(60 * 60 * 24 * 14),  # 14 days
        )
    )
    spider_tasks_queue_name = f"{env}-spider-tasks"
    spider_tasks_queue = template.add_resource(
        Queue(
            "SpiderTasksQueue",
            QueueName=spider_tasks_queue_name,
            MessageRetentionPeriod=(60 * 60 * 24 * 14),  # 14 days
            VisibilityTimeout=300,
            RedrivePolicy=RedrivePolicy(
                deadLetterTargetArn=GetAtt(spider_tasks_queue_dlq, "Arn"),
                maxReceiveCount=2,
            ),
            DependsOn=[spider_tasks_queue_dlq],
        )
    )

    spider_lambda_role = template.add_resource(
        Role(
            "SpiderLambdaRole",
            Path="/",
            Policies=[
                Policy(
                    PolicyName="root",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Id="root",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[Action("logs", "*")]
                            ),
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[Action("s3", "*")]
                            ),
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[Action("sqs", "*")]
                            ),
                        ]
                    ),
                )
            ],
            AssumeRolePolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Action": ["sts:AssumeRole"],
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["lambda.amazonaws.com"]
                    }
                }]
            },
        )
    )

    # Fix: use a context manager so the file handle is always closed
    # (previously `open(...).readlines()` leaked it).
    spider_file_path = './spider/index.js'
    with open(spider_file_path, 'r') as spider_file:
        spider_code = spider_file.readlines()

    spider_lambda = template.add_resource(
        Function(
            "SpiderLambda",
            Code=Code(
                S3Bucket='spider-lambda',
                S3Key=f'{env}.zip',
                # ZipFile=Join("", spider_code)  # inline-code alternative
            ),
            Handler="index.handler",
            Role=GetAtt(spider_lambda_role, "Arn"),
            Runtime="nodejs12.x",
            Layers=['arn:aws:lambda:us-east-1:342904801388:layer:spider-node-browser:1'],
            MemorySize=Ref(param_spider_lambda_memory_size),
            Timeout=Ref(param_spider_lambda_timeout),
            DependsOn=[spider_tasks_queue],
        )
    )

    # SNS topic fed by S3 events, fanning out to the task queue.
    source_sns_name = f'{env}-source-sns-topic'
    source_sns_topic = template.add_resource(
        Topic(
            "SNSSource",
            TopicName=source_sns_name,
            Subscription=[
                Subscription(
                    Endpoint=GetAtt(spider_tasks_queue, "Arn"),
                    Protocol='sqs',
                )
            ],
            DependsOn=[spider_tasks_queue]
        )
    )
    source_sns_topic_policy = template.add_resource(
        TopicPolicy(
            "SourceForwardingTopicPolicy",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Id="AllowS3PutMessageInSNS",
                Statement=[
                    Statement(
                        Sid="AllowS3PutMessages",
                        Principal=Principal("Service", "s3.amazonaws.com"),
                        Effect=Allow,
                        Action=[
                            Action("sns", "Publish"),
                        ],
                        Resource=["*"],
                    )
                ]
            ),
            Topics=[Ref(source_sns_topic)],
        )
    )
    template.add_resource(
        QueuePolicy(
            "AllowSNSPutMessagesInSQS",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Id="AllowSNSPutMessagesInSQS",
                Statement=[
                    Statement(
                        Sid="AllowSNSPutMessagesInSQS2",
                        Principal=Principal("*"),
                        Effect=Allow,
                        Action=[
                            Action("sqs", "SendMessage"),
                        ],
                        Resource=["*"],
                    )
                ]
            ),
            Queues=[Ref(spider_tasks_queue)],
            DependsOn=[spider_tasks_queue],
        )
    )

    # Buckets
    source_bucket_name = f'{env}-source-bucket'
    template.add_resource(
        Bucket(
            "SourceBucket",
            BucketName=source_bucket_name,
            NotificationConfiguration=NotificationConfiguration(
                TopicConfigurations=[
                    TopicConfigurations(
                        Topic=Ref(source_sns_topic),
                        Event="s3:ObjectCreated:*",
                    )
                ],
            ),
            DependsOn=[source_sns_topic_policy],
        )
    )
    results_bucket_name = f'{env}-results-bucket'
    template.add_resource(
        Bucket(
            "ResultsBucket",
            BucketName=results_bucket_name,
        )
    )

    # Lambda trigger
    template.add_resource(
        EventSourceMapping(
            "TriggerLambdaSpiderFromSQS",
            EventSourceArn=GetAtt(spider_tasks_queue, "Arn"),
            FunctionName=Ref(spider_lambda),
            BatchSize=1,  # Default process tasks one by one
        )
    )

    return template.to_json()