def create_sns(alarm_email):
    """Build an 'email_notification' SNS topic subscribed to *alarm_email*.

    :param alarm_email: template parameter whose value is the address to notify.
    :return: the troposphere Topic (not yet attached to a template).
    """
    email_subscription = Subscription(Endpoint=Ref(alarm_email), Protocol="email")
    return Topic('email_notification', Subscription=[email_subscription])
def add_sns_topic(resources, outputs):
    """Append the CIS-Benchmarks notification topic to *resources*.

    *outputs* is returned untouched so helpers of this shape can be chained.
    """
    benchmarks_topic = Topic(
        "BenchmarksNotificationTopic",
        DisplayName="CIS-Benchmarks",
        TopicName="CIS-Benchmarks",
    )
    resources.append(benchmarks_topic)
    return resources, outputs
def create_sns_topic(stack, name, endpoint, protocol='https'):
    """Add a SNS topic to *stack* and return the created resource.

    The CFN logical id strips hyphens from *name*; the TopicName keeps them.
    """
    logical_id = '{0}Topic'.format(name.replace('-', ''))
    subscription = Subscription(Endpoint=endpoint, Protocol=protocol)
    sns_topic = Topic(
        logical_id,
        DisplayName=name,
        Subscription=[subscription],
        TopicName='{0}Topic'.format(name))
    return stack.stack.add_resource(sns_topic)
def create_events_topic(self):
    """Add the conditional 'Empire' SNS event topic and export its ARN.

    Both the resource and the output are gated on the EnableSNSEvents condition.
    """
    template = self.template
    events_topic = Topic(
        "EventTopic",
        Condition="EnableSNSEvents",
        DisplayName="Empire",
    )
    template.add_resource(events_topic)
    template.add_output(
        Output(
            "SNSEventTopicArn",
            Condition="EnableSNSEvents",
            Value=Ref("EventTopic"),
        ))
def _find_or_create_topic(self, context_name: str):
    """Ensure the SNS topic for *context_name* exists; create its stack if not.

    Probes the topic by ARN and, when the lookup fails, deploys a one-resource
    CloudFormation stack containing the topic.
    """
    topic_name = self._topic_name(context_name)
    arn = f'arn:aws:sns:{self._region}:{self._account_id}:{topic_name}'
    try:
        # Existence check: raises ClientError when the topic is missing.
        self._sns_client.get_topic_attributes(TopicArn=arn)
    except ClientError:
        template = Template()
        template.set_version('2010-09-09')
        template.add_resource(Topic(topic_name, TopicName=topic_name))
        self.info(f'Creating stack for context "{context_name}"')
        self._create_stack(self._stack_name(context_name), template)
def topic(topic_title, emails):
    """Create an SNS topic with one email subscription per address in *emails*."""
    subscriptions = [
        Subscription(topic_title + "Subscription" + str(index),
                     Endpoint=email,
                     Protocol="email")
        for index, email in enumerate(emails)
    ]
    sns_topic = Topic(
        topic_title,
        DisplayName=Join("", [Ref("AWS::StackName"), "-", topic_title]))
    sns_topic.Subscription = subscriptions
    return sns_topic
def __init__(self, template):
    """
    Create the stack-wide notification SNS topic and tracking lists.

    AWS: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-sns-topic.html
    Troposphere: https://github.com/cloudtools/troposphere/blob/master/troposphere/sns.py

    :param template: The troposphere template to add the topic to.
    """
    title = 'SnsTopic'
    self.template = template
    display_name = Join('', [Ref('AWS::StackName'), 'Notifications'])
    self.trop_topic = self.template.add_resource(
        Topic(title, DisplayName=display_name))
    # Populated later as subscriptions/alarms are attached to this topic.
    self.subscriptions = []
    self.alarms = []
def define_topic(topic, content):
    """
    Build the troposphere Topic resource for *topic* from its properties.

    :param topic: The topic and its definition
    :type topic: ecs_composex.sns.sns_stack.Topic
    :param content: full compose content, used to resolve subscriptions.
    """
    topic.cfn_resource = Topic(topic.logical_name, Metadata=metadata)
    if keyisset(SUBSCRIPTIONS_KEY, topic.properties):
        subscriptions = define_topic_subscriptions(
            topic.properties[SUBSCRIPTIONS_KEY], content)
        setattr(topic.cfn_resource, "Subscription", subscriptions)
    # Copy scalar properties straight onto the resource; list-valued
    # properties (i.e. subscriptions) are handled above.
    for key, value in topic.properties.items():
        if not isinstance(value, list):
            setattr(topic.cfn_resource, key, value)
def define_topic(topic_name, topic, content):
    """
    Build a troposphere Topic resource from compose-file Properties.

    :param topic_name: raw topic name; non-alphanumeric characters are
        stripped to form the CFN logical id.
    :param topic: topic definition mapping (may contain a "Properties" key).
    :param content: full compose content, used to resolve subscriptions.
    :return: the configured troposphere Topic.
    """
    properties = topic["Properties"] if keyisset("Properties", topic) else {}
    topic = Topic(NONALPHANUM.sub("", topic_name), Metadata=metadata)
    if keyisset(SUBSCRIPTIONS_KEY, properties):
        subscriptions = define_topic_subscriptions(
            properties[SUBSCRIPTIONS_KEY], content
        )
        setattr(topic, "Subscription", subscriptions)
    # Copy scalar properties straight onto the resource; list-valued
    # properties (i.e. subscriptions) are handled above.
    for key, value in properties.items():
        if not isinstance(value, list):
            setattr(topic, key, value)
    return topic
def create_sns_sqs(template, sns_name, sqs_name):
    """Wire an SNS topic fanning out into an SQS queue.

    Adds the queue, a topic subscribed to it, and a queue policy allowing the
    topic to send messages; exports the topic ARN, queue ARN and queue URL.

    :param template: troposphere Template to mutate.
    :param sns_name: logical id and TopicName for the topic.
    :param sqs_name: logical id and QueueName for the queue.
    """
    q = template.add_resource(Queue(sqs_name, QueueName=sqs_name))
    topic = template.add_resource(
        Topic(sns_name,
              TopicName=sns_name,
              Subscription=[
                  Subscription(Endpoint=GetAtt(q, 'Arn'), Protocol='sqs')
              ]))
    policy = template.add_resource(
        QueuePolicy(sqs_name + sns_name + 'policy',
                    PolicyDocument={
                        "Version": "2012-10-17",
                        "Id": "MyQueuePolicy",
                        "Statement": [{
                            "Sid": "Allow-SendMessage-From-SNS-Topic",
                            "Effect": "Allow",
                            "Principal": "*",
                            "Action": ["sqs:SendMessage"],
                            "Resource": "*",
                            "Condition": {
                                "ArnEquals": {
                                    "aws:SourceArn": Ref(topic)
                                }
                            }
                        }]
                    },
                    # BUG FIX: AWS::SQS::QueuePolicy.Queues takes queue *URLs*,
                    # not names. Ref() on an SQS queue yields its URL; the
                    # previous literal name meant the policy never attached.
                    Queues=[Ref(q)]))
    template.add_output(Output("sns", Description="SNS Arn", Value=Ref(topic)))
    template.add_output(
        Output("queuearn", Description="Queue Arn", Value=GetAtt(q, 'Arn')))
    template.add_output(
        Output("queueurl", Description="Queue URL", Value=Ref(q)))
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/ from troposphere import GetAtt, Output, Ref, Template from troposphere.sns import Subscription, Topic from troposphere.sqs import Queue, QueuePolicy t = Template() t.set_description("AWS CloudFormation Sample Template: This template " "demonstrates the creation of a DynamoDB table.") sqsqueue = t.add_resource(Queue("SQSQueue")) snstopic = t.add_resource( Topic("SNSTopic", Subscription=[ Subscription(Protocol="sqs", Endpoint=GetAtt(sqsqueue, "Arn")) ])) t.add_output( Output( "QueueArn", Value=GetAtt(sqsqueue, "Arn"), Description="ARN of SQS Queue", )) t.add_resource( QueuePolicy("AllowSNS2SQSPolicy", Queues=[Ref(sqsqueue)], PolicyDocument={ "Version": "2008-10-17",
def generate(env='pilot'):
    """Build the spider pipeline CloudFormation template and return it as JSON.

    Resources: an SQS task queue with a dead-letter queue, a Lambda spider
    consuming the queue, an SNS topic that forwards S3 object-created events
    into the queue, and source/results S3 buckets.

    :param env: environment prefix used in every physical resource name.
    """
    template = Template()
    template.set_version("2010-09-09")

    # ExistingVPC = template.add_parameter(Parameter(
    #     "ExistingVPC",
    #     Type="AWS::EC2::VPC::Id",
    #     Description=(
    #         "The VPC ID that includes the security groups in the"
    #         "ExistingSecurityGroups parameter."
    #     ),
    # ))
    #
    # ExistingSecurityGroups = template.add_parameter(Parameter(
    #     "ExistingSecurityGroups",
    #     Type="List<AWS::EC2::SecurityGroup::Id>",
    # ))

    param_spider_lambda_memory_size = template.add_parameter(
        Parameter(
            'SpiderLambdaMemorySize',
            Type=NUMBER,
            Description='Amount of memory to allocate to the Lambda Function',
            Default='128',
            AllowedValues=MEMORY_VALUES
        )
    )

    param_spider_lambda_timeout = template.add_parameter(
        Parameter(
            'SpiderLambdaTimeout',
            Type=NUMBER,
            Description='Timeout in seconds for the Lambda function',
            Default='60'
        )
    )

    # Dead-letter queue; failed tasks are kept for the SQS maximum (14 days).
    spider_tasks_queue_dlq_name = f'{env}-spider-tasks-dlq'
    spider_tasks_queue_dlq = template.add_resource(
        Queue(
            "SpiderTasksDLQ",
            QueueName=spider_tasks_queue_dlq_name,
            MessageRetentionPeriod=(60 * 60 * 24 * 14),  # 14 days in seconds
        )
    )

    # Main task queue; after 2 failed receives a message moves to the DLQ.
    spider_tasks_queue_name = f"{env}-spider-tasks"
    spider_tasks_queue = template.add_resource(
        Queue(
            "SpiderTasksQueue",
            QueueName=spider_tasks_queue_name,
            MessageRetentionPeriod=(60 * 60 * 24 * 14),
            VisibilityTimeout=300,
            RedrivePolicy=RedrivePolicy(
                deadLetterTargetArn=GetAtt(spider_tasks_queue_dlq, "Arn"),
                maxReceiveCount=2,
            ),
            DependsOn=[spider_tasks_queue_dlq],
        )
    )

    # Execution role: broad logs/s3/sqs access for the spider Lambda.
    spider_lambda_role = template.add_resource(
        Role(
            "SpiderLambdaRole",
            Path="/",
            Policies=[
                Policy(
                    PolicyName="root",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Id="root",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[
                                    Action("logs", "*")
                                ]
                            ),
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[
                                    Action("s3", "*")
                                ]
                            ),
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[
                                    Action("sqs", "*")
                                ]
                            ),
                        ]
                    ),
                )
            ],
            AssumeRolePolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Action": ["sts:AssumeRole"],
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["lambda.amazonaws.com"]
                    }
                }]
            },
        )
    )

    # NOTE(review): file handle is never closed and spider_code is unused
    # since the ZipFile line below is commented out — code ships via S3.
    spider_file_path = './spider/index.js'
    spider_code = open(spider_file_path, 'r').readlines()

    spider_lambda = template.add_resource(
        Function(
            "SpiderLambda",
            Code=Code(
                S3Bucket='spider-lambda',
                S3Key=f'{env}.zip',
                # ZipFile=Join("", spider_code)
            ),
            Handler="index.handler",
            Role=GetAtt(spider_lambda_role, "Arn"),
            Runtime="nodejs12.x",
            Layers=['arn:aws:lambda:us-east-1:342904801388:layer:spider-node-browser:1'],
            MemorySize=Ref(param_spider_lambda_memory_size),
            Timeout=Ref(param_spider_lambda_timeout),
            DependsOn=[spider_tasks_queue],
        )
    )

    # AllSecurityGroups = template.add_resource(CustomResource(
    #     "AllSecurityGroups",
    #     List=Ref(ExistingSecurityGroups),
    #     AppendedItem=Ref("SecurityGroup"),
    #     ServiceToken=GetAtt(spider_lambda, "Arn"),
    # ))
    #
    # SecurityGroup = template.add_resource(SecurityGroup(
    #     "SecurityGroup",
    #     SecurityGroupIngress=[
    #         {"ToPort": "80", "IpProtocol": "tcp", "CidrIp": "0.0.0.0/0",
    #          "FromPort": "80"}],
    #     VpcId=Ref(ExistingVPC),
    #     GroupDescription="Allow HTTP traffic to the host",
    #     SecurityGroupEgress=[
    #         {"ToPort": "80", "IpProtocol": "tcp", "CidrIp": "0.0.0.0/0",
    #          "FromPort": "80"}],
    # ))
    #
    # AllSecurityGroups = template.add_output(Output(
    #     "AllSecurityGroups",
    #     Description="Security Groups that are associated with the EC2 instance",
    #     Value=Join(", ", GetAtt(AllSecurityGroups, "Value")),
    # ))

    # Topic receiving S3 events and forwarding them to the task queue.
    source_sns_name = f'{env}-source-sns-topic'
    source_sns_topic = template.add_resource(
        Topic(
            "SNSSource",
            TopicName=source_sns_name,
            Subscription=[
                Subscription(
                    Endpoint=GetAtt(spider_tasks_queue, "Arn"),
                    Protocol='sqs',
                )
            ],
            DependsOn=[spider_tasks_queue]
        )
    )

    # Allow S3 to publish into the topic.
    source_sns_topic_policy = template.add_resource(
        TopicPolicy(
            "SourceForwardingTopicPolicy",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Id="AllowS3PutMessageInSNS",
                Statement=[
                    Statement(
                        Sid="AllowS3PutMessages",
                        Principal=Principal("Service", "s3.amazonaws.com"),
                        Effect=Allow,
                        Action=[
                            Action("sns", "Publish"),
                        ],
                        Resource=["*"],
                    )
                ]
            ),
            Topics=[Ref(source_sns_topic)],
        )
    )

    # Allow SNS (any principal) to send messages into the task queue.
    sns_sqs_policy = template.add_resource(
        QueuePolicy(
            "AllowSNSPutMessagesInSQS",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Id="AllowSNSPutMessagesInSQS",
                Statement=[
                    Statement(
                        Sid="AllowSNSPutMessagesInSQS2",
                        Principal=Principal("*"),
                        Effect=Allow,
                        Action=[
                            Action("sqs", "SendMessage"),
                        ],
                        Resource=["*"],
                    )
                ]
            ),
            Queues=[Ref(spider_tasks_queue)],
            DependsOn=[spider_tasks_queue],
        )
    )

    # Buckets
    # Source bucket publishes object-created events to the SNS topic; the
    # DependsOn on the topic policy ensures S3 can validate the destination.
    source_bucket_name = f'{env}-source-bucket'
    source_bucket = template.add_resource(
        Bucket(
            "SourceBucket",
            BucketName=source_bucket_name,
            NotificationConfiguration=NotificationConfiguration(
                TopicConfigurations=[
                    TopicConfigurations(
                        Topic=Ref(source_sns_topic),
                        Event="s3:ObjectCreated:*",
                    )
                ],
            ),
            DependsOn=[source_sns_topic_policy],
        )
    )

    results_bucket_name = f'{env}-results-bucket'
    results_bucket = template.add_resource(
        Bucket(
            "ResultsBucket",
            BucketName=results_bucket_name,
        )
    )

    # Lambda trigger
    template.add_resource(
        EventSourceMapping(
            "TriggerLambdaSpiderFromSQS",
            EventSourceArn=GetAtt(spider_tasks_queue, "Arn"),
            FunctionName=Ref(spider_lambda),
            BatchSize=1,  # Default process tasks one by one
        )
    )

    return template.to_json()
)) template.add_parameter_to_group(start_insights_code_key, 'Lambda Keys') template.add_parameter_to_group(rekognition_code_key, 'Lambda Keys') template.add_parameter_to_group(video_metadata_event_code_key, 'Lambda Keys') template.add_parameter_to_group(rekognition_results_code_key, 'Lambda Keys') rekognition_updates_queue = template.add_resource( Queue('RekognitionUpdatesQueue', )) rekognition_updates_topic = template.add_resource( Topic( 'RekognitionUpdatesTopic', Subscription=[ Subscription( Endpoint=GetAtt(rekognition_updates_queue, 'Arn'), Protocol='sqs', ) ], )) template.add_resource( QueuePolicy( "RekognitionUpdatesQueuePolicy", Queues=[Ref(rekognition_updates_queue)], PolicyDocument={ "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Action": ["sqs:SendMessage"],
"*", "Effect": "Allow" }, { "Action": ["sns:Publish"], "Resource": "*", "Effect": "Allow" }, { "Action": ["lambda:*"], "Resource": "*", "Effect": "Allow" }] }, Roles=[Ref("Civ6NotifLambdaExecutionRole")]) Civ6Notif_SNS = Topic("Civ6NotifTopic", DisplayName="Civ6Notification") code = [ "from botocore.vendored import requests\n", "import json\n", "import os\n", "import boto3\n", "import logging\n", "logger = logging.getLogger()\n", "logger.setLevel(logging.INFO)\n", "\n", "def lambda_handler(event, context):\n", "\tlogger.INFO('Received event: %s', event)\n", "\tmsg = f'It is now {event[\"value2\"]}\\'s turn in Civ6 game {event[\"value1\"]}'\n\n", "\tif os.environ['SendToDiscord'] == 'True':\n", "\t\tr = requests.post(os.environ['DiscordWebhookURL'],json={'content':msg})\n", "\t\tlogger.INFO('%s', r)\n", "\n\n", "\tif os.environ['SendToSNS'] == 'True':\n", "\t\tclient = boto3.client('sns')\n", "\t\tclient.publish(TopicArn=os.environ['SNSTopic'],Message=msg,Subject='Civilization 6 Play By Cloud Notifications')\n\n", "\treturn {}"
"""SNS topics for NAT emergencies and CloudWatch alarms, exported by ARN."""
from troposphere.sns import Topic
from troposphere import (Output, Ref)

from . import template as t, common_tags


def generate_topic_name(topic_name):
    """Prefix *topic_name* with the stack's Environment and VPC tags."""
    return '-'.join(
        (common_tags['Environment'], common_tags['VPC'], topic_name))


# Topics
nat_emergency_topic = t.add_resource(Topic(
    'NatEmergencyTopic',
    TopicName=generate_topic_name('NatEmergencyTopic')))

cloudwatch_alarm_topic = t.add_resource(Topic(
    'CloudWatchAlarmTopic',
    TopicName=generate_topic_name('CloudWatchAlarms')))

# Outputs
t.add_output([
    Output("CloudWatchAlarmTopic", Value=Ref(cloudwatch_alarm_topic)),
    Output("NatEmergencyTopicARN", Value=Ref(nat_emergency_topic)),
])
dead_letter_queue = template.add_resource( Queue("deadSQS", QueueName="dead__letter_queue__iac_sqs_sample")) sqs_aws = template.add_resource( Queue("iacSQS", QueueName="iac_sqs_sample", RedrivePolicy=RedrivePolicy( deadLetterTargetArn=GetAtt(dead_letter_queue, "Arn"), maxReceiveCount="5", ))) sns_aws = template.add_resource( Topic("iacSNS", TopicName="iac_sns_sample", Subscription=[ Subscription(Protocol="sqs", Endpoint=GetAtt(sqs_aws, "Arn")) ])) template.add_output([ Output("SourceQueueURL", Description="URL of the source queue", Value=Ref(sqs_aws)), Output("SourceQueueARN", Description="ARN of the source queue", Value=GetAtt(sqs_aws, "Arn")), Output("DeadLetterQueueURL", Description="URL of the dead letter queue", Value=Ref(dead_letter_queue)), Output("DeadLetterQueueARN", Description="ARN of the dead letter queue",
"""Minimal template: an SNS topic plus a standalone email subscription."""
from troposphere import Output, Parameter, Ref, Template
from troposphere.sns import SubscriptionResource, Topic

template = Template()

email = template.add_parameter(Parameter('Email', Type='String'))

topic = template.add_resource(
    Topic('SnsTopic', DisplayName='sns-topic', TopicName='sns-topic'))

# Subscription modelled as its own resource rather than inline on the topic.
template.add_resource(
    SubscriptionResource(
        'Subscription',
        Protocol='email',
        Endpoint=Ref(email),
        TopicArn=Ref(topic)))

template.add_output(
    Output('TopicArn', Description='SNS Topic ARN', Value=Ref(topic)))

print(template.to_json())
), } conditions = { "IsProdStage": Equals( Ref("Branch"), "master" ) } resources = { "PipelineNotificationsTopic": Topic( "PipelineNotificationsTopic", Condition="IsProdStage", DisplayName=Sub("${AppName}-notifications-${AWS::Region}"), ), "DynamicPipelineCleanupDev": CustomResource( "DynamicPipelineCleanupDev", Version="1.0", ServiceToken=Ref("DynamicPipelineCleanupLambdaArn"), RoleArn=Sub("arn:aws:iam::${DevAwsAccountId}:role/CodePipelineServiceRole-${AWS::Region}-${" "DevAwsAccountId}-dev"), Region=Ref("AWS::Region"), StackName=If("IsProdStage", Sub("${AppName}-dev"), Sub("${AppName}-dev-${Suffix}") ) ),
"Api{e}-{e}-VPC-Bastion-SG".format(e=env) }, { "Key": "Owner", "Value": "Foo industries" }, { "Key": "Service", "Value": "ServiceVPC" }, { "Key": "VPC", "Value": env }], VpcId=Ref("VPC")) template.add_resource(bastion_sg) cloud_watch_alarm_topic = Topic( "CloudWatchAlarmTopic", TopicName="Api{e}-{e}-CloudWatchAlarms".format(e=env)) template.add_resource(cloud_watch_alarm_topic) dhcp_options = DHCPOptions("DomainName", DomainName=Join( "", [Ref("AWS::Region"), ".compute.internal"]), DomainNameServers=["AmazonProvidedDNS"], Tags=[{ "Key": "Environment", "Value": "Api{e}".format(e=env) }, { "Key": "Name", "Value": "Api{e}-{e}-DhcpOptions".format(e=env)
def _deploy_service(self, service: ff.Service):
    """Deploy one service as a CloudFormation stack.

    Builds a template with a sync (API Gateway-backed) and an async
    (SQS-fed) Lambda, HTTP API routes, optional error alarms, the service's
    queue/DLQ/topic, and cross-context SNS subscriptions; then creates or
    updates the stack and runs any DDL.
    """
    context = self._context_map.get_context(service.name)
    self._package_and_deploy_code(context)

    template = Template()
    template.set_version('2010-09-09')

    # Tunable parameters with service-specific logical names.
    memory_size = template.add_parameter(Parameter(
        f'{self._lambda_resource_name(service.name)}MemorySize',
        Type=NUMBER,
        Default='3008'
    ))

    timeout_gateway = template.add_parameter(Parameter(
        f'{self._lambda_resource_name(service.name)}GatewayTimeout',
        Type=NUMBER,
        Default='30'
    ))

    timeout_async = template.add_parameter(Parameter(
        f'{self._lambda_resource_name(service.name)}AsyncTimeout',
        Type=NUMBER,
        Default='900'
    ))

    role_title = f'{self._lambda_resource_name(service.name)}ExecutionRole'
    self._add_role(role_title, template)

    # Sync Lambda: serves API Gateway requests within the gateway timeout.
    params = {
        'FunctionName': f'{self._service_name(service.name)}Sync',
        'Code': Code(
            S3Bucket=self._bucket,
            S3Key=self._code_key
        ),
        'Handler': 'handlers.main',
        'Role': GetAtt(role_title, 'Arn'),
        'Runtime': 'python3.7',
        'MemorySize': Ref(memory_size),
        'Timeout': Ref(timeout_gateway),
        'Environment': self._lambda_environment(context)
    }
    if self._security_group_ids and self._subnet_ids:
        params['VpcConfig'] = VPCConfig(
            SecurityGroupIds=self._security_group_ids,
            SubnetIds=self._subnet_ids
        )
    api_lambda = template.add_resource(Function(
        f'{self._lambda_resource_name(service.name)}Sync',
        **params
    ))

    route = inflection.dasherize(context.name)
    proxy_route = f'{route}/{{proxy+}}'
    # Let API Gateway invoke the sync Lambda for this service's routes.
    template.add_resource(Permission(
        f'{self._lambda_resource_name(service.name)}SyncPermission',
        Action='lambda:InvokeFunction',
        FunctionName=f'{self._service_name(service.name)}Sync',
        Principal='apigateway.amazonaws.com',
        SourceArn=Join('', [
            'arn:aws:execute-api:',
            self._region,
            ':',
            self._account_id,
            ':',
            ImportValue(self._rest_api_reference()),
            '/*/*/',
            route,
            '*'
        ]),
        DependsOn=api_lambda
    ))

    # Async Lambda: same code, longer timeout, fed by SQS (mapping below).
    params = {
        'FunctionName': f'{self._service_name(service.name)}Async',
        'Code': Code(
            S3Bucket=self._bucket,
            S3Key=self._code_key
        ),
        'Handler': 'handlers.main',
        'Role': GetAtt(role_title, 'Arn'),
        'Runtime': 'python3.7',
        'MemorySize': Ref(memory_size),
        'Timeout': Ref(timeout_async),
        'Environment': self._lambda_environment(context)
    }
    if self._security_group_ids and self._subnet_ids:
        params['VpcConfig'] = VPCConfig(
            SecurityGroupIds=self._security_group_ids,
            SubnetIds=self._subnet_ids
        )
    async_lambda = template.add_resource(Function(
        f'{self._lambda_resource_name(service.name)}Async',
        **params
    ))

    # HTTP API v2 integration + base and {proxy+} routes to the sync Lambda.
    integration = template.add_resource(Integration(
        self._integration_name(context.name),
        ApiId=ImportValue(self._rest_api_reference()),
        PayloadFormatVersion='2.0',
        IntegrationType='AWS_PROXY',
        IntegrationUri=Join('', [
            'arn:aws:lambda:',
            self._region,
            ':',
            self._account_id,
            ':function:',
            Ref(api_lambda),
        ]),
    ))

    template.add_resource(Route(
        f'{self._route_name(context.name)}Base',
        ApiId=ImportValue(self._rest_api_reference()),
        RouteKey=f'ANY /{route}',
        AuthorizationType='NONE',
        Target=Join('/', ['integrations', Ref(integration)]),
        DependsOn=integration
    ))

    template.add_resource(Route(
        f'{self._route_name(context.name)}Proxy',
        ApiId=ImportValue(self._rest_api_reference()),
        RouteKey=f'ANY /{proxy_route}',
        AuthorizationType='NONE',
        Target=Join('/', ['integrations', Ref(integration)]),
        DependsOn=integration
    ))

    # Error alarms / subscriptions
    if 'errors' in self._aws_config:
        alerts_topic = template.add_resource(Topic(
            self._alert_topic_name(service.name),
            TopicName=self._alert_topic_name(service.name)
        ))
        self._add_error_alarm(template, f'{self._service_name(context.name)}Sync', context.name, alerts_topic)
        self._add_error_alarm(template, f'{self._service_name(context.name)}Async', context.name, alerts_topic)

        if 'email' in self._aws_config.get('errors'):
            template.add_resource(SubscriptionResource(
                self._alarm_subscription_name(context.name),
                Protocol='email',
                Endpoint=self._aws_config.get('errors').get('email').get('recipients'),
                TopicArn=self._alert_topic_arn(context.name),
                DependsOn=[alerts_topic]
            ))

    # Queues / Topics
    # Group this context's event subscriptions by the context they listen to.
    subscriptions = {}
    for subscription in self._get_subscriptions(context):
        if subscription['context'] not in subscriptions:
            subscriptions[subscription['context']] = []
        subscriptions[subscription['context']].append(subscription)

    dlq = template.add_resource(Queue(
        f'{self._queue_name(context.name)}Dlq',
        QueueName=f'{self._queue_name(context.name)}Dlq',
        VisibilityTimeout=905,
        ReceiveMessageWaitTimeSeconds=20,
        MessageRetentionPeriod=1209600
    ))
    self._queue_policy(template, dlq, f'{self._queue_name(context.name)}Dlq', subscriptions)

    queue = template.add_resource(Queue(
        self._queue_name(context.name),
        QueueName=self._queue_name(context.name),
        VisibilityTimeout=905,
        ReceiveMessageWaitTimeSeconds=20,
        MessageRetentionPeriod=1209600,
        RedrivePolicy=RedrivePolicy(
            deadLetterTargetArn=GetAtt(dlq, 'Arn'),
            maxReceiveCount=1000
        ),
        DependsOn=dlq
    ))
    self._queue_policy(template, queue, self._queue_name(context.name), subscriptions)

    # Drain the queue into the async Lambda one message at a time.
    template.add_resource(EventSourceMapping(
        f'{self._lambda_resource_name(context.name)}AsyncMapping',
        BatchSize=1,
        Enabled=True,
        EventSourceArn=GetAtt(queue, 'Arn'),
        FunctionName=f'{self._service_name(service.name)}Async',
        DependsOn=[queue, async_lambda]
    ))

    topic = template.add_resource(Topic(
        self._topic_name(context.name),
        TopicName=self._topic_name(context.name)
    ))

    # Subscribe this context's queue to its own topic, and to the topics of
    # any other context it listens to (creating those topics when missing).
    for context_name, list_ in subscriptions.items():
        if context_name == context.name and len(list_) > 0:
            template.add_resource(SubscriptionResource(
                self._subscription_name(context_name),
                Protocol='sqs',
                Endpoint=GetAtt(queue, 'Arn'),
                TopicArn=self._topic_arn(context.name),
                FilterPolicy={
                    '_name': [x['name'] for x in list_],
                },
                RedrivePolicy={
                    'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                },
                DependsOn=[queue, dlq, topic]
            ))
        elif len(list_) > 0:
            if context_name not in self._context_map.contexts:
                self._find_or_create_topic(context_name)
            template.add_resource(SubscriptionResource(
                self._subscription_name(context.name, context_name),
                Protocol='sqs',
                Endpoint=GetAtt(queue, 'Arn'),
                TopicArn=self._topic_arn(context_name),
                FilterPolicy={
                    '_name': [x['name'] for x in list_]
                },
                RedrivePolicy={
                    'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                },
                DependsOn=[queue, dlq]
            ))

    self.info('Deploying stack')
    stack_name = self._stack_name(context.name)
    try:
        # Update when the stack exists; create on the specific "does not
        # exist" ClientError, re-raise anything else.
        self._cloudformation_client.describe_stacks(StackName=stack_name)
        self._update_stack(self._stack_name(context.name), template)
    except ClientError as e:
        if f'Stack with id {stack_name} does not exist' in str(e):
            self._create_stack(self._stack_name(context.name), template)
        else:
            raise e

    self._execute_ddl(context)

    self.info('Done')
# CloudFormation caps inline (ZipFile) Lambda code at 4096 characters.
if len(content) > 4096:
    raise Exception("Clean ES function too long! Has " + str(len(content)))

# Lambda that removes old ElasticSearch indexes.
clea_es_function = t.add_resource(
    Function('LambdaCleanESFunction',
             Description='Removes old ElasticSearch indexes',
             Code=Code(ZipFile=content),
             Handler='index.lambda_handler',
             MemorySize=128,
             Role=GetAtt(es_exec_role, 'Arn'),
             Runtime='python2.7',
             Timeout=60))

# Email notification target for Lambda error alarms.
alarm_topic = t.add_resource(
    Topic('LambdaErrorTopic',
          Subscription=[
              Subscription(Protocol="email", Endpoint=Ref(param_alarm_email))
          ]))

# Fire when the base function reports any error in a 5-minute window.
t.add_resource(
    Alarm("LambdaBaseErrorsAlarm",
          ComparisonOperator='GreaterThanThreshold',
          EvaluationPeriods=1,
          MetricName='Errors',
          Namespace='AWS/Lambda',
          Dimensions=[
              MetricDimension(Name='FunctionName', Value=Ref(base_function))
          ],
          Period=300,
          Statistic='Maximum',
          Threshold='0',
          AlarmActions=[Ref(alarm_topic)]))
alarmemail = t.add_parameter( Parameter( "AlarmEmail", Default="*****@*****.**", Description="Email address to notify if there are any " "operational issues", Type="String", )) myqueue = t.add_resource(Queue("MyQueue")) alarmtopic = t.add_resource( Topic( "AlarmTopic", Subscription=[ Subscription(Endpoint=Ref(alarmemail), Protocol="email"), ], )) queuedepthalarm = t.add_resource( Alarm( "QueueDepthAlarm", AlarmDescription="Alarm if queue depth grows beyond 10 messages", Namespace="AWS/SQS", MetricName="ApproximateNumberOfMessagesVisible", Dimensions=[ MetricDimension(Name="QueueName", Value=GetAtt(myqueue, "QueueName")), ], Statistic="Sum", Period="300",
"RequestEncodingLambdaLogGroup", LogGroupName=Join( '/', ['/aws/lambda', Ref(request_encoding_function)]), RetentionInDays=7, )) request_encoding_topic = template.add_resource( Topic( 'RequestEncodingTopic', Subscription=[ Subscription( Protocol='sqs', Endpoint=GetAtt(request_encoding_queue, 'Arn'), ), Subscription( Protocol='lambda', Endpoint=GetAtt(request_encoding_function, 'Arn'), ), Subscription( Protocol='sqs', Endpoint=GetAtt(start_media_insights_queue, 'Arn'), ) ], )) template.add_resource( Permission( 'InvokeRequestEncodingFunctionPermission', Action='lambda:InvokeFunction', FunctionName=Ref(request_encoding_function), Principal='sns.amazonaws.com',
OperatorEmail = t.add_parameter(Parameter( "OperatorEmail", Type="String", Description="Email address to notify when new logs are published.", )) S3Bucket = t.add_resource(Bucket( "S3Bucket", DeletionPolicy="Retain" )) Topic = t.add_resource(Topic( "Topic", Subscription=[ Subscription( Endpoint=Ref(OperatorEmail), Protocol="email", ), ], )) TopicPolicy = t.add_resource(TopicPolicy( "TopicPolicy", Topics=[Ref(Topic)], PolicyDocument={ "Version": "2008-10-17", "Statement": [{ "Action": "SNS:Publish", "Principal": { "Service": "cloudtrail.amazonaws.com" }, "Resource": "*", "Effect": "Allow", "Sid": "AWSCloudTrailSNSPolicy"
from template import t from troposphere import GetAtt, Ref, Sub from troposphere.awslambda import Permission from troposphere.sns import Subscription, Topic from troposphere.ssm import Parameter from .hyp3_send_email import send_email print(' adding sns') finish_sns = t.add_resource(Topic( "HyP3FinishEventSNSTopic", Subscription=[ Subscription( Protocol="lambda", Endpoint=GetAtt(send_email, "Arn") ) ] )) sns_invoke_permissions = t.add_resource(Permission( "SNSSchedulerInvokePermissions", Action="lambda:InvokeFunction", Principal="sns.amazonaws.com", SourceArn=Ref(finish_sns), FunctionName=GetAtt(send_email, "Arn") )) ssm_sns_arn = t.add_resource(Parameter( "HyP3SSMParameterFinishEventSNSArn",
def emit_configuration():
    # Build an SQS queue for the babysitter
    # (the triple-quoted string below is deliberately disabled code, kept
    # verbatim in case the deregistration-topic option is revived)
    """create_queue = template.add_parameter(
        Parameter(
            'CreateDeregistrationTopic',
            Type='String',
            Description='Whether or not to create the Chef Deregistration queue. This option is provided in case the queue already exists.',
            Default='no',
            AllowedValues=['yes', 'no'],
            ConstraintDescription='Answer must be yes or no'
        )
    )

    conditions = {
        "CreateDeregCondition": Equals(
            Ref(create_queue), "yes"
        )
    }

    for c in conditions:
        template.add_condition(c, conditions[c])"""
    # Physical queue name: chef-deregistration_<cloud>_<env>.
    queue_name = '_'.join(['chef-deregistration', CLOUDNAME, CLOUDENV])
    queue = template.add_resource(
        Queue(
            cfn.sanitize_id(queue_name),
            VisibilityTimeout=60,
            MessageRetentionPeriod=1209600,  # 14 days (SQS maximum)
            MaximumMessageSize=16384,
            QueueName=queue_name,
        ))
    # Alarm topic fans out into the queue via an SQS subscription.
    alert_topic = template.add_resource(
        Topic(
            cfn.sanitize_id("BabysitterAlarmTopic{0}".format(CLOUDENV)),
            DisplayName='Babysitter Alarm',
            TopicName=queue_name,
            Subscription=[
                Subscription(Endpoint=GetAtt(queue, "Arn"), Protocol='sqs'),
            ],
            DependsOn=queue.title,
        ))
    queue_depth_alarm = template.add_resource(
        Alarm(
            "BabysitterQueueDepthAlarm",
            AlarmDescription=
            'Alarm if the queue depth grows beyond 200 messages',
            Namespace='AWS/SQS',
            MetricName='ApproximateNumberOfMessagesVisible',
            Dimensions=[
                MetricDimension(Name='QueueName',
                                Value=GetAtt(queue, "QueueName"))
            ],
            Statistic='Sum',
            Period='300',
            EvaluationPeriods='1',
            Threshold='200',
            ComparisonOperator='GreaterThanThreshold',
            # NOTE(review): alarm actions are intentionally disabled — the
            # alarm currently records state only.
            #AlarmActions=[Ref(alert_topic), ],
            #InsufficientDataActions=[Ref(alert_topic), ],
            DependsOn=alert_topic.title,
        ),
    )
    # Queue policy: only messages originating from the alert topic may be
    # delivered into the queue.
    queue_policy = {
        "Version": "2012-10-17",
        "Id": "BabysitterSNSPublicationPolicy",
        "Statement": [{
            "Sid": "AllowSNSPublishing",
            "Effect": "Allow",
            "Principal": {
                "AWS": "*"
            },
            "Action": ["sqs:SendMessage"],
            "Resource": GetAtt(queue, "Arn"),
            "Condition": {
                "ArnEquals": {
                    "aws:SourceArn": Ref(alert_topic)
                }
            }
        }]
    }

    # Publish all events from SNS to the Queue
    template.add_resource(
        QueuePolicy(
            "BabysitterPublishSNStoSQSPolicy",
            Queues=[Ref(queue)],
            PolicyDocument=queue_policy,
            DependsOn=[queue.title, alert_topic.title],
        ))

    # Expose the topic for other modules that attach alarms to it.
    cfn.alert_topic = alert_topic
from troposphere.iam import Policy as IAMPolicy from troposphere.awslambda import Function, Code, Permission from troposphere.sns import Subscription, Topic, TopicPolicy from troposphere.cloudtrail import Trail from troposphere.s3 import Bucket, BucketPolicy from troposphere.cloudwatch import Alarm, MetricDimension from awacs.aws import Allow, Statement, Action, Principal, Policy, Condition, StringEquals, ArnEquals from awacs.sts import AssumeRole import os t = Template() t.add_description('Lambda function monitoring cloudtrail logs') notificationTopic = t.add_resource( Topic("NotifcationTopic", DisplayName="CloudTrail Monitor Alerts")) bucket = t.add_resource( Bucket("Bucket", AccessControl="Private", BucketName=Join("-", [Ref("AWS::StackName"), Ref("AWS::AccountId")]), DeletionPolicy="Retain")) bucket_policy = t.add_resource( BucketPolicy( "BucketPolicy", Bucket=Ref(bucket), PolicyDocument=Policy(Statement=[ Statement( Sid="AWSCloudTrailAclCheck",
def _deploy_service(self, service: ff.Service):
    """Build and deploy the CloudFormation stack for a single service.

    Provisions, in one template: the sync (API-gateway-facing) and async
    lambda functions, the HTTP API integration and routes, timer rules for
    command handlers, an optional error-alert SNS topic with email
    subscriptions, the context's SQS queue + DLQ and SNS topic (including
    cross-context subscriptions), and the DynamoDB document table. The
    rendered template is uploaded to S3 and applied via create/update stack.

    :param service: the service to deploy; its name keys every resource name.

    Fixes in this revision:
      * ``int(None)`` raises ``TypeError``, not ``ValueError`` — the
        ``memory_async`` fallback now catches both so a missing key no
        longer crashes the deploy.
      * Email alert subscriptions used one logical ID for every recipient,
        which troposphere/CloudFormation reject as duplicates; recipients
        after the first now get an indexed logical ID (the first keeps the
        original ID, so single-recipient stacks are unchanged).
    """
    context = self._context_map.get_context(service.name)
    if self._aws_config.get('image_uri') is None:
        # No prebuilt container image: bundle the code and push it to S3.
        self._package_and_deploy_code(context)

    template = Template()
    template.set_version('2010-09-09')

    memory_size = template.add_parameter(Parameter(
        f'{self._lambda_resource_name(service.name)}MemorySize',
        Type=NUMBER,
        Default=self._aws_config.get('memory_sync', '3008')
    ))
    timeout_gateway = template.add_parameter(Parameter(
        f'{self._lambda_resource_name(service.name)}GatewayTimeout',
        Type=NUMBER,
        Default='30'
    ))
    timeout_async = template.add_parameter(Parameter(
        f'{self._lambda_resource_name(service.name)}AsyncTimeout',
        Type=NUMBER,
        Default='900'
    ))

    role_title = f'{self._lambda_resource_name(service.name)}ExecutionRole'
    role = self._add_role(role_title, template)

    # --- Sync (gateway) lambda -------------------------------------------
    params = {
        'FunctionName': f'{self._service_name(service.name)}Sync',
        'Role': GetAtt(role_title, 'Arn'),
        'MemorySize': Ref(memory_size),
        'Timeout': Ref(timeout_gateway),
        'Environment': self._lambda_environment(context)
    }
    image_uri = self._aws_config.get('image_uri')
    if image_uri is not None:
        params.update({
            'Code': Code(ImageUri=image_uri),
            'PackageType': 'Image',
        })
    else:
        params.update({
            'Code': Code(S3Bucket=self._bucket, S3Key=self._code_key),
            'Runtime': 'python3.7',
            'Handler': 'handlers.main',
        })
    if self._security_group_ids and self._subnet_ids:
        params['VpcConfig'] = VPCConfig(
            SecurityGroupIds=self._security_group_ids,
            SubnetIds=self._subnet_ids)

    api_lambda = template.add_resource(
        Function(f'{self._lambda_resource_name(service.name)}Sync', **params))

    route = inflection.dasherize(context.name)
    proxy_route = f'{route}/{{proxy+}}'
    template.add_resource(Permission(
        f'{self._lambda_resource_name(service.name)}SyncPermission',
        Action='lambda:InvokeFunction',
        FunctionName=f'{self._service_name(service.name)}Sync',
        Principal='apigateway.amazonaws.com',
        SourceArn=Join('', [
            'arn:aws:execute-api:', self._region, ':', self._account_id, ':',
            ImportValue(self._rest_api_reference()), '/*/*/', route, '*'
        ]),
        DependsOn=api_lambda))

    # --- Async lambda -----------------------------------------------------
    if self._adaptive_memory:
        # Adaptive memory starts small; the static 3008 default only applies
        # on the non-adaptive path (where the sync parameter is reused).
        value = '256'
        try:
            value = int(self._aws_config.get('memory_async'))
        except (TypeError, ValueError):
            # Missing key (int(None) -> TypeError) or a non-numeric string:
            # keep the adaptive default.
            pass
        memory_size = template.add_parameter(Parameter(
            f'{self._lambda_resource_name(service.name)}MemorySizeAsync',
            Type=NUMBER,
            Default=value))

    params = {
        'FunctionName': self._lambda_function_name(service.name, 'Async'),
        'Role': GetAtt(role_title, 'Arn'),
        'MemorySize': Ref(memory_size),
        'Timeout': Ref(timeout_async),
        'Environment': self._lambda_environment(context)
    }
    if image_uri is not None:
        params.update({
            'Code': Code(ImageUri=image_uri),
            'PackageType': 'Image',
        })
    else:
        params.update({
            'Code': Code(S3Bucket=self._bucket, S3Key=self._code_key),
            'Runtime': 'python3.7',
            'Handler': 'handlers.main',
        })
    if self._security_group_ids and self._subnet_ids:
        params['VpcConfig'] = VPCConfig(
            SecurityGroupIds=self._security_group_ids,
            SubnetIds=self._subnet_ids)

    async_lambda = template.add_resource(
        Function(self._lambda_resource_name(service.name, type_='Async'),
                 **params))

    if self._adaptive_memory:
        self._add_adaptive_memory_functions(template, context, timeout_async,
                                            role_title, async_lambda)
        # self._add_adaptive_memory_streams(template, context, async_lambda, role)

    # --- Timers -----------------------------------------------------------
    for cls, _ in context.command_handlers.items():
        if not cls.has_timer():
            continue
        timer = cls.get_timer()
        if timer.environment is not None and timer.environment != self._env:
            continue
        if isinstance(timer.command, str):
            timer_name = timer.command
        else:
            timer_name = timer.command.__name__

        target = Target(
            f'{self._service_name(service.name)}AsyncTarget',
            Arn=GetAtt(
                self._lambda_resource_name(service.name, type_='Async'),
                'Arn'),
            Id=self._lambda_resource_name(service.name, type_='Async'),
            Input=f'{{"_context": "{context.name}", "_type": "command", "_name": "{cls.__name__}"}}'
        )
        rule = template.add_resource(
            Rule(f'{timer_name}TimerRule',
                 ScheduleExpression=f'cron({timer.cron})',
                 State='ENABLED',
                 Targets=[target]))
        template.add_resource(
            Permission(f'{timer_name}TimerPermission',
                       Action='lambda:invokeFunction',
                       Principal='events.amazonaws.com',
                       FunctionName=Ref(async_lambda),
                       SourceArn=GetAtt(rule, 'Arn')))

    # --- HTTP API integration and routes ----------------------------------
    integration = template.add_resource(
        Integration(
            self._integration_name(context.name),
            ApiId=ImportValue(self._rest_api_reference()),
            PayloadFormatVersion='2.0',
            IntegrationType='AWS_PROXY',
            IntegrationUri=Join('', [
                'arn:aws:lambda:', self._region, ':', self._account_id,
                ':function:', Ref(api_lambda),
            ]),
        ))
    template.add_resource(
        Route(f'{self._route_name(context.name)}Base',
              ApiId=ImportValue(self._rest_api_reference()),
              RouteKey=f'ANY /{route}',
              AuthorizationType='NONE',
              Target=Join('/', ['integrations', Ref(integration)]),
              DependsOn=integration))
    template.add_resource(
        Route(f'{self._route_name(context.name)}Proxy',
              ApiId=ImportValue(self._rest_api_reference()),
              RouteKey=f'ANY /{proxy_route}',
              AuthorizationType='NONE',
              Target=Join('/', ['integrations', Ref(integration)]),
              DependsOn=integration))

    # --- Error alarms / subscriptions -------------------------------------
    if 'errors' in self._aws_config:
        alerts_topic = template.add_resource(
            Topic(self._alert_topic_name(service.name),
                  TopicName=self._alert_topic_name(service.name)))
        if 'email' in self._aws_config.get('errors'):
            recipients = self._aws_config.get('errors').get('email').get(
                'recipients').split(',')
            for i, address in enumerate(recipients):
                # Logical IDs must be unique; suffix every recipient after
                # the first so multi-recipient configs deploy (the first
                # keeps the legacy ID for backward compatibility).
                title = self._alarm_subscription_name(context.name)
                if i > 0:
                    title = f'{title}{i}'
                template.add_resource(
                    SubscriptionResource(
                        title,
                        Protocol='email',
                        Endpoint=address,
                        # NOTE(review): topic is named from service.name but
                        # the ARN here is built from context.name -- confirm
                        # these always coincide.
                        TopicArn=self._alert_topic_arn(context.name),
                        DependsOn=[alerts_topic]))

    # --- Queues / Topics ---------------------------------------------------
    # Group inbound subscriptions by originating context.
    subscriptions = {}
    for subscription in self._get_subscriptions(context):
        if subscription['context'] not in subscriptions:
            subscriptions[subscription['context']] = []
        subscriptions[subscription['context']].append(subscription)

    dlq = template.add_resource(
        Queue(f'{self._queue_name(context.name)}Dlq',
              QueueName=f'{self._queue_name(context.name)}Dlq',
              VisibilityTimeout=905,
              ReceiveMessageWaitTimeSeconds=20,
              MessageRetentionPeriod=1209600))
    self._queue_policy(template, dlq, f'{self._queue_name(context.name)}Dlq',
                       subscriptions)

    queue = template.add_resource(
        Queue(self._queue_name(context.name),
              QueueName=self._queue_name(context.name),
              VisibilityTimeout=905,  # > async lambda 900s timeout
              ReceiveMessageWaitTimeSeconds=20,
              MessageRetentionPeriod=1209600,
              RedrivePolicy=RedrivePolicy(
                  deadLetterTargetArn=GetAtt(dlq, 'Arn'),
                  maxReceiveCount=1000),
              DependsOn=dlq))
    self._queue_policy(template, queue, self._queue_name(context.name),
                       subscriptions)

    template.add_resource(
        EventSourceMapping(
            f'{self._lambda_resource_name(context.name)}AsyncMapping',
            BatchSize=1,
            Enabled=True,
            EventSourceArn=GetAtt(queue, 'Arn'),
            FunctionName=self._lambda_function_name(service.name, 'Async'),
            DependsOn=[queue, async_lambda]))

    topic = template.add_resource(
        Topic(self._topic_name(context.name),
              TopicName=self._topic_name(context.name)))

    for context_name, list_ in subscriptions.items():
        if context_name == context.name and len(list_) > 0:
            # Subscribe our own queue to our own topic, filtered by message name.
            template.add_resource(
                SubscriptionResource(
                    self._subscription_name(context_name),
                    Protocol='sqs',
                    Endpoint=GetAtt(queue, 'Arn'),
                    TopicArn=self._topic_arn(context.name),
                    FilterPolicy={
                        '_name': [x['name'] for x in list_],
                    },
                    RedrivePolicy={
                        'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                    },
                    DependsOn=[queue, dlq, topic]))
        elif len(list_) > 0:
            # Cross-context subscription; make sure the foreign topic exists.
            if context_name not in self._context_map.contexts:
                self._find_or_create_topic(context_name)
            template.add_resource(
                SubscriptionResource(
                    self._subscription_name(context.name, context_name),
                    Protocol='sqs',
                    Endpoint=GetAtt(queue, 'Arn'),
                    TopicArn=self._topic_arn(context_name),
                    FilterPolicy={'_name': [x['name'] for x in list_]},
                    RedrivePolicy={
                        'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                    },
                    DependsOn=[queue, dlq]))

    # --- DynamoDB document table ------------------------------------------
    ddb_table = template.add_resource(
        Table(self._ddb_resource_name(context.name),
              TableName=self._ddb_table_name(context.name),
              AttributeDefinitions=[
                  AttributeDefinition(AttributeName='pk', AttributeType='S'),
                  AttributeDefinition(AttributeName='sk', AttributeType='S'),
              ],
              BillingMode='PAY_PER_REQUEST',
              KeySchema=[
                  KeySchema(AttributeName='pk', KeyType='HASH'),
                  KeySchema(AttributeName='sk', KeyType='RANGE'),
              ],
              TimeToLiveSpecification=TimeToLiveSpecification(
                  AttributeName='TimeToLive', Enabled=True)))
    template.add_output(
        Output("DDBTable", Value=Ref(ddb_table),
               Description="Document table"))

    for cb in self._pre_deployment_hooks:
        cb(template=template, context=context, env=self._env)

    # --- Render, upload, and apply the stack -------------------------------
    self.info('Deploying stack')
    self._s3_client.put_object(Body=template.to_json(),
                               Bucket=self._bucket,
                               Key=self._template_key)
    url = self._s3_client.generate_presigned_url(
        ClientMethod='get_object',
        Params={
            'Bucket': self._bucket,
            'Key': self._template_key
        })

    stack_name = self._stack_name(context.name)
    try:
        # Update if the stack exists; fall back to create when it does not.
        self._cloudformation_client.describe_stacks(StackName=stack_name)
        self._update_stack(self._stack_name(context.name), url)
    except ClientError as e:
        if f'Stack with id {stack_name} does not exist' in str(e):
            self._create_stack(self._stack_name(context.name), url)
        else:
            raise e

    for cb in self._post_deployment_hooks:
        cb(template=template, context=context, env=self._env)

    self._migrate_schema(context)
    self.info('Done')
)) apiKey = t.add_resource(api.ApiKey( "apiKey", StageKeys=[api.StageKey( RestApiId=Ref(restApi), StageName=Ref(apiStage) )] )) snsTopic = t.add_resource(Topic( "snsTopic", DisplayName="GDACK", TopicName="GDACK", Subscription=[ Subscription( Endpoint=GetAtt(lambdaFunction, "Arn"), Protocol="lambda" ) ] )) ApiPermission = t.add_resource(Permission( "ApiPermission", FunctionName=Ref(lambdaFunction), Action="lambda:InvokeFunction", Principal="apigateway.amazonaws.com", SourceArn=Join("", ["arn:aws:execute-api:us-west-2:", Ref("AWS::AccountId"), ":", Ref(restApi), "/*/*/*"]) )) # Output
Description="Email address to notify if there are any " "operational issues", Type="String", )) lambda_function_name = t.add_parameter( Parameter( "LambdaFunctionName", Description="The name of the lambda function", Type="String", )) alarm_topic = t.add_resource( Topic( "ChaosLambdaAlarmTopic", Subscription=[ Subscription(Endpoint=Ref(alarm_email), Protocol="email"), ], )) t.add_resource( Alarm( "ChaosLambdaErrorAlarm", AlarmName="chaosLambda/LambdaError", AlarmDescription="Enters ALARM state because we have received a lamdba " "error. See 'Errors' section on the following link: " "http://docs.aws.amazon.com/lambda/latest/dg/" "monitoring-functions-metrics.html for more " "information.", Namespace="AWS/Lambda", MetricName="Errors", Dimensions=[