def test_QueueName(self):
        """Validate FifoQueue/QueueName interaction on troposphere Queue.

        Valid: non-FIFO queue; FIFO queue whose literal name ends in
        '.fifo'; FIFO queue whose name is an AWS helper (Join) that cannot
        be checked at build time; FIFO queue with no explicit name.
        Invalid: FIFO queue with a literal name lacking the '.fifo' suffix.
        """
        accepted = (
            {"FifoQueue": False},
            {"FifoQueue": True, "QueueName": "foobar.fifo"},
            {"FifoQueue": True, "QueueName": Join("foo", "bar")},
            {"FifoQueue": True},
        )
        for kwargs in accepted:
            Queue("q", **kwargs).validate()

        # A literal FIFO queue name without the mandatory suffix must fail.
        with self.assertRaises(ValueError):
            Queue("q", FifoQueue=True, QueueName="foobar").validate()
def set_queue(queue, properties, redrive_policy=None):
    """
    Function to define and set the SQS Queue

    :param queue: Queue object
    :param dict properties: queue properties
    :param dict redrive_policy: redrive policy in case it has been defined

    :return: queue
    :rtype: troposphere.sqs.Queue
    """
    if redrive_policy is not None:
        properties.update(redrive_policy)
    # Extract any user-supplied name so it can be prefixed consistently.
    name = None
    if keyisset("QueueName", properties):
        name = properties.pop("QueueName")
    base_name = name if name else queue.name
    # FIFO queues must carry the mandatory ".fifo" suffix.
    suffix = ".fifo" if keyisset("FifoQueue", properties) else ""
    properties["QueueName"] = Sub(
        f"${{{ROOT_STACK_NAME_T}}}-{base_name}{suffix}")
    return Queue(queue.logical_name, **properties)
def emit_configuration():
    """Register the six farragut SQS queues for CLOUDENV on the template."""
    # Shared settings: 4-day retention, 256 KiB maximum message size.
    retention = 345600
    max_size = 262144
    queue_configs = [
        QueueConfig('farragut-aggregate-{0}'.format(CLOUDENV), 1800,
                    retention, max_size),
        QueueConfig('farragut-hourly-{0}'.format(CLOUDENV), 180,
                    retention, max_size),
        QueueConfig('farragut-leaf-site-{0}'.format(CLOUDENV), 30,
                    retention, max_size),
        QueueConfig('farragut-leaf-{0}'.format(CLOUDENV), 30,
                    retention, max_size),
        QueueConfig('farragut-{0}'.format(CLOUDENV), 1800,
                    retention, max_size),
        QueueConfig('farragut-import-{0}'.format(CLOUDENV), 30,
                    retention, max_size),
    ]
    for config in queue_configs:
        resource = Queue(
            cfn.sanitize_id(config.name),
            VisibilityTimeout=config.visibility,
            MessageRetentionPeriod=config.retention,
            MaximumMessageSize=config.max_size,
            QueueName=config.name,
        )
        template.add_resource(resource)
Exemple #4
0
def create_sns_sqs(template, sns_name, sqs_name):
    """Wire an SNS topic to fan out into an SQS queue.

    Adds the queue, the topic (with an 'sqs' subscription pointing at the
    queue's ARN), and a queue policy allowing the topic to send messages.
    Also adds outputs for the topic ARN and the queue ARN/URL.

    :param template: troposphere Template the resources are added to
    :param str sns_name: logical/physical name of the SNS topic
    :param str sqs_name: logical/physical name of the SQS queue
    """
    q = template.add_resource(Queue(sqs_name, QueueName=sqs_name))

    topic = template.add_resource(
        Topic(sns_name,
              TopicName=sns_name,
              Subscription=[
                  Subscription(Endpoint=GetAtt(q, 'Arn'), Protocol='sqs')
              ]))

    # BUGFIX: AWS::SQS::QueuePolicy's Queues property takes queue *URLs*;
    # Ref on an SQS queue yields its URL.  The previous code passed the
    # bare name string, which does not attach the policy to the queue.
    template.add_resource(
        QueuePolicy(sqs_name + sns_name + 'policy',
                    PolicyDocument={
                        "Version":
                        "2012-10-17",
                        "Id":
                        "MyQueuePolicy",
                        "Statement": [{
                            "Sid": "Allow-SendMessage-From-SNS-Topic",
                            "Effect": "Allow",
                            "Principal": "*",
                            "Action": ["sqs:SendMessage"],
                            "Resource": "*",
                            "Condition": {
                                "ArnEquals": {
                                    # Ref on an SNS topic returns its ARN.
                                    "aws:SourceArn": Ref(topic)
                                }
                            }
                        }]
                    },
                    Queues=[Ref(q)]))

    template.add_output(Output("sns", Description="SNS Arn", Value=Ref(topic)))

    template.add_output(
        Output("queuearn", Description="Queue Arn", Value=GetAtt(q, 'Arn')))

    template.add_output(
        Output("queueurl", Description="Queue URL", Value=Ref(q)))
Exemple #5
0
def set_queue(queue_name, properties, redrive_policy=None):
    """
    Function to define and set the SQS Queue

    :param str queue_name: name of the queue
    :param dict properties: queue properties
    :param dict redrive_policy: redrive policy in case it has been defined

    :return: queue
    :rtype: troposphere.sqs.Queue
    """
    res_name = NONALPHANUM.sub("", queue_name)
    if redrive_policy is not None:
        properties.update(redrive_policy)
    if keyisset("QueueName", properties):
        # Prefix the user-supplied name with the root stack name.
        # FIFO queues additionally need the mandatory ".fifo" suffix.
        queue_name = properties.pop("QueueName")
        suffix = ".fifo" if keyisset("FifoQueue", properties) else ""
        properties["QueueName"] = Sub(
            f"${{{ROOT_STACK_NAME_T}}}-{queue_name}{suffix}")
    return Queue(res_name, **properties)
Exemple #6
0
    "AWS CloudFormation Sample Template SQS_With_CloudWatch_Alarms: Sample "
    "template showing how to create an SQS queue with AWS CloudWatch alarms "
    "on queue depth. **WARNING** This template creates an Amazon SQS Queue "
    "and one or more Amazon CloudWatch alarms. You will be billed for the "
    "AWS resources used if you create a stack from this template.")

# Stack parameter: the e-mail address that receives alarm notifications.
alarmemail = t.add_parameter(
    Parameter(
        "AlarmEmail",
        Default="*****@*****.**",
        Description="Email address to notify if there are any "
        "operational issues",
        Type="String",
    ))

# The SQS queue being monitored; all attributes left at their defaults.
myqueue = t.add_resource(Queue("MyQueue"))

# SNS topic that delivers alarm notifications to the e-mail subscriber.
alarmtopic = t.add_resource(
    Topic(
        "AlarmTopic",
        Subscription=[
            Subscription(Endpoint=Ref(alarmemail), Protocol="email"),
        ],
    ))

queuedepthalarm = t.add_resource(
    Alarm(
        "QueueDepthAlarm",
        AlarmDescription="Alarm if queue depth grows beyond 10 messages",
        Namespace="AWS/SQS",
        MetricName="ApproximateNumberOfMessagesVisible",
Exemple #7
0
    def _deploy_service(self, service: ff.Service):
        """Build and deploy the CloudFormation stack for one service.

        Packages/uploads the service code, then assembles a template with:
        two lambdas (a sync one behind API Gateway and an async one fed by
        SQS), the API integration and routes, optional error alarms and
        e-mail subscriptions, the service queue plus its dead-letter queue,
        an SNS topic, and cross-context topic subscriptions.  Finally the
        stack is created or updated and the service's DDL is executed.

        :param service: the service being deployed; its name drives all
            resource naming via the ``self._*_name`` helpers.
        """
        context = self._context_map.get_context(service.name)
        self._package_and_deploy_code(context)

        template = Template()
        template.set_version('2010-09-09')

        # Tunable lambda settings exposed as stack parameters.
        memory_size = template.add_parameter(Parameter(
            f'{self._lambda_resource_name(service.name)}MemorySize',
            Type=NUMBER,
            Default='3008'
        ))

        timeout_gateway = template.add_parameter(Parameter(
            f'{self._lambda_resource_name(service.name)}GatewayTimeout',
            Type=NUMBER,
            Default='30'
        ))

        timeout_async = template.add_parameter(Parameter(
            f'{self._lambda_resource_name(service.name)}AsyncTimeout',
            Type=NUMBER,
            Default='900'
        ))

        # Shared execution role used by both lambdas.
        role_title = f'{self._lambda_resource_name(service.name)}ExecutionRole'
        self._add_role(role_title, template)

        # Synchronous lambda: serves API Gateway requests (short timeout).
        params = {
            'FunctionName': f'{self._service_name(service.name)}Sync',
            'Code': Code(
                S3Bucket=self._bucket,
                S3Key=self._code_key
            ),
            'Handler': 'handlers.main',
            'Role': GetAtt(role_title, 'Arn'),
            'Runtime': 'python3.7',
            'MemorySize': Ref(memory_size),
            'Timeout': Ref(timeout_gateway),
            'Environment': self._lambda_environment(context)
        }
        # VPC config is attached only when both pieces are configured.
        if self._security_group_ids and self._subnet_ids:
            params['VpcConfig'] = VPCConfig(
                SecurityGroupIds=self._security_group_ids,
                SubnetIds=self._subnet_ids
            )
        api_lambda = template.add_resource(Function(
            f'{self._lambda_resource_name(service.name)}Sync',
            **params
        ))

        # Grant API Gateway permission to invoke the sync lambda for this
        # service's route prefix (and its greedy proxy sub-path).
        route = inflection.dasherize(context.name)
        proxy_route = f'{route}/{{proxy+}}'
        template.add_resource(Permission(
            f'{self._lambda_resource_name(service.name)}SyncPermission',
            Action='lambda:InvokeFunction',
            FunctionName=f'{self._service_name(service.name)}Sync',
            Principal='apigateway.amazonaws.com',
            SourceArn=Join('', [
                'arn:aws:execute-api:',
                self._region,
                ':',
                self._account_id,
                ':',
                ImportValue(self._rest_api_reference()),
                '/*/*/',
                route,
                '*'
            ]),
            DependsOn=api_lambda
        ))

        # Asynchronous lambda: same code/role, longer timeout, fed by SQS.
        params = {
            'FunctionName': f'{self._service_name(service.name)}Async',
            'Code': Code(
                S3Bucket=self._bucket,
                S3Key=self._code_key
            ),
            'Handler': 'handlers.main',
            'Role': GetAtt(role_title, 'Arn'),
            'Runtime': 'python3.7',
            'MemorySize': Ref(memory_size),
            'Timeout': Ref(timeout_async),
            'Environment': self._lambda_environment(context)
        }
        if self._security_group_ids and self._subnet_ids:
            params['VpcConfig'] = VPCConfig(
                SecurityGroupIds=self._security_group_ids,
                SubnetIds=self._subnet_ids
            )
        async_lambda = template.add_resource(Function(
            f'{self._lambda_resource_name(service.name)}Async',
            **params
        ))

        # Proxy-style API Gateway v2 integration pointing at the sync lambda.
        integration = template.add_resource(Integration(
            self._integration_name(context.name),
            ApiId=ImportValue(self._rest_api_reference()),
            PayloadFormatVersion='2.0',
            IntegrationType='AWS_PROXY',
            IntegrationUri=Join('', [
                'arn:aws:lambda:',
                self._region,
                ':',
                self._account_id,
                ':function:',
                Ref(api_lambda),
            ]),
        ))

        # Base route (ANY /<route>) and greedy proxy route (ANY /<route>/...)
        template.add_resource(Route(
            f'{self._route_name(context.name)}Base',
            ApiId=ImportValue(self._rest_api_reference()),
            RouteKey=f'ANY /{route}',
            AuthorizationType='NONE',
            Target=Join('/', ['integrations', Ref(integration)]),
            DependsOn=integration
        ))

        template.add_resource(Route(
            f'{self._route_name(context.name)}Proxy',
            ApiId=ImportValue(self._rest_api_reference()),
            RouteKey=f'ANY /{proxy_route}',
            AuthorizationType='NONE',
            Target=Join('/', ['integrations', Ref(integration)]),
            DependsOn=integration
        ))

        # Error alarms / subscriptions

        # Only configured when the deployment config has an 'errors' section.
        if 'errors' in self._aws_config:
            alerts_topic = template.add_resource(Topic(
                self._alert_topic_name(service.name),
                TopicName=self._alert_topic_name(service.name)
            ))
            self._add_error_alarm(template, f'{self._service_name(context.name)}Sync', context.name, alerts_topic)
            self._add_error_alarm(template, f'{self._service_name(context.name)}Async', context.name, alerts_topic)

            if 'email' in self._aws_config.get('errors'):
                template.add_resource(SubscriptionResource(
                    self._alarm_subscription_name(context.name),
                    Protocol='email',
                    Endpoint=self._aws_config.get('errors').get('email').get('recipients'),
                    TopicArn=self._alert_topic_arn(context.name),
                    DependsOn=[alerts_topic]
                ))

        # Queues / Topics

        # Group subscriptions by the context (topic) they listen to.
        subscriptions = {}
        for subscription in self._get_subscriptions(context):
            if subscription['context'] not in subscriptions:
                subscriptions[subscription['context']] = []
            subscriptions[subscription['context']].append(subscription)

        # Dead-letter queue; visibility 905s (> async lambda's 900s timeout),
        # 14-day retention.
        dlq = template.add_resource(Queue(
            f'{self._queue_name(context.name)}Dlq',
            QueueName=f'{self._queue_name(context.name)}Dlq',
            VisibilityTimeout=905,
            ReceiveMessageWaitTimeSeconds=20,
            MessageRetentionPeriod=1209600
        ))
        self._queue_policy(template, dlq, f'{self._queue_name(context.name)}Dlq', subscriptions)

        # Main service queue, redriving to the DLQ after 1000 receives.
        queue = template.add_resource(Queue(
            self._queue_name(context.name),
            QueueName=self._queue_name(context.name),
            VisibilityTimeout=905,
            ReceiveMessageWaitTimeSeconds=20,
            MessageRetentionPeriod=1209600,
            RedrivePolicy=RedrivePolicy(
                deadLetterTargetArn=GetAtt(dlq, 'Arn'),
                maxReceiveCount=1000
            ),
            DependsOn=dlq
        ))
        self._queue_policy(template, queue, self._queue_name(context.name), subscriptions)

        # Drive the async lambda from the service queue, one message at a time.
        template.add_resource(EventSourceMapping(
            f'{self._lambda_resource_name(context.name)}AsyncMapping',
            BatchSize=1,
            Enabled=True,
            EventSourceArn=GetAtt(queue, 'Arn'),
            FunctionName=f'{self._service_name(service.name)}Async',
            DependsOn=[queue, async_lambda]
        ))
        topic = template.add_resource(Topic(
            self._topic_name(context.name),
            TopicName=self._topic_name(context.name)
        ))

        # Subscribe this service's queue to each topic it listens to:
        # its own topic (created above) or another context's topic.
        for context_name, list_ in subscriptions.items():
            if context_name == context.name and len(list_) > 0:
                template.add_resource(SubscriptionResource(
                    self._subscription_name(context_name),
                    Protocol='sqs',
                    Endpoint=GetAtt(queue, 'Arn'),
                    TopicArn=self._topic_arn(context.name),
                    FilterPolicy={
                        '_name': [x['name'] for x in list_],
                    },
                    RedrivePolicy={
                        'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                    },
                    DependsOn=[queue, dlq, topic]
                ))
            elif len(list_) > 0:
                # Foreign context: make sure its topic exists first.
                if context_name not in self._context_map.contexts:
                    self._find_or_create_topic(context_name)
                template.add_resource(SubscriptionResource(
                    self._subscription_name(context.name, context_name),
                    Protocol='sqs',
                    Endpoint=GetAtt(queue, 'Arn'),
                    TopicArn=self._topic_arn(context_name),
                    FilterPolicy={
                        '_name': [x['name'] for x in list_]
                    },
                    RedrivePolicy={
                        'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                    },
                    DependsOn=[queue, dlq]
                ))

        self.info('Deploying stack')
        stack_name = self._stack_name(context.name)
        try:
            # Update when the stack exists; create when describe says it doesn't.
            self._cloudformation_client.describe_stacks(StackName=stack_name)
            self._update_stack(self._stack_name(context.name), template)
        except ClientError as e:
            if f'Stack with id {stack_name} does not exist' in str(e):
                self._create_stack(self._stack_name(context.name), template)
            else:
                raise e

        self._execute_ddl(context)

        self.info('Done')
Exemple #8
0
def generate_queues_template(QueueNamePrefix, Environment):
    """Build a template with a KMS-encrypted primary queue and its DLQ.

    The template contains a KMS key (with a key policy for the account
    root and two named administrators), a key alias, the dead-letter and
    primary queues, and exported outputs for the queue and key ARNs.

    :param QueueNamePrefix: prefix for both queue names
    :param Environment: environment suffix appended to the queue names
    :return: the assembled troposphere Template
    """
    queue_name = f'{QueueNamePrefix}-{Environment}'
    dlq_name = f'{QueueNamePrefix}DLQ-{Environment}'

    template = Template(Description='A template for a messaging queue')
    template.version = '2010-09-09'

    # Administrative actions granted to the key administrators.
    admin_actions = [
        KmsAction(verb) for verb in (
            'Create*', 'Describe*', 'Enable*', 'List*', 'Put*', 'Update*',
            'Revoke*', 'Disable*', 'Get*', 'Delete*',
            'ScheduleKeyDeletion', 'CancelKeyDeletion',
        )
    ]

    kms_key = template.add_resource(Key(
        'KMSKey',
        Description=f'KMS Key for encrypting {queue_name}',
        Enabled=True,
        EnableKeyRotation=True,
        KeyPolicy=Policy(
            Version='2012-10-17',
            Statement=[
                Statement(
                    Sid='Enable IAM User Permissions',
                    Effect=Allow,
                    Principal=AWSPrincipal(
                        Sub('arn:aws:iam::${AWS::AccountId}:root')),
                    Action=[KmsAction(All)],
                    Resource=AllResources,
                ),
                Statement(
                    Sid='Allow access for Key Administrators',
                    Effect=Allow,
                    Principal=AWSPrincipal([
                        Sub(f'{USER}/frank'),
                        Sub(f'{USER}/moonunit'),
                    ]),
                    Action=admin_actions,
                    Resource=AllResources,
                ),
            ],
        ),
    ))

    template.add_resource(Alias(
        'KMSKeyAlias',
        AliasName=f'alias/{queue_name}',
        TargetKeyId=Ref(kms_key),
    ))

    dead_letter_queue = template.add_resource(Queue(
        'DeadLetterQueue',
        QueueName=dlq_name,
        MaximumMessageSize=262144,  # 256KiB
        MessageRetentionPeriod=1209600,  # 14 days
        VisibilityTimeout=30,
    ))

    template.add_resource(Queue(
        'PrimaryQueue',
        QueueName=queue_name,
        MaximumMessageSize=262144,  # 256KiB
        MessageRetentionPeriod=1209600,  # 14 days
        VisibilityTimeout=30,
        # After 10 failed receives a message is moved to the DLQ.
        RedrivePolicy=RedrivePolicy(
            deadLetterTargetArn=GetAtt(dead_letter_queue.title, 'Arn'),
            maxReceiveCount=10,
        ),
        KmsMasterKeyId=Ref(kms_key),
        KmsDataKeyReusePeriodSeconds=300,
    ))

    template.add_output([
        Output(
            'QueueArn',
            Description=f'ARN of {queue_name} Queue',
            Value=GetAtt('PrimaryQueue', 'Arn'),
            Export=Export(Name(Sub('${AWS::StackName}:PrimaryQueueArn'))),
        ),
        Output(
            'KmsKeyArn',
            Description=f'KMS Key ARN for {queue_name} Queue',
            Value=GetAtt('KMSKey', 'Arn'),
            Export=Export(Name(Sub('${AWS::StackName}:KmsKeyArn'))),
        ),
    ])

    return template
Exemple #9
0
def generate(env='pilot'):
    """Build the spider pipeline template for the given environment.

    Resources: a spider-tasks SQS queue with DLQ, a lambda (code pulled
    from S3) triggered by the queue, an SNS topic fed by S3 object-created
    events on the source bucket, the SNS/SQS policies wiring them
    together, and a results bucket.

    :param str env: environment prefix used in all physical resource names
    :return: the template rendered as a JSON string
    """
    template = Template()

    template.set_version("2010-09-09")

    # ExistingVPC = template.add_parameter(Parameter(
    #     "ExistingVPC",
    #     Type="AWS::EC2::VPC::Id",
    #     Description=(
    #         "The VPC ID that includes the security groups in the"
    #         "ExistingSecurityGroups parameter."
    #     ),
    # ))
    #
    # ExistingSecurityGroups = template.add_parameter(Parameter(
    #     "ExistingSecurityGroups",
    #     Type="List<AWS::EC2::SecurityGroup::Id>",
    # ))

    param_spider_lambda_memory_size = template.add_parameter(
        Parameter(
            'SpiderLambdaMemorySize',
            Type=NUMBER,
            Description='Amount of memory to allocate to the Lambda Function',
            Default='128',
            AllowedValues=MEMORY_VALUES
        )
    )

    param_spider_lambda_timeout = template.add_parameter(
        Parameter(
            'SpiderLambdaTimeout',
            Type=NUMBER,
            Description='Timeout in seconds for the Lambda function',
            Default='60'
        )
    )

    # Dead-letter queue keeps failed tasks for the 14-day maximum.
    spider_tasks_queue_dlq_name = f'{env}-spider-tasks-dlq'
    spider_tasks_queue_dlq = template.add_resource(
        Queue(
            "SpiderTasksDLQ",
            QueueName=spider_tasks_queue_dlq_name,
            MessageRetentionPeriod=(60 * 60 * 24 * 14),
        )
    )

    # Main task queue; a message failing twice is redriven to the DLQ.
    spider_tasks_queue_name = f"{env}-spider-tasks"
    spider_tasks_queue = template.add_resource(
        Queue(
            "SpiderTasksQueue",
            QueueName=spider_tasks_queue_name,
            MessageRetentionPeriod=(60 * 60 * 24 * 14),
            VisibilityTimeout=300,
            RedrivePolicy=RedrivePolicy(
                deadLetterTargetArn=GetAtt(spider_tasks_queue_dlq, "Arn"),
                maxReceiveCount=2,
            ),
            DependsOn=[spider_tasks_queue_dlq],
        )
    )

    # Broad logs/s3/sqs permissions for the spider lambda.
    spider_lambda_role = template.add_resource(
        Role(
            "SpiderLambdaRole",
            Path="/",
            Policies=[
                Policy(
                    PolicyName="root",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Id="root",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[
                                    Action("logs", "*")
                                ]
                            ),
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[
                                    Action("s3", "*")
                                ]
                            ),
                            Statement(
                                Effect=Allow,
                                Resource=["*"],
                                Action=[
                                    Action("sqs", "*")
                                ]
                            ),
                        ]
                    ),
                )
            ],
            AssumeRolePolicyDocument={
                "Version": "2012-10-17",
                "Statement": [{
                    "Action": ["sts:AssumeRole"],
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["lambda.amazonaws.com"]
                    }
                }]
            },
        )
    )

    spider_file_path = './spider/index.js'
    # BUGFIX: use a context manager so the file handle is closed; the
    # previous bare open() leaked it.  The source is kept for the inline
    # ZipFile deployment option (currently disabled in favour of S3).
    with open(spider_file_path, 'r') as spider_file:
        spider_code = spider_file.readlines()
    spider_lambda = template.add_resource(
        Function(
            "SpiderLambda",
            Code=Code(
                S3Bucket='spider-lambda',
                S3Key=f'{env}.zip',
                # ZipFile=Join("", spider_code)
            ),
            Handler="index.handler",
            Role=GetAtt(spider_lambda_role, "Arn"),
            Runtime="nodejs12.x",
            Layers=['arn:aws:lambda:us-east-1:342904801388:layer:spider-node-browser:1'],
            MemorySize=Ref(param_spider_lambda_memory_size),
            Timeout=Ref(param_spider_lambda_timeout),
            DependsOn=[spider_tasks_queue],
        )
    )

    # AllSecurityGroups = template.add_resource(CustomResource(
    #     "AllSecurityGroups",
    #     List=Ref(ExistingSecurityGroups),
    #     AppendedItem=Ref("SecurityGroup"),
    #     ServiceToken=GetAtt(spider_lambda, "Arn"),
    # ))
    #
    # SecurityGroup = template.add_resource(SecurityGroup(
    #     "SecurityGroup",
    #     SecurityGroupIngress=[
    #         {"ToPort": "80", "IpProtocol": "tcp", "CidrIp": "0.0.0.0/0",
    #          "FromPort": "80"}],
    #     VpcId=Ref(ExistingVPC),
    #     GroupDescription="Allow HTTP traffic to the host",
    #     SecurityGroupEgress=[
    #         {"ToPort": "80", "IpProtocol": "tcp", "CidrIp": "0.0.0.0/0",
    #          "FromPort": "80"}],
    # ))
    #
    # AllSecurityGroups = template.add_output(Output(
    #     "AllSecurityGroups",
    #     Description="Security Groups that are associated with the EC2 instance",
    #     Value=Join(", ", GetAtt(AllSecurityGroups, "Value")),
    # ))

    # Topic that fans S3 object-created events into the task queue.
    source_sns_name = f'{env}-source-sns-topic'
    source_sns_topic = template.add_resource(
        Topic(
            "SNSSource",
            TopicName=source_sns_name,
            Subscription=[
                Subscription(
                    Endpoint=GetAtt(spider_tasks_queue, "Arn"),
                    Protocol='sqs',
                )
            ],
            DependsOn=[spider_tasks_queue]
        )
    )

    # Allow S3 to publish into the topic.
    source_sns_topic_policy = template.add_resource(
        TopicPolicy(
            "SourceForwardingTopicPolicy",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Id="AllowS3PutMessageInSNS",
                Statement=[
                    Statement(
                        Sid="AllowS3PutMessages",
                        Principal=Principal("Service", "s3.amazonaws.com"),
                        Effect=Allow,
                        Action=[
                            Action("sns", "Publish"),
                        ],
                        Resource=["*"],
                    )
                ]
            ),
            Topics=[Ref(source_sns_topic)],
        )
    )

    # Allow SNS to deliver messages into the task queue.
    # (Binding to a local name was dropped: the value was never used.)
    template.add_resource(
        QueuePolicy(
            "AllowSNSPutMessagesInSQS",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Id="AllowSNSPutMessagesInSQS",
                Statement=[
                    Statement(
                        Sid="AllowSNSPutMessagesInSQS2",
                        Principal=Principal("*"),
                        Effect=Allow,
                        Action=[
                            Action("sqs", "SendMessage"),
                        ],
                        Resource=["*"],
                    )
                ]
            ),
            Queues=[Ref(spider_tasks_queue)],
            DependsOn=[spider_tasks_queue],
        )
    )

    # Buckets
    # Source bucket notifies the SNS topic for every created object.
    source_bucket_name = f'{env}-source-bucket'
    template.add_resource(
        Bucket(
            "SourceBucket",
            BucketName=source_bucket_name,
            NotificationConfiguration=NotificationConfiguration(
                TopicConfigurations=[
                    TopicConfigurations(
                        Topic=Ref(source_sns_topic),
                        Event="s3:ObjectCreated:*",
                    )
                ],
            ),
            DependsOn=[source_sns_topic_policy],
        )
    )

    results_bucket_name = f'{env}-results-bucket'
    template.add_resource(
        Bucket(
            "ResultsBucket",
            BucketName=results_bucket_name,
        )
    )

    # Lambda trigger
    template.add_resource(
        EventSourceMapping(
            "TriggerLambdaSpiderFromSQS",
            EventSourceArn=GetAtt(spider_tasks_queue, "Arn"),
            FunctionName=Ref(spider_lambda),
            BatchSize=1,  # Default process tasks one by one
        )
    )

    return template.to_json()
Exemple #10
0
# Stack parameter: name of the Kinesis stream carrying captured data.
kinesis_param = template.add_parameter(
    Parameter(
        "CapturedDataKinesisStreamName",
        Description="Name of Captured Data Kinesis Stream",
        Default="simple-stream",
        Type="String"))

# Stack parameter: physical name for the captured-data SQS queue.
sqs_param = template.add_parameter(
    Parameter(
        "SQSQueueName",
        Description="Name of Captured Data SQS",
        Default="simple-queue",
        Type="String"))

# The queue itself; its physical name comes from the SQSQueueName parameter.
sqsqueue = template.add_resource(
    Queue("CapturedDataQueue", QueueName=Ref("SQSQueueName")))

template.add_resource(
    QueuePolicy(
        "CapturedDataQueuePolicy",
        Queues=[Ref(sqsqueue)],
        PolicyDocument={
            "Version":
            "2012-10-17",
            "Statement": [{
                "Sid": "LambdaWriteToQueue",
                "Effect": "Allow",
                "Principal": {
                    "AWS": GetAtt("LambdaExecutionRole", "Arn")
                },
                "Action": "SQS:*",
    def generate_template(self, template):
        """Populate *template* with a Go hello-world lambda fed by SQS.

        Adds parameters for the code location and lambda tuning, the
        function itself with an alias, a work queue with a dead-letter
        queue, the execution role, an SQS invoke permission, and outputs
        exporting the queue ARN and the alias.

        :param template: troposphere Template the resources are added to
        """
        # BUGFIX: the original called template.add_parameter(aliasP) with
        # an undefined name (NameError); register the parameter directly.
        alias = template.add_parameter(
            Parameter("LambdaEnvAlias",
                      Default="int",
                      Description="Alias used to reference the lambda",
                      Type="String"))

        lambda_bucket = template.add_parameter(
            Parameter("LambdaBucket",
                      Type="String",
                      Default="go-lambda-hello-world",
                      Description=
                      "The S3 Bucket that contains the zip to bootstrap your "
                      "lambda function"))

        s3_key = template.add_parameter(
            Parameter("S3Key",
                      Type="String",
                      Default="main.zip",
                      Description=
                      "The S3 key that references the zip to bootstrap your "
                      "lambda function"))

        handler = template.add_parameter(
            Parameter(
                "LambdaHandler",
                Type="String",
                Default="event_handler.handler",
                Description="The name of the function (within your source code) "
                "that Lambda calls to start running your code."))

        memory_size = template.add_parameter(
            Parameter(
                "LambdaMemorySize",
                Type="Number",
                Default="128",
                Description="The amount of memory, in MB, that is allocated to "
                "your Lambda function.",
                MinValue="128"))

        timeout = template.add_parameter(
            Parameter("LambdaTimeout",
                      Type="Number",
                      Default="300",
                      Description=
                      "The function execution time (in seconds) after which "
                      "Lambda terminates the function. "))

        lambda_function = template.add_resource(
            Function(
                "LambdaGoHelloWorld",
                Code=Code(S3Bucket="go-lambda-hello-world", S3Key=Ref(s3_key)),
                Description="Go function used to demonstate sqs integration",
                Handler=Ref(handler),
                Role=GetAtt("LambdaExecutionRole", "Arn"),
                Runtime="go1.x",
                MemorySize=Ref(memory_size),
                FunctionName="go-lambda-hello-world",
                Timeout=Ref(timeout)))

        # Renamed from 'alias' so the parameter above is not shadowed;
        # Name still references the LambdaEnvAlias parameter.
        function_alias = template.add_resource(
            Alias("GolLambdaAlias",
                  Description="Alias for the go lambda",
                  FunctionName=Ref(lambda_function),
                  FunctionVersion="$LATEST",
                  Name=Ref(alias)))

        dead_letter_queue = template.add_resource(
            Queue("GoLambdaDeadLetterQueue",
                  QueueName=("golambdaqueue-dlq"),
                  VisibilityTimeout=30,
                  MessageRetentionPeriod=1209600,
                  MaximumMessageSize=262144,
                  DelaySeconds=0,
                  ReceiveMessageWaitTimeSeconds=0))

        # Work queue; after 3 failed receives a message goes to the DLQ.
        go_helloworld_queue = template.add_resource(
            Queue("GoLambdaQueue",
                  QueueName=("golambdaqueue"),
                  VisibilityTimeout=1800,
                  RedrivePolicy=RedrivePolicy(deadLetterTargetArn=GetAtt(
                      dead_letter_queue, "Arn"),
                                              maxReceiveCount=3)))

        # Role the lambda assumes: log writing plus read access to the queue.
        lambda_execution_role = template.add_resource(
            Role("LambdaExecutionRole",
                 Policies=[
                     iam.Policy(
                         PolicyName="GoFunctionRolePolicy",
                         PolicyDocument=Policy(Statement=[
                             Statement(Effect=Allow,
                                       Action=[
                                           Action("logs", "CreateLogGroup"),
                                           Action("logs", "CreateLogStream"),
                                           Action("logs", "PutLogEvents")
                                       ],
                                       Resource=["arn:aws:logs:*:*:*"]),
                             Statement(
                                 Effect=Allow,
                                 Action=[
                                     Action("sqs", "ChangeMessageVisibility"),
                                     Action("sqs", "DeleteMessage"),
                                     Action("sqs", "GetQueueAttributes"),
                                     Action("sqs", "ReceiveMessage")
                                 ],
                                 Resource=[GetAtt(go_helloworld_queue, "Arn")])
                         ]))
                 ],
                 AssumeRolePolicyDocument=Policy(Statement=[
                     Statement(Effect=Allow,
                               Action=[AssumeRole],
                               Principal=Principal("Service",
                                                   ["lambda.amazonaws.com"]))
                 ])))

        # Allow SQS to invoke the function.
        template.add_resource(
            awslambda.Permission(
                'QueueInvokePermission',
                FunctionName=GetAtt(lambda_function, 'Arn'),
                Action="lambda:InvokeFunction",
                Principal="sqs.amazonaws.com",
                SourceArn=GetAtt(go_helloworld_queue, 'Arn'),
            ))

        template.add_output(
            Output("LambdaHelloWorldQueue",
                   Value=GetAtt(go_helloworld_queue, "Arn"),
                   Export=Export("lambda-go-hello-world-queue"),
                   Description="Arn of the queue"))

        template.add_output(
            Output("LambdaHelloWorlFunction",
                   Value=Ref(function_alias),
                   Export=Export("lambda-go-hello-world-function"),
                   Description="Arn of the function"))
# Converted from IAM_Policies_SNS_Publish_To_SQS.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/

from troposphere import GetAtt, Output, Ref, Template
from troposphere.sns import Subscription, Topic
from troposphere.sqs import Queue, QueuePolicy

t = Template()

# Fixed copy-paste error: the description previously claimed this template
# creates a DynamoDB table, but it actually wires an SNS topic to an SQS queue.
t.set_description("AWS CloudFormation Sample Template: This template "
                  "demonstrates an SNS topic publishing to an SQS queue.")

# Queue that receives every message published to the topic below.
sqsqueue = t.add_resource(Queue("SQSQueue"))

# SNS topic with a single SQS-protocol subscription targeting the queue ARN.
snstopic = t.add_resource(
    Topic("SNSTopic",
          Subscription=[
              Subscription(Protocol="sqs", Endpoint=GetAtt(sqsqueue, "Arn"))
          ]))

# Expose the queue ARN so other stacks/tools can locate the queue.
t.add_output(
    Output(
        "QueueArn",
        Value=GetAtt(sqsqueue, "Arn"),
        Description="ARN of SQS Queue",
    ))

t.add_resource(
    QueuePolicy("AllowSNS2SQSPolicy",
                Queues=[Ref(sqsqueue)],
                PolicyDocument={
Exemple #13
0
from troposphere import GetAtt, Output, Ref, Template
from troposphere.sqs import Queue, RedrivePolicy

t = Template()

t.set_description(
    "AWS CloudFormation Sample Template SQS: Sample template showing how to "
    "create an SQS queue with a dead letter queue. **WARNING** This template "
    "creates Amazon SQS Queues. You will be billed for the AWS resources used "
    "if you create a stack from this template.")

# After 5 failed receives a message is redriven to the dead letter queue.
# The DLQ is referenced here by logical name and defined just below.
source_redrive = RedrivePolicy(
    deadLetterTargetArn=GetAtt("MyDeadLetterQueue", "Arn"),
    maxReceiveCount="5",
)

mysourcequeue = t.add_resource(
    Queue("MySourceQueue", RedrivePolicy=source_redrive))

mydeadletterqueue = t.add_resource(Queue("MyDeadLetterQueue"))

t.add_output([
    Output(
        "SourceQueueURL",
        Description="URL of the source queue",
        Value=Ref(mysourcequeue),
    ),
    Output(
        "SourceQueueARN",
        Description="ARN of the source queue",
Exemple #14
0
# -*- coding: utf-8 -*-
from troposphere import GetAtt, Output, Ref
from troposphere.sqs import Queue

from .template import template


# SQS queue backing the dashboard; passing ``template=`` registers the
# resource on the shared project template at construction time.
queue = Queue(
    "DashboardQueue",
    template=template,
)


# Expose both the queue URL (its Ref) and its ARN as stack outputs.
dashboard_queue_url = Output(
    "DashboardQueueURL",
    Description="URL of the source queue",
    Value=Ref(queue),
)
dashboard_queue_arn = Output(
    "DashboardQueueARN",
    Description="ARN of the source queue",
    Value=GetAtt(queue, "Arn"),
)
template.add_output([dashboard_queue_url, dashboard_queue_arn])
                         Ref("LambdaEnv")]),
               Action="lambda:InvokeFunction",
               SourceArn=GetAtt("JsonNotificationReceiveQueue", "Arn"),
               Principal="sqs.amazonaws.com"))

# Alias so callers can address the lambda by environment name; it always
# tracks the $LATEST version.
galileo_alias = Alias(
    "GalileoBabelLambdaAlias",
    Description="Alias for the galileo babel lambda",
    FunctionName=Ref("LambdaFunction"),
    FunctionVersion="$LATEST",
    Name=Ref("LambdaEnv"),
)
t.add_resource(galileo_alias)

# Inbound queue: after 3 failed receives a message is redriven to the DLQ.
receive_queue_redrive = RedrivePolicy(
    deadLetterTargetArn=GetAtt("JsonNotificationDLQ", "Arn"),
    maxReceiveCount=3,
)
t.add_resource(
    Queue("JsonNotificationReceiveQueue",
          QueueName=Sub("${LambdaEnv}-json-notification-inbound-queue"),
          RedrivePolicy=receive_queue_redrive))

# Dead letter queue for notifications the receive queue could not process.
t.add_resource(
    Queue(
        "JsonNotificationDLQ",
        QueueName=Sub("${LambdaEnv}-json-notification-inbound-dlq"),
    ))

t.add_resource(
    QueuePolicy(
        "JsonNotificationReceiveQueuePolicy",
        Queues=[Ref("JsonNotificationReceiveQueue")],
        PolicyDocument=Policy(Statement=[
            Statement(Effect=Allow,
Exemple #16
0
status_key = 'status/netkan.json'

# Bail out early when the hosted-zone id was not supplied through the
# `CKAN_ZONEID` environment variable.
if not ZONE_ID:
    print('Zone ID Required from EnvVar `CKAN_ZONEID`')
    sys.exit()

t = Template()

t.set_description("Generate NetKAN Infrastructure CF Template")


def _fifo_queue(title, queue_name):
    """Build a FIFO queue with 20 second long polling enabled."""
    return Queue(title,
                 QueueName=queue_name,
                 ReceiveMessageWaitTimeSeconds=20,
                 FifoQueue=True)


# Inbound + Outbound SQS Queues
# Inbound: Scheduler Write, Inflation Read
# Outbound: Inflator Write, Indexer Read
inbound = t.add_resource(_fifo_queue("NetKANInbound", "Inbound.fifo"))
outbound = t.add_resource(_fifo_queue("NetKANOutbound", "Outbound.fifo"))
addqueue = t.add_resource(_fifo_queue("Adding", "Adding.fifo"))
mirrorqueue = t.add_resource(
    Queue("Mirroring",
          QueueName="Mirroring.fifo",
          ReceiveMessageWaitTimeSeconds=20,
Exemple #17
0
    ProvisionedThroughput
from troposphere.s3 import Bucket

zone_id = os.environ.get('CKAN_DEV_ZONEID', False)

# Without the dev hosted-zone id there is nothing sensible to generate,
# so exit immediately.
if not zone_id:
    print('Zone ID Required from EnvVar `CKAN_DEV_ZONEID`')
    sys.exit()

t = Template()

t.set_description("Generate NetKAN Infrastructure CF Template")


def _long_poll_fifo(title, queue_name):
    """Return a FIFO queue configured for 20 second long polling."""
    return Queue(title,
                 QueueName=queue_name,
                 ReceiveMessageWaitTimeSeconds=20,
                 FifoQueue=True)


inbound = t.add_resource(_long_poll_fifo("InboundDev", "InboundDev.fifo"))
outbound = t.add_resource(_long_poll_fifo("OutboundDev", "OutboundDev.fifo"))
addqueue = t.add_resource(_long_poll_fifo("Adding", "AddingDev.fifo"))
mirrorqueue = t.add_resource(
    Queue("Mirroring",
          QueueName="MirroringDev.fifo",
          ReceiveMessageWaitTimeSeconds=20,
Exemple #18
0
from template import t
from troposphere import GetAtt, Ref, Sub
from troposphere.sqs import Queue, RedrivePolicy
from troposphere.ssm import Parameter

print('  adding sqs')


def hours_in_seconds(n):
    """Convert *n* hours to the equivalent number of seconds."""
    seconds_per_hour = 60 * 60
    return seconds_per_hour * n


# Dead letter FIFO queue for start events that could not be processed.
# NOTE(review): variable name keeps the original "evets" typo because later
# code may reference it.
failed_start_evets = t.add_resource(
    Queue("FailedStartEvents",
          FifoQueue=True,
          ContentBasedDeduplication=True))

# Main start-event queue: a single failed receive redrives the message to
# the DLQ above; consumers get a 3 hour visibility window.
start_events_redrive = RedrivePolicy(
    deadLetterTargetArn=GetAtt(failed_start_evets, "Arn"),
    maxReceiveCount=1,
)
start_events = t.add_resource(
    Queue(
        "HyP3StartEvents",
        FifoQueue=True,
        ContentBasedDeduplication=True,
        RedrivePolicy=start_events_redrive,
        VisibilityTimeout=hours_in_seconds(3),
    ))

ssm_queue_name = t.add_resource(
    Parameter("HyP3SSMParameterStartEventQueueName",
Exemple #19
0
def create_template():
    template = Template(Description=(
        "Static website hosted with S3 and CloudFront. "
        "https://github.com/schlarpc/overengineered-cloudfront-s3-static-website"
    ))

    partition_config = add_mapping(
        template,
        "PartitionConfig",
        {
            "aws": {
                # the region with the control plane for CloudFront, IAM, Route 53, etc
                "PrimaryRegion":
                "us-east-1",
                # assume that Lambda@Edge replicates to all default enabled regions, and that
                # future regions will be opt-in. generated with AWS CLI:
                # aws ec2 describe-regions --all-regions --query "Regions[?OptInStatus=='opt-in-not-required'].RegionName|sort(@)"
                "DefaultRegions": [
                    "ap-northeast-1",
                    "ap-northeast-2",
                    "ap-northeast-3",
                    "ap-south-1",
                    "ap-southeast-1",
                    "ap-southeast-2",
                    "ca-central-1",
                    "eu-central-1",
                    "eu-north-1",
                    "eu-west-1",
                    "eu-west-2",
                    "eu-west-3",
                    "sa-east-1",
                    "us-east-1",
                    "us-east-2",
                    "us-west-1",
                    "us-west-2",
                ],
            },
            # this doesn't actually work, because Lambda@Edge isn't supported in aws-cn
            "aws-cn": {
                "PrimaryRegion": "cn-north-1",
                "DefaultRegions": ["cn-north-1", "cn-northwest-1"],
            },
        },
    )

    acm_certificate_arn = template.add_parameter(
        Parameter(
            "AcmCertificateArn",
            Description=
            "Existing ACM certificate to use for serving TLS. Overrides HostedZoneId.",
            Type="String",
            AllowedPattern="(arn:[^:]+:acm:[^:]+:[^:]+:certificate/.+|)",
            Default="",
        ))

    hosted_zone_id = template.add_parameter(
        Parameter(
            "HostedZoneId",
            Description=
            "Existing Route 53 zone to use for validating a new TLS certificate.",
            Type="String",
            AllowedPattern="(Z[A-Z0-9]+|)",
            Default="",
        ))

    dns_names = template.add_parameter(
        Parameter(
            "DomainNames",
            Description=
            "Comma-separated list of additional domain names to serve.",
            Type="CommaDelimitedList",
            Default="",
        ))

    tls_protocol_version = template.add_parameter(
        Parameter(
            "TlsProtocolVersion",
            Description=
            "CloudFront TLS security policy; see https://amzn.to/2DR91Xq for details.",
            Type="String",
            Default="TLSv1.2_2019",
        ))

    log_retention_days = template.add_parameter(
        Parameter(
            "LogRetentionDays",
            Description=
            "Days to keep CloudFront, S3, and Lambda logs. 0 means indefinite retention.",
            Type="Number",
            AllowedValues=[0] + CLOUDWATCH_LOGS_RETENTION_OPTIONS,
            Default=365,
        ))

    default_ttl_seconds = template.add_parameter(
        Parameter(
            "DefaultTtlSeconds",
            Description="Cache time-to-live when not set by S3 object headers.",
            Type="Number",
            Default=int(datetime.timedelta(minutes=5).total_seconds()),
        ))

    enable_price_class_hack = template.add_parameter(
        Parameter(
            "EnablePriceClassHack",
            Description="Cut your bill in half with this one weird trick.",
            Type="String",
            Default="false",
            AllowedValues=["true", "false"],
        ))

    retention_defined = add_condition(template, "RetentionDefined",
                                      Not(Equals(Ref(log_retention_days), 0)))

    using_price_class_hack = add_condition(
        template, "UsingPriceClassHack",
        Equals(Ref(enable_price_class_hack), "true"))

    using_acm_certificate = add_condition(
        template, "UsingAcmCertificate",
        Not(Equals(Ref(acm_certificate_arn), "")))

    using_hosted_zone = add_condition(template, "UsingHostedZone",
                                      Not(Equals(Ref(hosted_zone_id), "")))

    using_certificate = add_condition(
        template,
        "UsingCertificate",
        Or(Condition(using_acm_certificate), Condition(using_hosted_zone)),
    )

    should_create_certificate = add_condition(
        template,
        "ShouldCreateCertificate",
        And(Condition(using_hosted_zone),
            Not(Condition(using_acm_certificate))),
    )

    using_dns_names = add_condition(template, "UsingDnsNames",
                                    Not(Equals(Select(0, Ref(dns_names)), "")))

    is_primary_region = "IsPrimaryRegion"
    template.add_condition(
        is_primary_region,
        Equals(Region, FindInMap(partition_config, Partition,
                                 "PrimaryRegion")),
    )

    precondition_region_is_primary = template.add_resource(
        WaitConditionHandle(
            "PreconditionIsPrimaryRegionForPartition",
            Condition=is_primary_region,
        ))

    log_ingester_dlq = template.add_resource(
        Queue(
            "LogIngesterDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
        ))

    log_ingester_role = template.add_resource(
        Role(
            "LogIngesterRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[GetAtt(log_ingester_dlq, "Arn")],
                            )
                        ],
                    ),
                )
            ],
        ))

    log_ingester = template.add_resource(
        Function(
            "LogIngester",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(log_ingest.handler.__name__),
            Code=Code(ZipFile=inspect.getsource(log_ingest)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(log_ingester_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(log_ingester_dlq, "Arn")),
        ))

    log_ingester_permission = template.add_resource(
        Permission(
            "LogIngesterPermission",
            FunctionName=GetAtt(log_ingester, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="s3.amazonaws.com",
            SourceAccount=AccountId,
        ))

    log_bucket = template.add_resource(
        Bucket(
            "LogBucket",
            # S3 requires this ACL (regardless of bucket policy) or s3:PutBucketLogging fails.
            # When the CloudFront distribution is created, it adds an additional bucket ACL.
            # That ACL is not possible to model in CloudFormation.
            AccessControl="LogDeliveryWrite",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                LifecycleRule(ExpirationInDays=1, Status="Enabled"),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=1),
                    Status="Enabled",
                ),
            ]),
            NotificationConfiguration=NotificationConfiguration(
                LambdaConfigurations=[
                    LambdaConfigurations(Event="s3:ObjectCreated:*",
                                         Function=GetAtt(log_ingester, "Arn"))
                ]),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # if we use KMS, we can't read the logs
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            DependsOn=[log_ingester_permission],
        ))

    log_ingester_log_group = template.add_resource(
        LogGroup(
            "LogIngesterLogGroup",
            LogGroupName=Join(
                "", ["/aws/lambda/", Ref(log_ingester)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    log_ingester_policy = template.add_resource(
        PolicyType(
            "LogIngesterPolicy",
            Roles=[Ref(log_ingester_role)],
            PolicyName="IngestLogPolicy",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    Region,
                                    AccountId,
                                    "log-group",
                                    "/aws/cloudfront/*",
                                ],
                            ),
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    Region,
                                    AccountId,
                                    "log-group",
                                    "/aws/s3/*",
                                ],
                            ),
                            GetAtt(log_ingester_log_group, "Arn"),
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(log_bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))

    bucket = template.add_resource(
        Bucket(
            "ContentBucket",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                # not supported by CFN yet:
                # LifecycleRule(
                # Transitions=[
                # LifecycleRuleTransition(
                # StorageClass='INTELLIGENT_TIERING',
                # TransitionInDays=1,
                # ),
                # ],
                # Status="Enabled",
                # ),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=7),
                    Status="Enabled",
                )
            ]),
            LoggingConfiguration=LoggingConfiguration(
                DestinationBucketName=Ref(log_bucket), LogFilePrefix="s3/"),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # Origin Access Identities can't use KMS
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
        ))

    origin_access_identity = template.add_resource(
        CloudFrontOriginAccessIdentity(
            "CloudFrontIdentity",
            CloudFrontOriginAccessIdentityConfig=
            CloudFrontOriginAccessIdentityConfig(
                Comment=GetAtt(bucket, "Arn")),
        ))

    bucket_policy = template.add_resource(
        BucketPolicy(
            "ContentBucketPolicy",
            Bucket=Ref(bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "CanonicalUser",
                            GetAtt(origin_access_identity,
                                   "S3CanonicalUserId"),
                        ),
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))

    # Not strictly necessary, as ACLs should take care of this access. However, CloudFront docs
    # state "In some circumstances [...] S3 resets permissions on the bucket to the default value",
    # and this allows logging to work without any ACLs in place.
    log_bucket_policy = template.add_resource(
        BucketPolicy(
            "LogBucketPolicy",
            Bucket=Ref(log_bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join(
                                "/",
                                [GetAtt(log_bucket, "Arn"), "cloudfront", "*"])
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.ListBucket],
                        Resource=[Join("/", [GetAtt(log_bucket, "Arn")])],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service", "s3.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join("/", [GetAtt(log_bucket, "Arn"), "s3", "*"])
                        ],
                    ),
                ],
            ),
        ))

    certificate_validator_dlq = template.add_resource(
        Queue(
            "CertificateValidatorDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
            Condition=should_create_certificate,
        ))

    certificate_validator_role = template.add_resource(
        Role(
            "CertificateValidatorRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[
                                    GetAtt(certificate_validator_dlq, "Arn")
                                ],
                            )
                        ],
                    ),
                )
            ],
            # TODO scope down
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
                "arn:aws:iam::aws:policy/AmazonRoute53FullAccess",
                "arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly",
            ],
            Condition=should_create_certificate,
        ))

    certificate_validator_function = template.add_resource(
        Function(
            "CertificateValidatorFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(certificate_validator.handler.__name__),
            Code=Code(ZipFile=inspect.getsource(certificate_validator)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(certificate_validator_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(certificate_validator_dlq, "Arn")),
            Environment=Environment(
                Variables={
                    certificate_validator.EnvVars.HOSTED_ZONE_ID.name:
                    Ref(hosted_zone_id)
                }),
            Condition=should_create_certificate,
        ))

    certificate_validator_log_group = template.add_resource(
        LogGroup(
            "CertificateValidatorLogGroup",
            LogGroupName=Join(
                "", ["/aws/lambda/",
                     Ref(certificate_validator_function)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
            Condition=should_create_certificate,
        ))

    certificate_validator_rule = template.add_resource(
        Rule(
            "CertificateValidatorRule",
            EventPattern={
                "detail-type": ["AWS API Call via CloudTrail"],
                "detail": {
                    "eventSource": ["acm.amazonaws.com"],
                    "eventName": ["AddTagsToCertificate"],
                    "requestParameters": {
                        "tags": {
                            "key": [certificate_validator_function.title],
                            "value":
                            [GetAtt(certificate_validator_function, "Arn")],
                        }
                    },
                },
            },
            Targets=[
                Target(
                    Id="certificate-validator-lambda",
                    Arn=GetAtt(certificate_validator_function, "Arn"),
                )
            ],
            DependsOn=[certificate_validator_log_group],
            Condition=should_create_certificate,
        ))

    certificate_validator_permission = template.add_resource(
        Permission(
            "CertificateValidatorPermission",
            FunctionName=GetAtt(certificate_validator_function, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="events.amazonaws.com",
            SourceArn=GetAtt(certificate_validator_rule, "Arn"),
            Condition=should_create_certificate,
        ))

    certificate = template.add_resource(
        Certificate(
            "Certificate",
            DomainName=Select(0, Ref(dns_names)),
            SubjectAlternativeNames=Ref(
                dns_names),  # duplicate first name works fine
            ValidationMethod="DNS",
            Tags=Tags(
                **{
                    certificate_validator_function.title:
                    GetAtt(certificate_validator_function, "Arn")
                }),
            DependsOn=[certificate_validator_permission],
            Condition=should_create_certificate,
        ))

    edge_hook_role = template.add_resource(
        Role(
            "EdgeHookRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal(
                            "Service",
                            [
                                "lambda.amazonaws.com",
                                "edgelambda.amazonaws.com"
                            ],
                        ),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
        ))

    edge_hook_function = template.add_resource(
        Function(
            "EdgeHookFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.handler",
            Code=Code(ZipFile=inspect.getsource(edge_hook)),
            MemorySize=128,
            Timeout=3,
            Role=GetAtt(edge_hook_role, "Arn"),
        ))
    edge_hook_function_hash = (hashlib.sha256(
        json.dumps(edge_hook_function.to_dict(),
                   sort_keys=True).encode("utf-8")).hexdigest()[:10].upper())

    edge_hook_version = template.add_resource(
        Version(
            "EdgeHookVersion" + edge_hook_function_hash,
            FunctionName=GetAtt(edge_hook_function, "Arn"),
        ))

    replica_log_group_name = Join(
        "/",
        [
            "/aws/lambda",
            Join(
                ".",
                [
                    FindInMap(partition_config, Partition, "PrimaryRegion"),
                    Ref(edge_hook_function),
                ],
            ),
        ],
    )

    edge_hook_role_policy = template.add_resource(
        PolicyType(
            "EdgeHookRolePolicy",
            PolicyName="write-logs",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    "*",
                                    AccountId,
                                    "log-group",
                                    replica_log_group_name,
                                    "log-stream",
                                    "*",
                                ],
                            ),
                        ],
                    ),
                ],
            ),
            Roles=[Ref(edge_hook_role)],
        ))

    stack_set_administration_role = template.add_resource(
        Role(
            "StackSetAdministrationRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "cloudformation.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
        ))

    stack_set_execution_role = template.add_resource(
        Role(
            "StackSetExecutionRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "AWS", GetAtt(stack_set_administration_role,
                                          "Arn")),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="create-stackset-instances",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.DescribeStacks,
                                    logs.DescribeLogGroups,
                                ],
                                Resource=["*"],
                            ),
                            # stack instances communicate with the CFN service via SNS
                            Statement(
                                Effect=Allow,
                                Action=[sns.Publish],
                                NotResource=[
                                    Join(
                                        ":",
                                        [
                                            "arn", Partition, "sns", "*",
                                            AccountId, "*"
                                        ],
                                    )
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    logs.CreateLogGroup,
                                    logs.DeleteLogGroup,
                                    logs.PutRetentionPolicy,
                                    logs.DeleteRetentionPolicy,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn",
                                            Partition,
                                            "logs",
                                            "*",
                                            AccountId,
                                            "log-group",
                                            replica_log_group_name,
                                            "log-stream",
                                            "",
                                        ],
                                    ),
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.CreateStack,
                                    cloudformation.DeleteStack,
                                    cloudformation.UpdateStack,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn",
                                            Partition,
                                            "cloudformation",
                                            "*",
                                            AccountId,
                                            Join(
                                                "/",
                                                [
                                                    "stack",
                                                    Join(
                                                        "-",
                                                        [
                                                            "StackSet",
                                                            StackName, "*"
                                                        ],
                                                    ),
                                                ],
                                            ),
                                        ],
                                    )
                                ],
                            ),
                        ],
                    ),
                ),
            ],
        ))

    stack_set_administration_role_policy = template.add_resource(
        PolicyType(
            "StackSetAdministrationRolePolicy",
            PolicyName="assume-execution-role",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[sts.AssumeRole],
                        Resource=[GetAtt(stack_set_execution_role, "Arn")],
                    ),
                ],
            ),
            Roles=[Ref(stack_set_administration_role)],
        ))

    edge_log_groups = template.add_resource(
        StackSet(
            "EdgeLambdaLogGroupStackSet",
            AdministrationRoleARN=GetAtt(stack_set_administration_role, "Arn"),
            ExecutionRoleName=Ref(stack_set_execution_role),
            StackSetName=Join("-", [StackName, "EdgeLambdaLogGroup"]),
            PermissionModel="SELF_MANAGED",
            Description="Multi-region log groups for Lambda@Edge replicas",
            Parameters=[
                StackSetParameter(
                    ParameterKey="LogGroupName",
                    ParameterValue=replica_log_group_name,
                ),
                StackSetParameter(
                    ParameterKey="LogRetentionDays",
                    ParameterValue=Ref(log_retention_days),
                ),
            ],
            OperationPreferences=OperationPreferences(
                FailureToleranceCount=0,
                MaxConcurrentPercentage=100,
            ),
            StackInstancesGroup=[
                StackInstances(
                    DeploymentTargets=DeploymentTargets(Accounts=[AccountId]),
                    Regions=FindInMap(partition_config, Partition,
                                      "DefaultRegions"),
                )
            ],
            TemplateBody=create_log_group_template().to_json(indent=None),
            DependsOn=[stack_set_administration_role_policy],
        ))

    price_class_distribution = template.add_resource(
        Distribution(
            "PriceClassDistribution",
            DistributionConfig=DistributionConfig(
                Comment="Dummy distribution used for price class hack",
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    ViewerProtocolPolicy="allow-all",
                    ForwardedValues=ForwardedValues(QueryString=False),
                ),
                Enabled=True,
                Origins=[
                    Origin(Id="default",
                           DomainName=GetAtt(bucket, "DomainName"))
                ],
                IPV6Enabled=True,
                ViewerCertificate=ViewerCertificate(
                    CloudFrontDefaultCertificate=True),
                PriceClass="PriceClass_All",
            ),
            Condition=using_price_class_hack,
        ))

    distribution = template.add_resource(
        Distribution(
            "ContentDistribution",
            DistributionConfig=DistributionConfig(
                Enabled=True,
                Aliases=If(using_dns_names, Ref(dns_names), NoValue),
                Logging=Logging(Bucket=GetAtt(log_bucket, "DomainName"),
                                Prefix="cloudfront/"),
                DefaultRootObject="index.html",
                Origins=[
                    Origin(
                        Id="default",
                        DomainName=GetAtt(bucket, "DomainName"),
                        S3OriginConfig=S3OriginConfig(
                            OriginAccessIdentity=Join(
                                "",
                                [
                                    "origin-access-identity/cloudfront/",
                                    Ref(origin_access_identity),
                                ],
                            )),
                    )
                ],
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    Compress=True,
                    ForwardedValues=ForwardedValues(QueryString=False),
                    ViewerProtocolPolicy="redirect-to-https",
                    DefaultTTL=Ref(default_ttl_seconds),
                    LambdaFunctionAssociations=[
                        LambdaFunctionAssociation(
                            EventType="origin-request",
                            LambdaFunctionARN=Ref(edge_hook_version),
                        )
                    ],
                ),
                HttpVersion="http2",
                IPV6Enabled=True,
                ViewerCertificate=ViewerCertificate(
                    AcmCertificateArn=If(
                        using_acm_certificate,
                        Ref(acm_certificate_arn),
                        If(using_hosted_zone, Ref(certificate), NoValue),
                    ),
                    SslSupportMethod=If(using_certificate, "sni-only",
                                        NoValue),
                    CloudFrontDefaultCertificate=If(using_certificate, NoValue,
                                                    True),
                    MinimumProtocolVersion=Ref(tls_protocol_version),
                ),
                PriceClass=If(using_price_class_hack, "PriceClass_100",
                              "PriceClass_All"),
            ),
            DependsOn=[
                bucket_policy,
                log_ingester_policy,
                edge_log_groups,
                precondition_region_is_primary,
            ],
        ))

    distribution_log_group = template.add_resource(
        LogGroup(
            "DistributionLogGroup",
            LogGroupName=Join(
                "", ["/aws/cloudfront/", Ref(distribution)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    bucket_log_group = template.add_resource(
        LogGroup(
            "BucketLogGroup",
            LogGroupName=Join("", ["/aws/s3/", Ref(bucket)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    template.add_output(Output("DistributionId", Value=Ref(distribution)))

    template.add_output(
        Output("DistributionDomain", Value=GetAtt(distribution, "DomainName")))

    template.add_output(
        Output(
            "DistributionDnsTarget",
            Value=If(
                using_price_class_hack,
                GetAtt(price_class_distribution, "DomainName"),
                GetAtt(distribution, "DomainName"),
            ),
        ))

    template.add_output(
        Output(
            "DistributionUrl",
            Value=Join("",
                       ["https://",
                        GetAtt(distribution, "DomainName"), "/"]),
        ))

    template.add_output(Output("ContentBucketArn", Value=GetAtt(bucket,
                                                                "Arn")))

    return template
Exemple #20
0
                    "Effect": "Allow",
                },
                {
                    "Action": ["dynamodb:PutItem"],
                    "Resource": [GetAtt(video_events_table, 'Arn')],
                    "Effect": "Allow",
                },
                {
                    "Action": ["ssm:GetParameter"],
                    "Resource": ['*'],  # Find a way to restrict this
                    "Effect": "Allow",
                }
            ],
        }))

events_to_api_queue = template.add_resource(Queue('EventsToApiQueue', ))

events_to_api_queue_policy = template.add_resource(
    ManagedPolicy(
        'EventsToApiQueuePolicy',
        Description='Allows consuming messages from the api-events queue.',
        PolicyDocument={
            "Version":
            "2012-10-17",
            "Statement": [{
                "Action": [
                    "sqs:DeleteMessage", "sqs:ReceiveMessage",
                    "sqs:GetQueueAttributes"
                ],
                "Resource":
                GetAtt(events_to_api_queue, 'Arn'),
Exemple #21
0
def emit_configuration():
    """Wire up the chef-deregistration pipeline for the babysitter.

    Adds an SQS queue, an SNS alarm topic that feeds it, a CloudWatch
    alarm on queue depth, and the queue policy that lets SNS publish
    into the queue.

    NOTE: a 'CreateDeregistrationTopic' template parameter + condition
    was previously sketched here (disabled) for the case where the
    queue already exists.
    """
    queue_name = '_'.join(['chef-deregistration', CLOUDNAME, CLOUDENV])

    # Queue receiving deregistration events for the babysitter to consume.
    queue = template.add_resource(
        Queue(cfn.sanitize_id(queue_name),
              VisibilityTimeout=60,
              MessageRetentionPeriod=1209600,
              MaximumMessageSize=16384,
              QueueName=queue_name))

    # SNS topic whose messages fan into the queue via an SQS subscription.
    sqs_subscription = Subscription(Endpoint=GetAtt(queue, "Arn"),
                                    Protocol='sqs')
    alert_topic = template.add_resource(
        Topic(cfn.sanitize_id("BabysitterAlarmTopic{0}".format(CLOUDENV)),
              DisplayName='Babysitter Alarm',
              TopicName=queue_name,
              Subscription=[sqs_subscription],
              DependsOn=queue.title))

    # Alarm when more than 200 messages are sitting visible in the queue.
    depth_dimension = MetricDimension(Name='QueueName',
                                      Value=GetAtt(queue, "QueueName"))
    template.add_resource(
        Alarm("BabysitterQueueDepthAlarm",
              AlarmDescription=
              'Alarm if the queue depth grows beyond 200 messages',
              Namespace='AWS/SQS',
              MetricName='ApproximateNumberOfMessagesVisible',
              Dimensions=[depth_dimension],
              Statistic='Sum',
              Period='300',
              EvaluationPeriods='1',
              Threshold='200',
              ComparisonOperator='GreaterThanThreshold',
              # AlarmActions=[Ref(alert_topic), ],
              # InsufficientDataActions=[Ref(alert_topic), ],
              DependsOn=alert_topic.title))

    # Policy statement allowing the alarm topic (and only it, via
    # aws:SourceArn) to send messages into the queue.
    allow_sns_publish = {
        "Sid": "AllowSNSPublishing",
        "Effect": "Allow",
        "Principal": {
            "AWS": "*"
        },
        "Action": ["sqs:SendMessage"],
        "Resource": GetAtt(queue, "Arn"),
        "Condition": {
            "ArnEquals": {
                "aws:SourceArn": Ref(alert_topic)
            }
        }
    }
    queue_policy = {
        "Version": "2012-10-17",
        "Id": "BabysitterSNSPublicationPolicy",
        "Statement": [allow_sns_publish]
    }

    # Publish all events from SNS to the Queue
    template.add_resource(
        QueuePolicy("BabysitterPublishSNStoSQSPolicy",
                    Queues=[Ref(queue)],
                    PolicyDocument=queue_policy,
                    DependsOn=[queue.title, alert_topic.title]))

    cfn.alert_topic = alert_topic
Exemple #22
0
from troposphere import GetAtt, Output, Parameter, Ref, Template
from troposphere.sns import Subscription, Topic
from troposphere.sqs import Queue, RedrivePolicy, QueuePolicy

template = Template()

template.add_description(
    "AWS CloudFormation Sample Template SNS_AND_SQS_: Sample")

# Dead-letter queue: receives messages the source queue gives up on.
dead_letter_queue = template.add_resource(
    Queue("deadSQS", QueueName="dead__letter_queue__iac_sqs_sample"))

# Source queue: after 5 failed receives a message is redriven to the DLQ.
sqs_aws = template.add_resource(
    Queue("iacSQS",
          QueueName="iac_sqs_sample",
          RedrivePolicy=RedrivePolicy(
              deadLetterTargetArn=GetAtt(dead_letter_queue, "Arn"),
              maxReceiveCount="5",
          )))

# Topic delivering every published message into the source queue.
sns_aws = template.add_resource(
    Topic("iacSNS",
          TopicName="iac_sns_sample",
          Subscription=[
              Subscription(Protocol="sqs", Endpoint=GetAtt(sqs_aws, "Arn"))
          ]))

template.add_output([
    Output("SourceQueueURL",
           Description="URL of the source queue",
           Value=Ref(sqs_aws)),
    # BUG FIX: the output list opened with '[' was closed with '))',
    # which is a SyntaxError; close the list with ']' before ')'.
])

rekognition_results_code_key = template.add_parameter(
    Parameter(
        'RekognitionResults',
        Type=constants.STRING,
        Default='lambda-code/video_engine/rekognition_results.zip',
    ))

template.add_parameter_to_group(start_insights_code_key, 'Lambda Keys')
template.add_parameter_to_group(rekognition_code_key, 'Lambda Keys')
template.add_parameter_to_group(video_metadata_event_code_key, 'Lambda Keys')
template.add_parameter_to_group(rekognition_results_code_key, 'Lambda Keys')

rekognition_updates_queue = template.add_resource(
    Queue('RekognitionUpdatesQueue', ))

rekognition_updates_topic = template.add_resource(
    Topic(
        'RekognitionUpdatesTopic',
        Subscription=[
            Subscription(
                Endpoint=GetAtt(rekognition_updates_queue, 'Arn'),
                Protocol='sqs',
            )
        ],
    ))

template.add_resource(
    QueuePolicy(
        "RekognitionUpdatesQueuePolicy",
Exemple #24
0
    def _deploy_service(self, service: ff.Service):
        """Build and deploy the CloudFormation stack for one service.

        Assembles the sync (API-facing) and async lambdas, API Gateway
        integration and routes, timer rules, optional error-alert topic,
        the SQS queue + DLQ with SNS subscriptions, and a DynamoDB
        document table; then uploads the template to S3 and creates or
        updates the stack.

        :param service: the service whose context is being deployed
        """
        context = self._context_map.get_context(service.name)
        # No prebuilt container image configured -> zip and upload the code.
        if self._aws_config.get('image_uri') is None:
            self._package_and_deploy_code(context)

        template = Template()
        template.set_version('2010-09-09')

        # Tunable parameters: memory for the sync lambda, timeouts for both.
        memory_size = template.add_parameter(
            Parameter(f'{self._lambda_resource_name(service.name)}MemorySize',
                      Type=NUMBER,
                      Default=self._aws_config.get('memory_sync', '3008')))

        timeout_gateway = template.add_parameter(
            Parameter(
                f'{self._lambda_resource_name(service.name)}GatewayTimeout',
                Type=NUMBER,
                Default='30'))

        timeout_async = template.add_parameter(
            Parameter(
                f'{self._lambda_resource_name(service.name)}AsyncTimeout',
                Type=NUMBER,
                Default='900'))

        role_title = f'{self._lambda_resource_name(service.name)}ExecutionRole'
        role = self._add_role(role_title, template)

        # --- Sync (API gateway-facing) lambda -----------------------------
        params = {
            'FunctionName': f'{self._service_name(service.name)}Sync',
            'Role': GetAtt(role_title, 'Arn'),
            'MemorySize': Ref(memory_size),
            'Timeout': Ref(timeout_gateway),
            'Environment': self._lambda_environment(context)
        }

        # Deploy either from a container image or from the uploaded zip.
        image_uri = self._aws_config.get('image_uri')
        if image_uri is not None:
            params.update({
                'Code': Code(ImageUri=image_uri),
                'PackageType': 'Image',
            })
        else:
            params.update({
                'Code':
                Code(S3Bucket=self._bucket, S3Key=self._code_key),
                'Runtime':
                'python3.7',
                'Handler':
                'handlers.main',
            })

        if self._security_group_ids and self._subnet_ids:
            params['VpcConfig'] = VPCConfig(
                SecurityGroupIds=self._security_group_ids,
                SubnetIds=self._subnet_ids)
        api_lambda = template.add_resource(
            Function(f'{self._lambda_resource_name(service.name)}Sync',
                     **params))

        # Allow API Gateway to invoke the sync lambda for this route prefix.
        route = inflection.dasherize(context.name)
        proxy_route = f'{route}/{{proxy+}}'
        template.add_resource(
            Permission(
                f'{self._lambda_resource_name(service.name)}SyncPermission',
                Action='lambda:InvokeFunction',
                FunctionName=f'{self._service_name(service.name)}Sync',
                Principal='apigateway.amazonaws.com',
                SourceArn=Join('', [
                    'arn:aws:execute-api:', self._region, ':',
                    self._account_id, ':',
                    ImportValue(
                        self._rest_api_reference()), '/*/*/', route, '*'
                ]),
                DependsOn=api_lambda))

        # --- Async (queue-driven) lambda ----------------------------------
        if self._adaptive_memory:
            value = '3008' if not self._adaptive_memory else '256'
            try:
                value = int(self._aws_config.get('memory_async'))
            except (TypeError, ValueError):
                # BUG FIX: 'memory_async' may be absent, in which case
                # .get() returns None and int(None) raises TypeError (not
                # ValueError); fall back to the default in both cases.
                pass
            memory_size = template.add_parameter(
                Parameter(
                    f'{self._lambda_resource_name(service.name)}MemorySizeAsync',
                    Type=NUMBER,
                    Default=value))

        params = {
            'FunctionName': self._lambda_function_name(service.name, 'Async'),
            'Role': GetAtt(role_title, 'Arn'),
            'MemorySize': Ref(memory_size),
            'Timeout': Ref(timeout_async),
            'Environment': self._lambda_environment(context)
        }

        if image_uri is not None:
            params.update({
                'Code': Code(ImageUri=image_uri),
                'PackageType': 'Image',
            })
        else:
            params.update({
                'Code':
                Code(S3Bucket=self._bucket, S3Key=self._code_key),
                'Runtime':
                'python3.7',
                'Handler':
                'handlers.main',
            })

        if self._security_group_ids and self._subnet_ids:
            params['VpcConfig'] = VPCConfig(
                SecurityGroupIds=self._security_group_ids,
                SubnetIds=self._subnet_ids)
        async_lambda = template.add_resource(
            Function(self._lambda_resource_name(service.name, type_='Async'),
                     **params))

        if self._adaptive_memory:
            self._add_adaptive_memory_functions(template, context,
                                                timeout_async, role_title,
                                                async_lambda)
            # self._add_adaptive_memory_streams(template, context, async_lambda, role)

        # --- Timers: cron-scheduled commands routed to the async lambda ---
        for cls, _ in context.command_handlers.items():
            if cls.has_timer():
                timer = cls.get_timer()
                # Skip timers pinned to a different environment.
                if timer.environment is not None and timer.environment != self._env:
                    continue
                if isinstance(timer.command, str):
                    timer_name = timer.command
                else:
                    timer_name = timer.command.__name__

                target = Target(
                    f'{self._service_name(service.name)}AsyncTarget',
                    Arn=GetAtt(
                        self._lambda_resource_name(service.name,
                                                   type_='Async'), 'Arn'),
                    Id=self._lambda_resource_name(service.name, type_='Async'),
                    Input=
                    f'{{"_context": "{context.name}", "_type": "command", "_name": "{cls.__name__}"}}'
                )
                rule = template.add_resource(
                    Rule(f'{timer_name}TimerRule',
                         ScheduleExpression=f'cron({timer.cron})',
                         State='ENABLED',
                         Targets=[target]))
                template.add_resource(
                    Permission(f'{timer_name}TimerPermission',
                               Action='lambda:invokeFunction',
                               Principal='events.amazonaws.com',
                               FunctionName=Ref(async_lambda),
                               SourceArn=GetAtt(rule, 'Arn')))

        # --- API Gateway integration + base and proxy routes --------------
        integration = template.add_resource(
            Integration(
                self._integration_name(context.name),
                ApiId=ImportValue(self._rest_api_reference()),
                PayloadFormatVersion='2.0',
                IntegrationType='AWS_PROXY',
                IntegrationUri=Join('', [
                    'arn:aws:lambda:',
                    self._region,
                    ':',
                    self._account_id,
                    ':function:',
                    Ref(api_lambda),
                ]),
            ))

        template.add_resource(
            Route(f'{self._route_name(context.name)}Base',
                  ApiId=ImportValue(self._rest_api_reference()),
                  RouteKey=f'ANY /{route}',
                  AuthorizationType='NONE',
                  Target=Join(
                      '/', ['integrations', Ref(integration)]),
                  DependsOn=integration))

        template.add_resource(
            Route(f'{self._route_name(context.name)}Proxy',
                  ApiId=ImportValue(self._rest_api_reference()),
                  RouteKey=f'ANY /{proxy_route}',
                  AuthorizationType='NONE',
                  Target=Join(
                      '/', ['integrations', Ref(integration)]),
                  DependsOn=integration))

        # --- Error alarms / email subscriptions ---------------------------
        if 'errors' in self._aws_config:
            alerts_topic = template.add_resource(
                Topic(self._alert_topic_name(service.name),
                      TopicName=self._alert_topic_name(service.name)))

            if 'email' in self._aws_config.get('errors'):
                for address in self._aws_config.get('errors').get('email').get(
                        'recipients').split(','):
                    template.add_resource(
                        SubscriptionResource(
                            self._alarm_subscription_name(context.name),
                            Protocol='email',
                            Endpoint=address,
                            TopicArn=self._alert_topic_arn(context.name),
                            DependsOn=[alerts_topic]))

        # --- Queues / Topics ----------------------------------------------
        # Group message subscriptions by the context that owns each topic.
        subscriptions = {}
        for subscription in self._get_subscriptions(context):
            if subscription['context'] not in subscriptions:
                subscriptions[subscription['context']] = []
            subscriptions[subscription['context']].append(subscription)

        dlq = template.add_resource(
            Queue(f'{self._queue_name(context.name)}Dlq',
                  QueueName=f'{self._queue_name(context.name)}Dlq',
                  VisibilityTimeout=905,
                  ReceiveMessageWaitTimeSeconds=20,
                  MessageRetentionPeriod=1209600))
        self._queue_policy(template, dlq,
                           f'{self._queue_name(context.name)}Dlq',
                           subscriptions)

        queue = template.add_resource(
            Queue(self._queue_name(context.name),
                  QueueName=self._queue_name(context.name),
                  VisibilityTimeout=905,
                  ReceiveMessageWaitTimeSeconds=20,
                  MessageRetentionPeriod=1209600,
                  RedrivePolicy=RedrivePolicy(deadLetterTargetArn=GetAtt(
                      dlq, 'Arn'),
                                              maxReceiveCount=1000),
                  DependsOn=dlq))
        self._queue_policy(template, queue, self._queue_name(context.name),
                           subscriptions)

        # Drive the async lambda from the queue, one message per invocation.
        template.add_resource(
            EventSourceMapping(
                f'{self._lambda_resource_name(context.name)}AsyncMapping',
                BatchSize=1,
                Enabled=True,
                EventSourceArn=GetAtt(queue, 'Arn'),
                FunctionName=self._lambda_function_name(service.name, 'Async'),
                DependsOn=[queue, async_lambda]))
        topic = template.add_resource(
            Topic(self._topic_name(context.name),
                  TopicName=self._topic_name(context.name)))

        # Subscribe the queue to this context's own topic and to the topics
        # of other contexts it listens to, filtered by message name.
        for context_name, list_ in subscriptions.items():
            if context_name == context.name and len(list_) > 0:
                template.add_resource(
                    SubscriptionResource(
                        self._subscription_name(context_name),
                        Protocol='sqs',
                        Endpoint=GetAtt(queue, 'Arn'),
                        TopicArn=self._topic_arn(context.name),
                        FilterPolicy={
                            '_name': [x['name'] for x in list_],
                        },
                        RedrivePolicy={
                            'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                        },
                        DependsOn=[queue, dlq, topic]))
            elif len(list_) > 0:
                # Foreign context: make sure its topic exists before we
                # subscribe to it.
                if context_name not in self._context_map.contexts:
                    self._find_or_create_topic(context_name)
                template.add_resource(
                    SubscriptionResource(
                        self._subscription_name(context.name, context_name),
                        Protocol='sqs',
                        Endpoint=GetAtt(queue, 'Arn'),
                        TopicArn=self._topic_arn(context_name),
                        FilterPolicy={'_name': [x['name'] for x in list_]},
                        RedrivePolicy={
                            'deadLetterTargetArn': GetAtt(dlq, 'Arn'),
                        },
                        DependsOn=[queue, dlq]))

        # --- DynamoDB document table (pk/sk schema, on-demand billing) ----
        ddb_table = template.add_resource(
            Table(self._ddb_resource_name(context.name),
                  TableName=self._ddb_table_name(context.name),
                  AttributeDefinitions=[
                      AttributeDefinition(AttributeName='pk',
                                          AttributeType='S'),
                      AttributeDefinition(AttributeName='sk',
                                          AttributeType='S'),
                  ],
                  BillingMode='PAY_PER_REQUEST',
                  KeySchema=[
                      KeySchema(AttributeName='pk', KeyType='HASH'),
                      KeySchema(AttributeName='sk', KeyType='RANGE'),
                  ],
                  TimeToLiveSpecification=TimeToLiveSpecification(
                      AttributeName='TimeToLive', Enabled=True)))

        template.add_output(
            Output("DDBTable",
                   Value=Ref(ddb_table),
                   Description="Document table"))

        for cb in self._pre_deployment_hooks:
            cb(template=template, context=context, env=self._env)

        # --- Upload template to S3 and create/update the stack ------------
        self.info('Deploying stack')
        self._s3_client.put_object(Body=template.to_json(),
                                   Bucket=self._bucket,
                                   Key=self._template_key)
        url = self._s3_client.generate_presigned_url(ClientMethod='get_object',
                                                     Params={
                                                         'Bucket':
                                                         self._bucket,
                                                         'Key':
                                                         self._template_key
                                                     })

        stack_name = self._stack_name(context.name)
        try:
            # Update when the stack already exists, otherwise create it.
            self._cloudformation_client.describe_stacks(StackName=stack_name)
            self._update_stack(self._stack_name(context.name), url)
        except ClientError as e:
            if f'Stack with id {stack_name} does not exist' in str(e):
                self._create_stack(self._stack_name(context.name), url)
            else:
                raise e

        for cb in self._post_deployment_hooks:
            cb(template=template, context=context, env=self._env)

        self._migrate_schema(context)

        self.info('Done')
        Default='custom_resources/elastictranscoder.zip',
    ))

# Group every Lambda code-key parameter under a single console label so they
# render together in the CloudFormation parameter UI.
template.add_parameter_to_group(start_encode_lambda_code_key, 'Lambda Keys')
template.add_parameter_to_group(request_encoding_lambda_code_key,
                                'Lambda Keys')
template.add_parameter_to_group(update_encoding_state_lambda_code_key,
                                'Lambda Keys')
template.add_parameter_to_group(elastictranscoder_code_key, 'Lambda Keys')

# Cross-stack references: the core stack exports '<core-stack>-<Name>-Ref'
# and '<core-stack>-<Name>-Arn' values that this template imports.
_video_events_table = ImportValue(
    Join('-', [Ref(core_stack), 'VideoEventsTable', 'Ref']))
_lambda_managed_policy = ImportValue(
    Join('-', [Ref(core_stack), 'LambdaDefaultPolicy', 'Arn']))

# Plain SQS queues with all-default properties (standard queues, default
# visibility timeout/retention).
request_encoding_queue = template.add_resource(Queue('RequestEncodingQueue', ))

start_media_insights_queue = template.add_resource(
    Queue('StartMediaInsightsQueue', ))

processing_failed_queue = template.add_resource(
    Queue('ProcessingFailedQueue', ))

consume_insights_queue_policy = template.add_resource(
    ManagedPolicy(
        'ConsumeMediaInsightsQueuePolicy',
        Description='Allows consuming messages from the media-insights queue.',
        PolicyDocument={
            "Version":
            "2012-10-17",
            "Statement": [{
# Example #26
# 0
    def _add_adaptive_memory_functions(self, template, context: ff.Context,
                                       timeout, role_title, async_lambda):
        """Add one async Lambda, SQS queue and event-source mapping per
        configured memory size.

        For each entry in firefly_aws.memory_settings this registers:
        a MemorySize stack parameter (overridable, defaulting to the
        configured value), a Lambda function sized with it, an SQS queue
        feeding that function, and the EventSourceMapping wiring them up.

        :param template: troposphere Template the resources are added to
        :param context: firefly context whose name keys all resource names
        :param timeout: template Parameter referenced for the Lambda Timeout
        :param role_title: logical name of the IAM role whose Arn is attached
        :param async_lambda: resource every generated function depends on
        :raises ff.ConfigurationError: when no memory_settings are configured
        """
        # Fall back to {} when the 'firefly_aws' section itself is missing so
        # the intended ConfigurationError below is raised instead of an
        # AttributeError on None.
        memory_settings = self._configuration.contexts.get(
            'firefly_aws', {}).get('memory_settings')
        if memory_settings is None:
            raise ff.ConfigurationError(
                'To use adaptive memory, you must provide a list of memory_settings'
            )

        for memory in memory_settings:
            # One overridable parameter per memory size, e.g. '...1024MemorySize'.
            memory_size = template.add_parameter(
                Parameter(
                    f'{self._lambda_resource_name(context.name)}{memory}MemorySize',
                    Type=NUMBER,
                    Default=str(memory)))

            params = {
                'FunctionName':
                self._lambda_function_name(context.name,
                                           'Async',
                                           memory=memory),
                'Role':
                GetAtt(role_title, 'Arn'),
                'MemorySize':
                Ref(memory_size),
                'Timeout':
                Ref(timeout),
                'Environment':
                self._lambda_environment(context),
                'DependsOn':
                async_lambda,
            }

            # Container-image deployments take precedence; otherwise fall back
            # to the zip artifact already uploaded to the deployment bucket.
            image_uri = self._aws_config.get('image_uri')
            if image_uri is not None:
                params.update({
                    'Code': Code(ImageUri=image_uri),
                    'PackageType': 'Image',
                })
            else:
                params.update({
                    'Code':
                    Code(S3Bucket=self._bucket, S3Key=self._code_key),
                    'Runtime':
                    'python3.7',
                    'Handler':
                    'handlers.main',
                })

            # Attach VPC config only when both pieces are configured.
            if self._security_group_ids and self._subnet_ids:
                params['VpcConfig'] = VPCConfig(
                    SecurityGroupIds=self._security_group_ids,
                    SubnetIds=self._subnet_ids)

            adaptive_memory_lambda = template.add_resource(
                Function(
                    self._lambda_resource_name(context.name, memory=memory),
                    **params))

            # VisibilityTimeout of 905s presumably exceeds the 900s Lambda
            # timeout cap so messages aren't redelivered mid-invocation;
            # 1209600s is SQS's 14-day maximum retention.
            queue = template.add_resource(
                Queue(self._queue_name(context.name, memory=memory),
                      QueueName=self._queue_name(context.name, memory=memory),
                      VisibilityTimeout=905,
                      ReceiveMessageWaitTimeSeconds=20,
                      MessageRetentionPeriod=1209600,
                      DependsOn=[adaptive_memory_lambda]))
            # self._queue_policy(template, queue, self._queue_name(context.name), subscriptions)

            # Wire the queue to its function, one message per invocation.
            template.add_resource(
                EventSourceMapping(
                    f'{self._lambda_resource_name(context.name, memory=memory)}AsyncMapping',
                    BatchSize=1,
                    Enabled=True,
                    EventSourceArn=GetAtt(queue, 'Arn'),
                    FunctionName=self._lambda_function_name(context.name,
                                                            'Async',
                                                            memory=memory),
                    DependsOn=[queue, adaptive_memory_lambda]))
# Converted from SQS_With_CloudWatch_Alarms.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/

from troposphere import GetAtt, Output, Ref, Template
from troposphere.sqs import Queue

# Template: a single SQS queue with server-side encryption (SSE) enabled via
# a KMS master key, plus outputs exposing the queue URL and ARN.
t = Template()

description = (
    "AWS CloudFormation Sample Template SQS: Sample template showing how to "
    "create an SQS queue with Server Side Encryption. **WARNING** This "
    "template creates Amazon SQS Queues. You will be billed for the AWS "
    "resources used if you create a stack from this template.")
t.add_description(description)

# SSE queue: KmsMasterKeyId selects the CMK, and the data-key reuse period
# controls how long SQS caches data keys before calling KMS again.
mysourcequeue = t.add_resource(
    Queue(
        "MySourceQueue",
        KmsMasterKeyId='testing',
        KmsDataKeyReusePeriodSeconds=60,
    ))

# Ref on a queue yields its URL; GetAtt 'Arn' yields its ARN.
outputs = [
    Output("SourceQueueURL",
           Description="URL of the source queue",
           Value=Ref(mysourcequeue)),
    Output("SourceQueueARN",
           Description="ARN of the source queue",
           Value=GetAtt(mysourcequeue, "Arn")),
]
t.add_output(outputs)

print(t.to_json())
# Example #28
# 0
def init_cloud(args):
    """Build the CloudFormation template for the Kala stack.

    Creates an SQS queue, an S3 bucket, EC2 security groups for the web tier
    and the database, an encrypted Postgres RDS instance, and an IAM
    role/instance profile granting the instances access to the bucket and
    the queue.

    :param args: parsed CLI namespace providing sqs_name, s3_name,
        kala_security_group, database_security_group, rds_* and iam_role
        attributes (plus `production` for Multi-AZ).
    :return: the populated troposphere Template
    """
    template = Template()

    queue = template.add_resource(
        Queue(
            "{0}".format(args.sqs_name),
            QueueName="{0}".format(args.sqs_name),
        ))

    bucket = template.add_resource(
        Bucket("{0}".format(args.s3_name),
               BucketName="{0}".format(args.s3_name)))

    kala_security_group = template.add_resource(
        ec2.SecurityGroup(
            "{0}".format(args.kala_security_group),
            GroupName="{0}".format(args.kala_security_group),
            GroupDescription="Enable HTTP and HTTPS access on the inbound port",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="80",
                    ToPort="80",
                    CidrIp="0.0.0.0/0",
                ),
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="443",
                    ToPort="443",
                    CidrIp="0.0.0.0/0",
                ),
            ]))

    # Postgres access is restricted to hosts in the web security group.
    database_security_group = template.add_resource(
        ec2.SecurityGroup(
            "{0}".format(args.database_security_group),
            GroupName="{0}".format(args.database_security_group),
            GroupDescription="Enable Database access for the security groups",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="5432",
                    ToPort="5432",
                    SourceSecurityGroupName=Ref(kala_security_group),
                ),
            ]))

    database = template.add_resource(
        rds.DBInstance(
            "{0}".format(args.rds_instance_name),
            DBInstanceIdentifier="{0}".format(args.rds_instance_name),
            DBName=args.rds_name,
            # BUG FIX: the credentials were formatted with a placeholder-less
            # literal ("******".format(...)), which discards the argument and
            # would set the username/password to literal asterisks.
            MasterUsername="{0}".format(args.rds_username),
            MasterUserPassword="{0}".format(args.rds_password),
            AllocatedStorage=args.rds_allocated_storage,
            DBInstanceClass=args.rds_instance_class,
            Engine="postgres",
            MultiAZ=args.production,
            StorageEncrypted=True,
            VPCSecurityGroups=[GetAtt(database_security_group, "GroupId")]))

    # Allow every S3 action on the bucket's objects.
    s3_policy = PolicyDocument(
        Version="2012-10-17",
        Id="{0}Policy".format(args.s3_name),
        Statement=[
            Statement(Effect="Allow",
                      Action=[S3Action("*")],
                      Resource=[Join("", [GetAtt(bucket, "Arn"), "/*"])]),
        ])

    # Allow every SQS action on the queue.  BUG FIX: the Id previously reused
    # args.s3_name (copy-paste from s3_policy), duplicating the S3 policy Id.
    sqs_policy = PolicyDocument(Version="2012-10-17",
                                Id="{0}Policy".format(args.sqs_name),
                                Statement=[
                                    Statement(Effect="Allow",
                                              Action=[SQSAction("*")],
                                              Resource=[GetAtt(queue, "Arn")])
                                ])

    # EC2-assumable role carrying both inline policies, exposed through an
    # instance profile so hosts can be launched with it.
    role = Role('{0}Role'.format(args.iam_role),
                RoleName='{0}Role'.format(args.iam_role),
                AssumeRolePolicyDocument={
                    "Version":
                    "2012-10-17",
                    "Statement": [{
                        "Action": "sts:AssumeRole",
                        "Effect": "Allow",
                        "Principal": {
                            "Service": "ec2.amazonaws.com"
                        }
                    }]
                },
                Policies=[
                    Policy(PolicyName="KalaS3Policy",
                           PolicyDocument=s3_policy),
                    Policy(PolicyName="KalaSQSPolicy",
                           PolicyDocument=sqs_policy)
                ])
    template.add_resource(role)
    template.add_resource(
        InstanceProfile("{0}InstanceProfile".format(args.iam_role),
                        Roles=[Ref(role)],
                        InstanceProfileName="{0}InstanceProfile".format(
                            args.iam_role)))

    return template