コード例 #1
0
ファイル: dynamoDb.py プロジェクト: zakia2/myRetail
from troposphere import Output, Parameter, Ref, Template
from troposphere.dynamodb import (KeySchema, AttributeDefinition,
                                  ProvisionedThroughput)
from troposphere.dynamodb import Table

# Template that provisions the DynamoDB table backing the product catalog.
template = Template()
template.set_description(
    "AWS CloudFormation template for creating db for products")

# Single-attribute table: "productid" (string) is the hash key.
products_table = template.add_resource(
    Table(
        "products",
        AttributeDefinitions=[
            AttributeDefinition(AttributeName="productid", AttributeType="S"),
        ],
        KeySchema=[KeySchema(AttributeName="productid", KeyType="HASH")],
        ProvisionedThroughput=ProvisionedThroughput(
            ReadCapacityUnits=5, WriteCapacityUnits=5),
    ))

# Expose the table (Ref of a DynamoDB table yields its name) as an output.
template.add_output(Output("products", Value=Ref(products_table)))

print(template.to_json())
コード例 #2
0
ファイル: elb.py プロジェクト: ggaugain/sceptre
 def add_outputs(self):
     """Register the ELB reference as a stack output, keeping a handle to it."""
     elb_ref = Ref(self.ElasticLoadBalancer)
     self.out = self.template.add_output([
         Output("ElbWeb", Value=elb_ref),
     ])
コード例 #3
0
ファイル: dev-stack.py プロジェクト: techman83/NetKAN-Infra
                               GetAtt(addqueue, "Arn"),
                               GetAtt(mirrorqueue, "Arn"),
                           ]
                       },
                       {
                           "Effect": "Allow",
                           "Action": "sqs:ListQueues",
                           "Resource": "*",
                       },
                   ],
               }))

# Publish a URL and an ARN output for every SQS queue in the stack.
for queue in [inbound, outbound, addqueue, mirrorqueue]:
    title = queue.title
    t.add_output([
        Output("{}QueueURL".format(title),
               Description="{} SQS Queue URL".format(title),
               Value=Ref(queue)),
        Output("{}QueueARN".format(title),
               Description="ARN of {} SQS Queue".format(title),
               Value=GetAtt(queue, "Arn")),
    ])

dev_db = t.add_resource(
    Table("DevNetKANStatus",
          AttributeDefinitions=[
              AttributeDefinition(AttributeName="ModIdentifier",
                                  AttributeType="S"),
          ],
          KeySchema=[KeySchema(AttributeName="ModIdentifier", KeyType="HASH")],
          TableName="DevNetKANStatus",
          ProvisionedThroughput=ProvisionedThroughput(ReadCapacityUnits=5,
コード例 #4
0
# Lambda@Edge ARN arrives as a parameter rather than a cross-stack import
# (see the migration note further down in this file).
param_laearn = template.add_parameter(
    Parameter(
        "ParamLaeArn",
        Type=constants.STRING,
        Description="ARN of the Lambda@Edge function",
    ))
template.set_parameter_label(param_laearn, "Lambda@Edge ARN")

# Custom resource exposing the stack's CloudFormation tags.
cloudformation_tags = template.add_resource(
    custom_resources.cloudformation.Tags("CfnTags"))

# Re-export the authorizer stack's domain name under this stack's own
# "<stack>-domain-name" export.
template.add_output(
    Output(
        "ApiDomain",
        Description='Domain name of the API',
        Value=ImportValue(
            Sub('${' + param_authorizer_stack.title + '}-domain-name')),
        Export=Export(Join('-', [Ref(AWS_STACK_NAME), 'domain-name'])),
    ))

# Same pattern for the authorizer stack's magic path.
template.add_output(
    Output(
        "MagicPath",
        Description='Magic path',
        Value=ImportValue(
            Sub('${' + param_authorizer_stack.title + '}-magic-path')),
        Export=Export(Join('-', [Ref(AWS_STACK_NAME), 'magic-path'])),
    ))

# Don't simply import-output the Lambda@Edge ARN, but do it via a Parameter
# This allows us to migrate to a new L@E function gradually (otherwise, the output value would be locked and can't
コード例 #5
0
    def add_ec2_instance(self):
        """Create the Streamlit EC2 instance, its Elastic IP, and outputs.

        Registers in ``self._resources``: an ``Ec2Instance`` whose
        user-data bootstrap runs cfn-init, installs streamlit/rmate, and
        schedules stack self-deletion via ``at``; plus an ``IPAddress``
        EIP bound to that instance.  Registers in ``self._outputs``:
        ``SshIp``, ``SshCommand`` and ``StreamlitEndpoint`` (port 8501).
        """
        self._resources.update({
            'Ec2Instance': ec2.Instance(
                'Ec2Instance',
                # AMI chosen per-region from the RegionMap mapping.
                ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
                InstanceType=Ref(self._parameters['Ec2InstanceType']),
                KeyName=Ref(self._parameters['SshKeyName']),
                NetworkInterfaces=[
                    ec2.NetworkInterfaceProperty(
                        GroupSet=[
                            Ref(self._resources['Ec2InstanceSecurityGroup']),
                        ],
                        # Public IP so users can reach SSH and Streamlit.
                        AssociatePublicIpAddress='true',
                        DeviceIndex='0',
                        DeleteOnTermination='true',
                        SubnetId=Ref(self._parameters['SubnetId']),
                    ),
                ],
                UserData=Base64(Join(
                    '',
                    [
                        '#!/bin/bash\n',
                        '# Install the files and packages from the metadata\n',
                        '/usr/local/bin/cfn-init -v ',
                        '         --stack ', self._stack_name,
                        '         --resource Ec2Instance ',
                        '         --configsets InstallAndConfigure ',
                        '         --region ', self._region, '\n',
                        # Add a temporary /usr/local/bin/streamlit so
                        # user knows its still installing.
                        'echo -e \'#!/bin/sh\necho Streamlit is still installing. Please try again in a few minutes.\n\' > /usr/local/bin/streamlit \n',
                        'chmod +x /usr/local/bin/streamlit \n',
                        # Create ~/sshfs dir which is the target of the sshfs mount commmand
                        'install -o ubuntu -g ubuntu -m 755 -d ~ubuntu/sshfs \n',
                        # Install streamlit.
                        '/home/ubuntu/anaconda3/bin/pip install streamlit \n',
                        # Install rmate.
                        'curl -o /usr/local/bin/rmate https://raw.githubusercontent.com/aurora/rmate/master/rmate \n',
                        'chmod +x /usr/local/bin/rmate \n',
                        # After streamlit is installed, remove the
                        # temporary script and add a link to point to
                        # streamlit in the anaconda directory.  This is
                        # needed so we dont have to make the user to
                        # `rehash` in order to pick up the new location.
                        'rm -f /usr/local/bin/streamlit \n',
                        'ln -fs /home/ubuntu/anaconda3/bin/streamlit /usr/local/bin/streamlit \n',
                        # Get streamlit config which has the proxy wait
                        # for 2 minutes and any other options we added.
                        'curl -o /tmp/config.toml https://streamlit.io/cf/config.toml \n',
                        'install -m 755 -o ubuntu -g ubuntu -d ~ubuntu/.streamlit \n',
                        'install -m 600 -o ubuntu -g ubuntu -t ~ubuntu/.streamlit /tmp/config.toml \n',
                    ]
                )),
                Metadata=cloudformation.Metadata(
                    cloudformation.Init(
                        cloudformation.InitConfigSets(InstallAndConfigure=['config']),
                        config=cloudformation.InitConfig(
                            files={
                                # Script executed below to self-delete the stack.
                                '/usr/local/bin/deletestack.sh' : {
                                    'content' : Join('\n', [
                                        Sub('aws cloudformation delete-stack --region ${AWS::Region} --stack-name ${AWS::StackName}'),
                                    ]),
                                    'mode'    : '000444',
                                    'owner'   : 'root',
                                    'group'   : 'root'
                                },
                            },
                            commands={
                                # Schedule stack deletion after the TTL via at(1).
                                'schedule_stack_deletion': {
                                    'command': Join('', [
                                        'at -f /usr/local/bin/deletestack.sh "now + ',
                                        Ref(self._parameters['StackTTL']), ' ', Ref(self._parameters['StackTTLUnits']), '"'
                                    ]),
                                }
                            }
                        )
                    )
                ),
                Tags=Tags(
                    Application=self._stack_id,
                    Name=Sub('Streamlit EC2 Instance (${AWS::StackName})'),
                ),
                IamInstanceProfile=Ref(self._resources['Ec2IamInstanceProfile']),
                DependsOn='StackDeletorRole',
            ),
        })

        # Static public IP tied to the instance.
        self._resources.update({
            'IPAddress': ec2.EIP(
                'IPAddress',
                Domain='vpc',
                InstanceId=Ref(self._resources['Ec2Instance']),
                DependsOn='StackDeletorRole',
            ),
        })

        # Convenience outputs for connecting to the instance.
        self._outputs.update({
            'SshIp': Output(
                'SshIp',
                Description='SshIp',
                Value=GetAtt('Ec2Instance', 'PublicIp'),
            ),
            'SshCommand': Output(
                'SshCommand',
                Description='SshCommand',
                Value=Join('',
                    [
                        'ssh ubuntu@',
                        GetAtt('Ec2Instance', 'PublicIp'),
                    ]
                )
            ),
            'StreamlitEndpoint': Output(
                'StreamlitEndpoint',
                Description='Streamlit endpoint',
                Value=Join('',
                    [
                        GetAtt('Ec2Instance', 'PublicIp'),
                        ':8501',
                    ]
                )
            ),
        })
コード例 #6
0
    def set_up_stack(self):
        """Assemble the Raster Foundry application-server stack.

        Sets default tags and region from stack inputs, declares every
        template parameter, then creates security groups, the load
        balancer, auto-scaling and CloudWatch resources, and finally
        exposes the load balancer endpoint and hosted-zone ID as outputs.
        """
        super(Application, self).set_up_stack()

        # Copy so the shared input dict is not mutated.
        tags = self.get_input('Tags').copy()
        tags.update({'StackType': 'Application'})

        self.default_tags = tags
        self.region = self.get_input('Region')

        self.add_description('Application server stack for Raster Foundry')

        # Parameters
        self.color = self.add_parameter(
            Parameter('StackColor',
                      Type='String',
                      Description='Stack color',
                      AllowedValues=['Blue', 'Green']), 'StackColor')

        self.keyname = self.add_parameter(
            Parameter('KeyName',
                      Type='String',
                      Description='Name of an existing EC2 key pair'),
            'KeyName')

        self.availability_zones = self.add_parameter(
            Parameter(
                'AvailabilityZones',
                Type='CommaDelimitedList',
                Description='Comma delimited list of availability zones'),
            'AvailabilityZones')

        self.rds_password = self.add_parameter(
            Parameter(
                'RDSPassword',
                Type='String',
                # NoEcho keeps the password out of console/API echoes.
                NoEcho=True,
                Description='Database password',
            ), 'RDSPassword')

        self.app_server_instance_type = self.add_parameter(
            Parameter(
                'AppServerInstanceType',
                Type='String',
                Default='t2.micro',
                Description='Application server EC2 instance type',
                AllowedValues=EC2_INSTANCE_TYPES,
                ConstraintDescription='must be a valid EC2 instance type.'),
            'AppServerInstanceType')

        self.app_server_ami = self.add_parameter(
            Parameter('AppServerAMI',
                      Type='String',
                      Default=self.get_recent_app_server_ami(),
                      Description='Application server AMI'), 'AppServerAMI')

        self.app_server_instance_profile = self.add_parameter(
            Parameter('AppServerInstanceProfile',
                      Type='String',
                      Default='AppServerInstanceProfile',
                      Description='Application server instance profile'),
            'AppServerInstanceProfile')

        self.app_server_auto_scaling_desired = self.add_parameter(
            Parameter(
                'AppServerAutoScalingDesired',
                Type='String',
                Default='1',
                Description='Application server AutoScalingGroup desired'),
            'AppServerAutoScalingDesired')

        self.app_server_auto_scaling_min = self.add_parameter(
            Parameter(
                'AppServerAutoScalingMin',
                Type='String',
                Default='1',
                Description='Application server AutoScalingGroup minimum'),
            'AppServerAutoScalingMin')

        self.app_server_auto_scaling_max = self.add_parameter(
            Parameter(
                'AppServerAutoScalingMax',
                Type='String',
                Default='1',
                Description='Application server AutoScalingGroup maximum'),
            'AppServerAutoScalingMax')

        self.ssl_certificate_arn = self.add_parameter(
            Parameter('SSLCertificateARN',
                      Type='String',
                      Description='ARN for a SSL certificate stored in IAM'),
            'SSLCertificateARN')

        self.public_subnets = self.add_parameter(
            Parameter('PublicSubnets',
                      Type='CommaDelimitedList',
                      Description='A list of public subnets'), 'PublicSubnets')

        self.private_subnets = self.add_parameter(
            Parameter('PrivateSubnets',
                      Type='CommaDelimitedList',
                      Description='A list of private subnets'),
            'PrivateSubnets')

        self.vpc_id = self.add_parameter(
            Parameter('VpcId', Type='String', Description='VPC ID'), 'VpcId')

        self.notification_topic_arn = self.add_parameter(
            Parameter(
                'GlobalNotificationsARN',
                Type='String',
                Description='ARN for an SNS topic to broadcast notifications'),
            'GlobalNotificationsARN')

        # Resources: security groups, load balancer, auto scaling, CloudWatch.
        app_server_lb_security_group, \
            app_server_security_group = self.create_security_groups()
        app_server_lb = self.create_load_balancer(app_server_lb_security_group)

        self.create_auto_scaling_resources(app_server_security_group,
                                           app_server_lb)

        self.create_cloud_watch_resources(app_server_lb)

        # Outputs: LB endpoint and hosted zone ID for DNS aliasing.
        self.add_output(
            Output('AppServerLoadBalancerEndpoint',
                   Value=GetAtt(app_server_lb, 'DNSName')))
        self.add_output(
            Output('AppServerLoadBalancerHostedZoneNameID',
                   Value=GetAtt(app_server_lb, 'CanonicalHostedZoneNameID')))
コード例 #7
0
                                    'Effect': 'Allow',
                                    'Resource': '*'
                                }, {
                                    'Action': 'cloudfront:CreateInvalidation',
                                    'Effect': 'Allow',
                                    'Resource': '*'
                                }]
                            })
             ]))

# ==================================================
# Outputs.
# ==================================================
# Emit the bucket and CloudFront identifiers as stack outputs.
for output_title, output_desc, output_value in (
        ('BucketName', 'The S3 bucket name', bucket_name_variable),
        ('CloudFrontId', 'The ID of the CloudFront distribution',
         Ref(distribution_resource)),
        ('CloudFrontDomain', 'The domain name of the CloudFront distribution',
         GetAtt(distribution_resource, 'DomainName')),
):
    template.add_output(
        Output(output_title, Description=output_desc, Value=output_value))

# ==================================================
# Print the generated template in JSON.
# ==================================================
コード例 #8
0
        ],
        KeySchema=[
            KeySchema(AttributeName=Ref(tableIndexName), KeyType="HASH")
        ],
        ProvisionedThroughput=ProvisionedThroughput(
            ReadCapacityUnits=Ref(readunits),
            WriteCapacityUnits=Ref(writeunits)),
        GlobalSecondaryIndexes=[
            GlobalSecondaryIndex(
                IndexName="SecondaryIndex",
                KeySchema=[
                    KeySchema(AttributeName=Ref(secondaryIndexHashName),
                              KeyType="HASH"),
                    KeySchema(AttributeName=Ref(secondaryIndexRangeName),
                              KeyType="RANGE")
                ],
                Projection=Projection(ProjectionType="ALL"),
                ProvisionedThroughput=ProvisionedThroughput(
                    ReadCapacityUnits=Ref(readunits),
                    WriteCapacityUnits=Ref(writeunits)))
        ]))

# Surface the table name so callers of this template can reference it.
gsi_table_output = Output(
    "GSITable",
    Value=Ref(GSITable),
    Description="Table with a Global Secondary Index",
)
template.add_output(gsi_table_output)

print(template.to_json())
コード例 #9
0
    def create_bastion_host(self):
        """Render the bastion-host CloudFormation template to YAML.

        Builds a security group allowing inbound SSH, an EC2 instance in
        the first public subnet of the configured VPC, and an exported
        output with the security-group ID, then writes the template to
        modules/template_bastion_host.yaml.
        """
        template = Template()
        template.add_version('2010-09-09')

        # Wordpress preparation: format vpc name and dump public subnets in a separate list

        vpc_name_formatted = ''.join(
            e for e in self.bastion_vpc_name if e.isalnum()).capitalize()

        # Only public subnets can host the bastion (it needs inbound SSH);
        # a list comprehension replaces the previous filter(lambda ...).
        public_subnet_configs = [
            subnet for subnet in self.bastion_vpc_subnets
            if subnet["type"] == "public"
        ]

        public_subnets = []
        for subnet in public_subnet_configs:
            subnet_name_formatted = ''.join(
                e for e in subnet["name"] if e.isalnum()).capitalize()

            # Subnet IDs are exported by the VPC stack under a
            # "<stage><vpc><subnet>SubnetId" naming convention.
            public_subnets.append(ImportValue("{}{}{}SubnetId".format(
                self.stage, vpc_name_formatted, subnet_name_formatted)))

        bastion_host_security_group = template.add_resource(
            SecurityGroup(
                "{}BastionHostSecurityGroup".format(self.stage),
                GroupName=self.security_group_name,
                GroupDescription="Enables external ssh access to the bastion host",
                VpcId=ImportValue("{}{}VpcId".format(self.stage, vpc_name_formatted)),
                SecurityGroupIngress=[
                    SecurityGroupRule(
                        IpProtocol="tcp",
                        FromPort="22",
                        ToPort="22",
                        # NOTE(review): SSH is open to the world; consider
                        # narrowing CidrIp to a trusted range.
                        CidrIp="0.0.0.0/0"
                    )
                ]
            )
        )

        template.add_resource(
            Instance(
                "{}BastionHost".format(self.stage),
                Tags=Tags(
                    Name=self.instance_name
                ),
                SecurityGroupIds=[Ref(bastion_host_security_group)],
                InstanceType=self.instance_type,
                ImageId=self.instance_ami,
                KeyName=self.key_name,
                # Place the host in the first public subnet; raises
                # StopIteration if none are configured (as before).
                SubnetId=next(iter(public_subnets))
            )
        )

        template.add_output(
            [
                Output(
                    "{}BastionHostSecurityGroupID".format(self.stage),
                    Description="Group ID of the security group",
                    Value=Ref(bastion_host_security_group),
                    Export=Export("{}BastionHostSecurityGroupID".format(self.stage))
                )
            ]
        )

        # Use a context manager so the file handle is always closed
        # (the previous version opened the file and never closed it).
        with open("modules/template_bastion_host.yaml", 'w') as f:
            print(template.to_yaml(), file=f)
コード例 #10
0
        ]),
        Roles=[Ref("Role")]))

# Kubernetes worker node: private-IP instance in a fixed subnet, using the
# shared user-data script and the nodes' instance profile.
t.add_resource(
    ec2.Instance("KubernetesNodes",
                 ImageId="ami-8ec0e1f4",
                 UserData=ud,
                 InstanceType=Ref("InstanceType"),
                 KeyName=Ref("KeyPair"),
                 IamInstanceProfile=Ref("NodesInstanceProfile"),
                 NetworkInterfaces=[
                     ec2.NetworkInterfaceProperty(
                         GroupSet=[Ref("NodesSecurityGroup")],
                         AssociatePublicIpAddress='false',
                         SubnetId="subnet-d1c1d09a",
                         DeviceIndex='0',
                     )
                 ]))

# Application endpoint built from the load balancer's DNS name.
t.add_output(
    Output(
        "WebUrl",
        Description="Application endpoint",
        Value=Join("", [
            "http://",
            GetAtt("LoadBalancer", "DNSName"), ":", ApplicationPort
        ]),
    ))

# Fixed: use the print() function (the bare "print t.to_json()" statement
# was Python-2-only syntax; print() works on both 2 and 3).
print(t.to_json())
コード例 #11
0
                     Name="ApplicationElasticLB",
                     Scheme="internet-facing",
                     Subnets=[x["SubnetId"] for x in subnets]))

# HTTP listener on :80 forwarding to the Lambda-backed target group.
t.add_resource(
    elb.Listener("Listener",
                 LoadBalancerArn=Ref(ApplicationElasticLB),
                 Port=80,
                 Protocol="HTTP",
                 DefaultActions=[
                     elb.Action(Type="forward",
                                TargetGroupArn=Ref(targetGroup))
                 ]))

t.add_output([
    Output("LoadbalancerArn", Value=Ref(ApplicationElasticLB)),
    Output("LoadbalancerDNSName",
           Value=GetAtt(ApplicationElasticLB, 'DNSName')),
    Output("AppFunctionArn", Value=GetAtt(app_function, "Arn")),
])

# Render the template once and reuse it for the local file and validation
# (previously t.to_json() was computed twice and the file handle leaked).
template_body = t.to_json()
with open("cloud.json", "w") as template_file:
    template_file.write(template_body)

cf = boto3.client('cloudformation')
print("Validating template")
cf.validate_template(TemplateBody=template_body)

# PEP 8: compare against None with "is", not "==".
stack_name = app_name if args.stack_name is None else args.stack_name

# Using the filter functions on describe_stacks makes it fail when there's zero entries...
print("Checking existing CloudFormation stacks")
コード例 #12
0
ファイル: tiler_api_stack.py プロジェクト: hotosm/oam-server
    MetricName='HTTPCode_Backend_5XX',
    Namespace='AWS/ELB',
    Dimensions=[
        cw.MetricDimension(
            'metricLoadBalancerName',
            Name='LoadBalancerName',
            Value=Ref(tiler_load_balancer)
        )
    ],
))

#
# Outputs
#
# Expose the load balancer's DNS name so clients can reach the tiler.
t.add_output([
    Output('ServerLoadBalancerEndpoint',
           Description='Server load balancer server endpoint',
           Value=GetAtt(tiler_load_balancer, 'DNSName')),
])

if __name__ == '__main__':
    rendered = t.to_json()
    output_path = __file__.replace('.py', '.json')

    validate_cloudformation_template(rendered)

    with open(output_path, 'w') as out_file:
        out_file.write(rendered)

    print('Template validated and written to %s' % output_path)
コード例 #13
0
ファイル: elb.py プロジェクト: MIams-REISys/amazonia
    def __init__(self, title, template, network_config, elb_config):
        """
        Public Class to create an Elastic Loadbalancer in the unit stack environment
        AWS Cloud Formation: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-elb.html
        Troposphere: https://github.com/cloudtools/troposphere/blob/master/troposphere/elasticloadbalancing.py
        :param title: Name of the Cloud formation stack object
        :param template: The troposphere template to add the Elastic Loadbalancer to.
        :param network_config: object containing network related variables
        :param elb_config: object containing elb related variables, including list of listeners (elb_listener_config)
        """
        self.title = title + 'Elb'
        self.elb_r53 = None
        self.elb_config = elb_config
        self.network_config = network_config
        super(Elb, self).__init__(vpc=network_config.vpc,
                                  title=title,
                                  template=template)
        elb_listeners = elb_config.elb_listeners_config
        # Internet-facing ELBs live in public subnets, internal in private.
        subnets = network_config.public_subnets if elb_config.public_unit is True else network_config.private_subnets
        # Create Tags
        tags = Tags(Name=self.title)
        tags += Tags(owner=elb_config.owner)

        # Create ELB
        self.trop_elb = self.template.add_resource(
            elb.LoadBalancer(
                self.title,
                CrossZone=True,
                HealthCheck=elb.HealthCheck(
                    Target=elb_config.elb_health_check,
                    HealthyThreshold=elb_config.healthy_threshold,
                    UnhealthyThreshold=elb_config.unhealthy_threshold,
                    Interval=elb_config.interval,
                    Timeout=elb_config.timeout),
                # One ELB listener per configured listener entry.
                Listeners=[
                    elb.Listener(
                        LoadBalancerPort=elb_listener.loadbalancer_port,
                        Protocol=elb_listener.loadbalancer_protocol,
                        InstancePort=elb_listener.instance_port,
                        InstanceProtocol=elb_listener.instance_protocol)
                    for elb_listener in elb_listeners
                ],
                Scheme='internet-facing'
                if elb_config.public_unit is True else 'internal',
                SecurityGroups=[self.security_group],
                Subnets=subnets,
                Tags=tags))
        if network_config.get_depends_on():
            self.trop_elb.DependsOn = network_config.get_depends_on()

        # App sticky session cookies
        sticky_app_cookie_policies = []
        # sticky_app_cookie defaults to None, gather listeners that have cookies
        listeners_with_cookies = [
            listener for listener in elb_listeners
            if listener.sticky_app_cookie
        ]

        # NOTE: listener_num indexes into listeners_with_cookies, and the
        # assignment below assumes the same relative ordering holds in
        # self.trop_elb.Listeners (both derive from elb_listeners).
        for listener_num, listener in enumerate(listeners_with_cookies):
            policy_name = self.title + 'AppCookiePolicy' + listener.sticky_app_cookie \
                          + str(listener.instance_port) + str(listener.loadbalancer_port) \
                          + str(listener.instance_protocol)

            sticky_app_cookie_policies.append(
                elb.AppCookieStickinessPolicy(
                    CookieName=listener.sticky_app_cookie,
                    PolicyName=policy_name))

            # Even though ELB.Listeners.PolicyNames is a List in the cloudformation documentation,
            # it only accepts a single list element, not multiple...
            self.trop_elb.Listeners[listener_num].PolicyNames = [policy_name]

        if sticky_app_cookie_policies:
            self.trop_elb.AppCookieStickinessPolicy = sticky_app_cookie_policies

        # Create SSL for Listeners
        for listener in self.trop_elb.Listeners:
            if elb_config.ssl_certificate_id and listener.Protocol == 'HTTPS':
                listener.SSLCertificateId = elb_config.ssl_certificate_id

        # Create ELB Log Bucket
        if elb_config.elb_log_bucket:
            self.trop_elb.AccessLoggingPolicy = elb.AccessLoggingPolicy(
                EmitInterval='60',
                Enabled=True,
                S3BucketName=elb_config.elb_log_bucket,
                S3BucketPrefix=Join('',
                                    [Ref('AWS::StackName'), '-', self.title]))

        # DNS: private zone for internal units, public zone when available,
        # otherwise fall back to a raw-URL stack output.
        if not elb_config.public_unit:
            self.create_r53_record(network_config.private_hosted_zone_domain)
        elif network_config.public_hosted_zone_name:
            self.create_r53_record(network_config.public_hosted_zone_name)

        else:
            self.template.add_output(
                Output(self.trop_elb.title,
                       Description='URL of the {0} ELB'.format(self.title),
                       Value=Join(
                           '', ['http://',
                                GetAtt(self.trop_elb, 'DNSName')])))

        # Record the ELB's DNS name so other units can reach this endpoint.
        self.network_config.endpoints[title] = GetAtt(self.trop_elb, 'DNSName')
コード例 #14
0
    SubnetRouteTableAssociation("SubnetRoutetablePublic2Assoc",
                                RouteTableId=Ref(routetablepublic),
                                SubnetId=Ref(subnetpublic2)))

# Associate each private subnet with the private route table.
subnetroutetableprivate1assoc = t.add_resource(
    SubnetRouteTableAssociation("SubnetRoutetablePrivate1Assoc",
                                RouteTableId=Ref(routetableprivate),
                                SubnetId=Ref(subnetprivate1)))

subnetroutetableprivate2assoc = t.add_resource(
    SubnetRouteTableAssociation("SubnetRoutetablePrivate2Assoc",
                                RouteTableId=Ref(routetableprivate),
                                SubnetId=Ref(subnetprivate2)))

t.add_output([
    Output("VpcId", Description="Newly Created Vpc Id", Value=Ref(vpc)),
    Output("PublicRoutable",
           Description="Newly Created Public Roubtable",
           Value=Ref(routetablepublic)),
    Output("PrivateRoutable",
           Description="Newly Created Private Roubtable",
           Value=Ref(routetableprivate)),
    Output("PublicNetworkACL",
           Description="Newly Created Public NetworkAcl",
           Value=Ref(networkaclpublic)),
    Output("PrivateNetworkACL",
           Description="Newly Created Private NetworkAcl",
           Value=Ref(networkaclprivate)),
    Output("InternetGateway",
           Description="Newly Created Internet Gateway",
           Value=Ref(internetgateway)),
コード例 #15
0
                                    'Resource': [
                                        GetAtt(default_queue_resource, 'Arn'),
                                        GetAtt(notifications_queue_resource,
                                               'Arn'),
                                        GetAtt(search_queue_resource, 'Arn')
                                    ]
                                }]
                            })
             ]))

# ==================================================
# Outputs.
# ==================================================
# Emit the bucket name and queue URIs as stack outputs.
for out_title, out_desc, out_value in (
        ('BucketName', 'The S3 bucket name', bucket_name_variable),
        ('DefaultQueue', 'The URI of the default queue',
         Ref(default_queue_resource)),
        ('NotificationsQueue', 'The URI of the notifications queue',
         Ref(notifications_queue_resource)),
):
    template.add_output(
        Output(out_title, Description=out_desc, Value=out_value))

template.add_output(
    Output('SearchQueue',
           Description='The URI of the search queue',
コード例 #16
0
def create_template():
    template = Template(Description=(
        "Static website hosted with S3 and CloudFront. "
        "https://github.com/schlarpc/overengineered-cloudfront-s3-static-website"
    ))

    partition_config = add_mapping(
        template,
        "PartitionConfig",
        {
            "aws": {
                # the region with the control plane for CloudFront, IAM, Route 53, etc
                "PrimaryRegion":
                "us-east-1",
                # assume that Lambda@Edge replicates to all default enabled regions, and that
                # future regions will be opt-in. generated with AWS CLI:
                # aws ec2 describe-regions --all-regions --query "Regions[?OptInStatus=='opt-in-not-required'].RegionName|sort(@)"
                "DefaultRegions": [
                    "ap-northeast-1",
                    "ap-northeast-2",
                    "ap-northeast-3",
                    "ap-south-1",
                    "ap-southeast-1",
                    "ap-southeast-2",
                    "ca-central-1",
                    "eu-central-1",
                    "eu-north-1",
                    "eu-west-1",
                    "eu-west-2",
                    "eu-west-3",
                    "sa-east-1",
                    "us-east-1",
                    "us-east-2",
                    "us-west-1",
                    "us-west-2",
                ],
            },
            # this doesn't actually work, because Lambda@Edge isn't supported in aws-cn
            "aws-cn": {
                "PrimaryRegion": "cn-north-1",
                "DefaultRegions": ["cn-north-1", "cn-northwest-1"],
            },
        },
    )

    acm_certificate_arn = template.add_parameter(
        Parameter(
            "AcmCertificateArn",
            Description=
            "Existing ACM certificate to use for serving TLS. Overrides HostedZoneId.",
            Type="String",
            AllowedPattern="(arn:[^:]+:acm:[^:]+:[^:]+:certificate/.+|)",
            Default="",
        ))

    hosted_zone_id = template.add_parameter(
        Parameter(
            "HostedZoneId",
            Description=
            "Existing Route 53 zone to use for validating a new TLS certificate.",
            Type="String",
            AllowedPattern="(Z[A-Z0-9]+|)",
            Default="",
        ))

    dns_names = template.add_parameter(
        Parameter(
            "DomainNames",
            Description=
            "Comma-separated list of additional domain names to serve.",
            Type="CommaDelimitedList",
            Default="",
        ))

    tls_protocol_version = template.add_parameter(
        Parameter(
            "TlsProtocolVersion",
            Description=
            "CloudFront TLS security policy; see https://amzn.to/2DR91Xq for details.",
            Type="String",
            Default="TLSv1.2_2019",
        ))

    log_retention_days = template.add_parameter(
        Parameter(
            "LogRetentionDays",
            Description=
            "Days to keep CloudFront, S3, and Lambda logs. 0 means indefinite retention.",
            Type="Number",
            AllowedValues=[0] + CLOUDWATCH_LOGS_RETENTION_OPTIONS,
            Default=365,
        ))

    default_ttl_seconds = template.add_parameter(
        Parameter(
            "DefaultTtlSeconds",
            Description="Cache time-to-live when not set by S3 object headers.",
            Type="Number",
            Default=int(datetime.timedelta(minutes=5).total_seconds()),
        ))

    enable_price_class_hack = template.add_parameter(
        Parameter(
            "EnablePriceClassHack",
            Description="Cut your bill in half with this one weird trick.",
            Type="String",
            Default="false",
            AllowedValues=["true", "false"],
        ))

    retention_defined = add_condition(template, "RetentionDefined",
                                      Not(Equals(Ref(log_retention_days), 0)))

    using_price_class_hack = add_condition(
        template, "UsingPriceClassHack",
        Equals(Ref(enable_price_class_hack), "true"))

    using_acm_certificate = add_condition(
        template, "UsingAcmCertificate",
        Not(Equals(Ref(acm_certificate_arn), "")))

    using_hosted_zone = add_condition(template, "UsingHostedZone",
                                      Not(Equals(Ref(hosted_zone_id), "")))

    using_certificate = add_condition(
        template,
        "UsingCertificate",
        Or(Condition(using_acm_certificate), Condition(using_hosted_zone)),
    )

    should_create_certificate = add_condition(
        template,
        "ShouldCreateCertificate",
        And(Condition(using_hosted_zone),
            Not(Condition(using_acm_certificate))),
    )

    using_dns_names = add_condition(template, "UsingDnsNames",
                                    Not(Equals(Select(0, Ref(dns_names)), "")))

    is_primary_region = "IsPrimaryRegion"
    template.add_condition(
        is_primary_region,
        Equals(Region, FindInMap(partition_config, Partition,
                                 "PrimaryRegion")),
    )

    precondition_region_is_primary = template.add_resource(
        WaitConditionHandle(
            "PreconditionIsPrimaryRegionForPartition",
            Condition=is_primary_region,
        ))

    log_ingester_dlq = template.add_resource(
        Queue(
            "LogIngesterDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
        ))

    log_ingester_role = template.add_resource(
        Role(
            "LogIngesterRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[GetAtt(log_ingester_dlq, "Arn")],
                            )
                        ],
                    ),
                )
            ],
        ))

    log_ingester = template.add_resource(
        Function(
            "LogIngester",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(log_ingest.handler.__name__),
            Code=Code(ZipFile=inspect.getsource(log_ingest)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(log_ingester_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(log_ingester_dlq, "Arn")),
        ))

    log_ingester_permission = template.add_resource(
        Permission(
            "LogIngesterPermission",
            FunctionName=GetAtt(log_ingester, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="s3.amazonaws.com",
            SourceAccount=AccountId,
        ))

    log_bucket = template.add_resource(
        Bucket(
            "LogBucket",
            # S3 requires this ACL (regardless of bucket policy) or s3:PutBucketLogging fails.
            # When the CloudFront distribution is created, it adds an additional bucket ACL.
            # That ACL is not possible to model in CloudFormation.
            AccessControl="LogDeliveryWrite",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                LifecycleRule(ExpirationInDays=1, Status="Enabled"),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=1),
                    Status="Enabled",
                ),
            ]),
            NotificationConfiguration=NotificationConfiguration(
                LambdaConfigurations=[
                    LambdaConfigurations(Event="s3:ObjectCreated:*",
                                         Function=GetAtt(log_ingester, "Arn"))
                ]),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # if we use KMS, we can't read the logs
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            DependsOn=[log_ingester_permission],
        ))

    log_ingester_log_group = template.add_resource(
        LogGroup(
            "LogIngesterLogGroup",
            LogGroupName=Join(
                "", ["/aws/lambda/", Ref(log_ingester)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    log_ingester_policy = template.add_resource(
        PolicyType(
            "LogIngesterPolicy",
            Roles=[Ref(log_ingester_role)],
            PolicyName="IngestLogPolicy",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    Region,
                                    AccountId,
                                    "log-group",
                                    "/aws/cloudfront/*",
                                ],
                            ),
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    Region,
                                    AccountId,
                                    "log-group",
                                    "/aws/s3/*",
                                ],
                            ),
                            GetAtt(log_ingester_log_group, "Arn"),
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(log_bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))

    bucket = template.add_resource(
        Bucket(
            "ContentBucket",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                # not supported by CFN yet:
                # LifecycleRule(
                # Transitions=[
                # LifecycleRuleTransition(
                # StorageClass='INTELLIGENT_TIERING',
                # TransitionInDays=1,
                # ),
                # ],
                # Status="Enabled",
                # ),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=7),
                    Status="Enabled",
                )
            ]),
            LoggingConfiguration=LoggingConfiguration(
                DestinationBucketName=Ref(log_bucket), LogFilePrefix="s3/"),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # Origin Access Identities can't use KMS
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
        ))

    origin_access_identity = template.add_resource(
        CloudFrontOriginAccessIdentity(
            "CloudFrontIdentity",
            CloudFrontOriginAccessIdentityConfig=
            CloudFrontOriginAccessIdentityConfig(
                Comment=GetAtt(bucket, "Arn")),
        ))

    bucket_policy = template.add_resource(
        BucketPolicy(
            "ContentBucketPolicy",
            Bucket=Ref(bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "CanonicalUser",
                            GetAtt(origin_access_identity,
                                   "S3CanonicalUserId"),
                        ),
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))

    # Not strictly necessary, as ACLs should take care of this access. However, CloudFront docs
    # state "In some circumstances [...] S3 resets permissions on the bucket to the default value",
    # and this allows logging to work without any ACLs in place.
    log_bucket_policy = template.add_resource(
        BucketPolicy(
            "LogBucketPolicy",
            Bucket=Ref(log_bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join(
                                "/",
                                [GetAtt(log_bucket, "Arn"), "cloudfront", "*"])
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.ListBucket],
                        Resource=[Join("/", [GetAtt(log_bucket, "Arn")])],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service", "s3.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join("/", [GetAtt(log_bucket, "Arn"), "s3", "*"])
                        ],
                    ),
                ],
            ),
        ))

    certificate_validator_dlq = template.add_resource(
        Queue(
            "CertificateValidatorDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
            Condition=should_create_certificate,
        ))

    certificate_validator_role = template.add_resource(
        Role(
            "CertificateValidatorRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[
                                    GetAtt(certificate_validator_dlq, "Arn")
                                ],
                            )
                        ],
                    ),
                )
            ],
            # TODO scope down
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
                "arn:aws:iam::aws:policy/AmazonRoute53FullAccess",
                "arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly",
            ],
            Condition=should_create_certificate,
        ))

    certificate_validator_function = template.add_resource(
        Function(
            "CertificateValidatorFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(certificate_validator.handler.__name__),
            Code=Code(ZipFile=inspect.getsource(certificate_validator)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(certificate_validator_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(certificate_validator_dlq, "Arn")),
            Environment=Environment(
                Variables={
                    certificate_validator.EnvVars.HOSTED_ZONE_ID.name:
                    Ref(hosted_zone_id)
                }),
            Condition=should_create_certificate,
        ))

    certificate_validator_log_group = template.add_resource(
        LogGroup(
            "CertificateValidatorLogGroup",
            LogGroupName=Join(
                "", ["/aws/lambda/",
                     Ref(certificate_validator_function)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
            Condition=should_create_certificate,
        ))

    certificate_validator_rule = template.add_resource(
        Rule(
            "CertificateValidatorRule",
            EventPattern={
                "detail-type": ["AWS API Call via CloudTrail"],
                "detail": {
                    "eventSource": ["acm.amazonaws.com"],
                    "eventName": ["AddTagsToCertificate"],
                    "requestParameters": {
                        "tags": {
                            "key": [certificate_validator_function.title],
                            "value":
                            [GetAtt(certificate_validator_function, "Arn")],
                        }
                    },
                },
            },
            Targets=[
                Target(
                    Id="certificate-validator-lambda",
                    Arn=GetAtt(certificate_validator_function, "Arn"),
                )
            ],
            DependsOn=[certificate_validator_log_group],
            Condition=should_create_certificate,
        ))

    certificate_validator_permission = template.add_resource(
        Permission(
            "CertificateValidatorPermission",
            FunctionName=GetAtt(certificate_validator_function, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="events.amazonaws.com",
            SourceArn=GetAtt(certificate_validator_rule, "Arn"),
            Condition=should_create_certificate,
        ))

    certificate = template.add_resource(
        Certificate(
            "Certificate",
            DomainName=Select(0, Ref(dns_names)),
            SubjectAlternativeNames=Ref(
                dns_names),  # duplicate first name works fine
            ValidationMethod="DNS",
            Tags=Tags(
                **{
                    certificate_validator_function.title:
                    GetAtt(certificate_validator_function, "Arn")
                }),
            DependsOn=[certificate_validator_permission],
            Condition=should_create_certificate,
        ))

    edge_hook_role = template.add_resource(
        Role(
            "EdgeHookRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal(
                            "Service",
                            [
                                "lambda.amazonaws.com",
                                "edgelambda.amazonaws.com"
                            ],
                        ),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
        ))

    edge_hook_function = template.add_resource(
        Function(
            "EdgeHookFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.handler",
            Code=Code(ZipFile=inspect.getsource(edge_hook)),
            MemorySize=128,
            Timeout=3,
            Role=GetAtt(edge_hook_role, "Arn"),
        ))
    edge_hook_function_hash = (hashlib.sha256(
        json.dumps(edge_hook_function.to_dict(),
                   sort_keys=True).encode("utf-8")).hexdigest()[:10].upper())

    edge_hook_version = template.add_resource(
        Version(
            "EdgeHookVersion" + edge_hook_function_hash,
            FunctionName=GetAtt(edge_hook_function, "Arn"),
        ))

    replica_log_group_name = Join(
        "/",
        [
            "/aws/lambda",
            Join(
                ".",
                [
                    FindInMap(partition_config, Partition, "PrimaryRegion"),
                    Ref(edge_hook_function),
                ],
            ),
        ],
    )

    edge_hook_role_policy = template.add_resource(
        PolicyType(
            "EdgeHookRolePolicy",
            PolicyName="write-logs",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    "*",
                                    AccountId,
                                    "log-group",
                                    replica_log_group_name,
                                    "log-stream",
                                    "*",
                                ],
                            ),
                        ],
                    ),
                ],
            ),
            Roles=[Ref(edge_hook_role)],
        ))

    stack_set_administration_role = template.add_resource(
        Role(
            "StackSetAdministrationRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "cloudformation.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
        ))

    stack_set_execution_role = template.add_resource(
        Role(
            "StackSetExecutionRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "AWS", GetAtt(stack_set_administration_role,
                                          "Arn")),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="create-stackset-instances",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.DescribeStacks,
                                    logs.DescribeLogGroups,
                                ],
                                Resource=["*"],
                            ),
                            # stack instances communicate with the CFN service via SNS
                            Statement(
                                Effect=Allow,
                                Action=[sns.Publish],
                                NotResource=[
                                    Join(
                                        ":",
                                        [
                                            "arn", Partition, "sns", "*",
                                            AccountId, "*"
                                        ],
                                    )
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    logs.CreateLogGroup,
                                    logs.DeleteLogGroup,
                                    logs.PutRetentionPolicy,
                                    logs.DeleteRetentionPolicy,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn",
                                            Partition,
                                            "logs",
                                            "*",
                                            AccountId,
                                            "log-group",
                                            replica_log_group_name,
                                            "log-stream",
                                            "",
                                        ],
                                    ),
                                ],
                            ),
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.CreateStack,
                                    cloudformation.DeleteStack,
                                    cloudformation.UpdateStack,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn",
                                            Partition,
                                            "cloudformation",
                                            "*",
                                            AccountId,
                                            Join(
                                                "/",
                                                [
                                                    "stack",
                                                    Join(
                                                        "-",
                                                        [
                                                            "StackSet",
                                                            StackName, "*"
                                                        ],
                                                    ),
                                                ],
                                            ),
                                        ],
                                    )
                                ],
                            ),
                        ],
                    ),
                ),
            ],
        ))

    stack_set_administration_role_policy = template.add_resource(
        PolicyType(
            "StackSetAdministrationRolePolicy",
            PolicyName="assume-execution-role",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[sts.AssumeRole],
                        Resource=[GetAtt(stack_set_execution_role, "Arn")],
                    ),
                ],
            ),
            Roles=[Ref(stack_set_administration_role)],
        ))

    edge_log_groups = template.add_resource(
        StackSet(
            "EdgeLambdaLogGroupStackSet",
            AdministrationRoleARN=GetAtt(stack_set_administration_role, "Arn"),
            ExecutionRoleName=Ref(stack_set_execution_role),
            StackSetName=Join("-", [StackName, "EdgeLambdaLogGroup"]),
            PermissionModel="SELF_MANAGED",
            Description="Multi-region log groups for Lambda@Edge replicas",
            Parameters=[
                StackSetParameter(
                    ParameterKey="LogGroupName",
                    ParameterValue=replica_log_group_name,
                ),
                StackSetParameter(
                    ParameterKey="LogRetentionDays",
                    ParameterValue=Ref(log_retention_days),
                ),
            ],
            OperationPreferences=OperationPreferences(
                FailureToleranceCount=0,
                MaxConcurrentPercentage=100,
            ),
            StackInstancesGroup=[
                StackInstances(
                    DeploymentTargets=DeploymentTargets(Accounts=[AccountId]),
                    Regions=FindInMap(partition_config, Partition,
                                      "DefaultRegions"),
                )
            ],
            TemplateBody=create_log_group_template().to_json(indent=None),
            DependsOn=[stack_set_administration_role_policy],
        ))

    # Secondary "dummy" distribution used by the price-class hack: when the
    # hack condition is on, DNS targets this distribution's domain name (see
    # the DistributionDnsTarget output) while the main distribution is
    # restricted to PriceClass_100.
    price_class_distribution = template.add_resource(
        Distribution(
            "PriceClassDistribution",
            DistributionConfig=DistributionConfig(
                Comment="Dummy distribution used for price class hack",
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    ViewerProtocolPolicy="allow-all",
                    ForwardedValues=ForwardedValues(QueryString=False),
                ),
                Enabled=True,
                Origins=[
                    Origin(Id="default",
                           DomainName=GetAtt(bucket, "DomainName"))
                ],
                IPV6Enabled=True,
                ViewerCertificate=ViewerCertificate(
                    CloudFrontDefaultCertificate=True),
                # The dummy always carries the full price class.
                PriceClass="PriceClass_All",
            ),
            # Only created when the price-class hack condition holds.
            Condition=using_price_class_hack,
        ))

    # Primary CloudFront distribution: serves the content bucket through an
    # origin-access identity and attaches the Lambda@Edge hook on
    # origin-request events.
    distribution = template.add_resource(
        Distribution(
            "ContentDistribution",
            DistributionConfig=DistributionConfig(
                Enabled=True,
                # Custom aliases only when DNS names were supplied.
                Aliases=If(using_dns_names, Ref(dns_names), NoValue),
                Logging=Logging(Bucket=GetAtt(log_bucket, "DomainName"),
                                Prefix="cloudfront/"),
                DefaultRootObject="index.html",
                Origins=[
                    Origin(
                        Id="default",
                        DomainName=GetAtt(bucket, "DomainName"),
                        S3OriginConfig=S3OriginConfig(
                            OriginAccessIdentity=Join(
                                "",
                                [
                                    "origin-access-identity/cloudfront/",
                                    Ref(origin_access_identity),
                                ],
                            )),
                    )
                ],
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    Compress=True,
                    ForwardedValues=ForwardedValues(QueryString=False),
                    ViewerProtocolPolicy="redirect-to-https",
                    DefaultTTL=Ref(default_ttl_seconds),
                    LambdaFunctionAssociations=[
                        LambdaFunctionAssociation(
                            EventType="origin-request",
                            LambdaFunctionARN=Ref(edge_hook_version),
                        )
                    ],
                ),
                HttpVersion="http2",
                IPV6Enabled=True,
                # Certificate precedence: explicit ACM ARN parameter first,
                # then the stack-managed certificate (hosted zone case),
                # otherwise fall back to the default *.cloudfront.net cert.
                ViewerCertificate=ViewerCertificate(
                    AcmCertificateArn=If(
                        using_acm_certificate,
                        Ref(acm_certificate_arn),
                        If(using_hosted_zone, Ref(certificate), NoValue),
                    ),
                    SslSupportMethod=If(using_certificate, "sni-only",
                                        NoValue),
                    CloudFrontDefaultCertificate=If(using_certificate, NoValue,
                                                    True),
                    MinimumProtocolVersion=Ref(tls_protocol_version),
                ),
                # Under the price-class hack this distribution is limited to
                # PriceClass_100; the dummy distribution carries
                # PriceClass_All instead.
                PriceClass=If(using_price_class_hack, "PriceClass_100",
                              "PriceClass_All"),
            ),
            DependsOn=[
                bucket_policy,
                log_ingester_policy,
                edge_log_groups,
                precondition_region_is_primary,
            ],
        ))

    # Log groups for the distribution and the content bucket. Retention is
    # only applied when the LogRetentionDays parameter was provided
    # (retention_defined condition); otherwise logs are kept forever.
    distribution_log_group = template.add_resource(
        LogGroup(
            "DistributionLogGroup",
            LogGroupName=Join(
                "", ["/aws/cloudfront/", Ref(distribution)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    bucket_log_group = template.add_resource(
        LogGroup(
            "BucketLogGroup",
            LogGroupName=Join("", ["/aws/s3/", Ref(bucket)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    # Stack outputs: distribution identifiers, the DNS target, a convenience
    # URL and the content bucket ARN.
    template.add_output(Output("DistributionId", Value=Ref(distribution)))

    template.add_output(
        Output("DistributionDomain", Value=GetAtt(distribution, "DomainName")))

    # DNS records must point at the dummy distribution's domain when the
    # price-class hack is active, otherwise at the main distribution.
    template.add_output(
        Output(
            "DistributionDnsTarget",
            Value=If(
                using_price_class_hack,
                GetAtt(price_class_distribution, "DomainName"),
                GetAtt(distribution, "DomainName"),
            ),
        ))

    template.add_output(
        Output(
            "DistributionUrl",
            Value=Join("",
                       ["https://",
                        GetAtt(distribution, "DomainName"), "/"]),
        ))

    template.add_output(Output("ContentBucketArn", Value=GetAtt(bucket,
                                                                "Arn")))

    return template
コード例 #17
0
# Converted from S3_Bucket.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/

from troposphere import Output, Ref, Template
from troposphere.s3 import Bucket, PublicRead

t = Template()

# set_description replaces the deprecated Template.add_description alias,
# matching the style used by the other templates in this file.
t.set_description(
    "AWS CloudFormation Sample Template S3_Bucket: Sample template showing "
    "how to create a publicly accessible S3 bucket. "
    "**WARNING** This template creates an Amazon S3 Bucket. "
    "You will be billed for the AWS resources used if you create "
    "a stack from this template.")

# Publicly readable bucket; no BucketName is set, so CloudFormation
# generates a unique physical name.
s3bucket = t.add_resource(Bucket(
    "S3Bucket",
    AccessControl=PublicRead,
))

t.add_output(
    Output("BucketName",
           Value=Ref(s3bucket),
           Description="Name of S3 bucket to hold website content"))

print(t.to_json())
コード例 #18
0
ファイル: s3_user.py プロジェクト: gtie/tropostack
 def o_bucket_arn(self):
     """Output exposing the bucket's ARN, exported as <StackName>-BucketArn."""
     output_id = 'BucketArn'
     export_name = Sub("${AWS::StackName}-%s" % output_id)
     return Output(
         output_id,
         Description='The ARN of the S3 bucket',
         Value=GetAtt(self.r_bucket, 'Arn'),
         Export=Export(export_name),
     )
コード例 #19
0
ファイル: resources.py プロジェクト: LizRuelas/lambdachat
def generate_cf_template():
    """
    Returns an entire CloudFormation stack by using troposphere to construct
    each piece.

    The stack contains:
      * an SNS topic the website publishes chat messages to,
      * an IAM role/policy assumable via Google federated web identity
        (scoped to publishing on that topic), and
      * a public S3 website bucket plus a bucket policy allowing reads.
    """
    # Header of CloudFormation template
    t = Template()
    t.add_version("2010-09-09")
    t.add_description("Lambda Chat AWS Resources")
    # Parameters
    # NOTE(review): the dots in this pattern are unescaped and so match any
    # character; presumably literal "." was intended -- confirm before
    # tightening, since changing it alters parameter validation.
    description = "should match [0-9]+-[a-z0-9]+.apps.googleusercontent.com"
    google_oauth_client_id = t.add_parameter(Parameter(
        "GoogleOAuthClientID",
        AllowedPattern="[0-9]+-[a-z0-9]+.apps.googleusercontent.com",
        Type="String",
        Description="The Client ID of your Google project",
        ConstraintDescription=description
    ))

    website_s3_bucket_name = t.add_parameter(Parameter(
        "WebsiteS3BucketName",
        # Raw string: "\-" is an invalid escape sequence in a plain string
        # literal (SyntaxWarning on modern Python). The pattern itself is
        # byte-identical to the original.
        AllowedPattern=r"[a-zA-Z0-9\-]*",
        Type="String",
        Description="Name of S3 bucket to store the website in",
        ConstraintDescription="can contain only alphanumeric characters and dashes.",
    ))

    # The SNS topic the website will publish chat messages to
    website_sns_topic = t.add_resource(sns.Topic(
        'WebsiteSnsTopic',
        TopicName='lambda-chat',
        DisplayName='Lambda Chat'
    ))
    t.add_output(Output(
        "WebsiteSnsTopic",
        Description="sns_topic_arn",
        Value=Ref(website_sns_topic),
    ))

    # The IAM Role and Policy the website will assume to publish to SNS.
    # Trust is federated through Google, restricted to the OAuth client ID
    # supplied as a parameter.
    website_role = t.add_resource(iam.Role(
        "WebsiteRole",
        Path="/",
        AssumeRolePolicyDocument=Policy(
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[Action("sts", "AssumeRoleWithWebIdentity")],
                    Principal=Principal("Federated", "accounts.google.com"),
                    Condition=Condition(
                        StringEquals(
                            "accounts.google.com:aud",
                            Ref(google_oauth_client_id)
                        )
                    ),
                ),
            ],
        ),
    ))
    # The role may only publish to the chat topic.
    t.add_resource(iam.PolicyType(
        "WebsitePolicy",
        PolicyName="lambda-chat-website-policy",
        Roles=[Ref(website_role)],
        PolicyDocument=Policy(
            Version="2012-10-17",
            Statement=[
                Statement(
                    Effect=Allow,
                    Action=[Action("sns", "Publish")],
                    Resource=[
                        Ref(website_sns_topic)
                    ],
                ),
            ],
        )
    ))
    t.add_output(Output(
        "WebsiteRole",
        Description="website_iam_role_arn",
        Value=GetAtt(website_role, "Arn"),
    ))

    # Static website bucket with standard index/error documents.
    website_bucket = t.add_resource(s3.Bucket(
        'WebsiteS3Bucket',
        BucketName=Ref(website_s3_bucket_name),
        WebsiteConfiguration=s3.WebsiteConfiguration(
            ErrorDocument="error.html",
            IndexDocument="index.html"
        )
    ))
    t.add_output(Output(
        "S3Bucket",
        Description="s3_bucket",
        Value=Ref(website_bucket),
    ))
    # Allow anonymous reads of every object in the website bucket.
    t.add_resource(s3.BucketPolicy(
        'WebsiteS3BucketPolicy',
        Bucket=Ref(website_bucket),
        PolicyDocument={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Sid": "PublicAccess",
                    "Effect": "Allow",
                    "Principal": "*",
                    "Action": ["s3:GetObject"],
                    "Resource": [{
                        "Fn::Join": [
                            "",
                            [
                                "arn:aws:s3:::",
                                {
                                    "Ref": "WebsiteS3Bucket",
                                },
                                "/*"
                            ]
                        ]
                    }]
                }
            ]
        }
    ))

    return t
コード例 #20
0
ファイル: s3_user.py プロジェクト: gtie/tropostack
 def o_username(self):
     """Output exposing the bot user's name, exported as <StackName>-UserName."""
     output_id = 'UserName'
     return Output(
         output_id,
         Description='Username of the created bot account',
         Value=self.r_iam_user.ref(),
         Export=Export(Sub("${AWS::StackName}-%s" % output_id)),
     )
コード例 #21
0
    def set_up_stack(self):
        """Declare parameters and resources for the MMW Worker stack.

        After the base-class setup, this tags every resource with
        StackType=Worker, declares the stack's input parameters, then
        creates security groups, a load balancer, auto scaling resources
        and DNS records, and outputs the load balancer endpoint details.
        """
        super(Worker, self).set_up_stack()

        # Merge the user-supplied tags with a StackType marker; copy() so
        # the original input mapping is not mutated.
        tags = self.get_input('Tags').copy()
        tags.update({'StackType': 'Worker'})

        self.default_tags = tags
        self.region = self.get_input('Region')

        self.add_description('Worker stack for MMW')

        # Parameters
        self.color = self.add_parameter(Parameter(
            'StackColor', Type='String',
            Description='Stack color', AllowedValues=['Blue', 'Green']
        ), 'StackColor')

        self.keyname = self.add_parameter(Parameter(
            'KeyName', Type='String',
            Description='Name of an existing EC2 key pair'
        ), 'KeyName')

        self.ip_access = self.add_parameter(Parameter(
            'IPAccess', Type='String', Default=self.get_input('IPAccess'),
            Description='CIDR for allowing SSH access'
        ), 'IPAccess')

        self.availability_zones = self.add_parameter(Parameter(
            'AvailabilityZones', Type='CommaDelimitedList',
            Description='Comma delimited list of availability zones'
        ), 'AvailabilityZones')

        self.rds_password = self.add_parameter(Parameter(
            'RDSPassword', Type='String', NoEcho=True,
            Description='Database password',
        ), 'RDSPassword')

        self.worker_instance_type = self.add_parameter(Parameter(
            'WorkerInstanceType', Type='String', Default='t2.micro',
            Description='Worker EC2 instance type',
            AllowedValues=EC2_INSTANCE_TYPES,
            ConstraintDescription='must be a valid EC2 instance type.'
        ), 'WorkerInstanceType')

        self.worker_ami = self.add_parameter(Parameter(
            'WorkerAMI', Type='String',
            Default=self.get_recent_worker_ami(),
            Description='Worker AMI'
        ), 'WorkerAMI')

        self.worker_instance_profile = self.add_parameter(Parameter(
            'WorkerInstanceProfile', Type='String',
            Default='WorkerInstanceProfile',
            Description='Worker instance profile'
        ), 'WorkerInstanceProfile')

        # Auto scaling group sizing (desired/min/max) ...
        self.worker_auto_scaling_desired = self.add_parameter(Parameter(
            'WorkerAutoScalingDesired', Type='String', Default='2',
            Description='Worker AutoScalingGroup desired'
        ), 'WorkerAutoScalingDesired')

        self.worker_auto_scaling_min = self.add_parameter(Parameter(
            'WorkerAutoScalingMin', Type='String', Default='0',
            Description='Worker AutoScalingGroup minimum'
        ), 'WorkerAutoScalingMin')

        self.worker_auto_scaling_max = self.add_parameter(Parameter(
            'WorkerAutoScalingMax', Type='String', Default='2',
            Description='Worker AutoScalingGroup maximum'
        ), 'WorkerAutoScalingMax')

        # ... and scheduled scale-up/scale-down actions (cron-style
        # recurrences with target capacities).
        self.worker_auto_scaling_schedule_start_recurrence = self.add_parameter(  # NOQA
            Parameter(
                'WorkerAutoScalingScheduleStartRecurrence', Type='String',
                Default='0 12 * * 1-5',
                Description='Worker ASG schedule start recurrence'
            ), 'WorkerAutoScalingScheduleStartRecurrence')

        self.worker_auto_scaling_schedule_start_capacity = self.add_parameter(  # NOQA
            Parameter(
                'WorkerAutoScalingScheduleStartCapacity', Type='String',
                Default='2',
                Description='Worker ASG schedule start capacity'
            ), 'WorkerAutoScalingScheduleStartCapacity')

        self.worker_auto_scaling_schedule_end_recurrence = self.add_parameter(  # NOQA
            Parameter(
                'WorkerAutoScalingScheduleEndRecurrence', Type='String',
                Default='0 0 * * *',
                Description='Worker ASG schedule end recurrence'
            ), 'WorkerAutoScalingScheduleEndRecurrence')

        self.worker_auto_scaling_schedule_end_capacity = self.add_parameter(  # NOQA
            Parameter(
                'WorkerAutoScalingScheduleEndCapacity', Type='String',
                Default='0',
                Description='Worker ASG schedule end capacity'
            ), 'WorkerAutoScalingScheduleEndCapacity')

        self.public_subnets = self.add_parameter(Parameter(
            'PublicSubnets', Type='CommaDelimitedList',
            Description='A list of public subnets'
        ), 'PublicSubnets')

        self.private_subnets = self.add_parameter(Parameter(
            'PrivateSubnets', Type='CommaDelimitedList',
            Description='A list of private subnets'
        ), 'PrivateSubnets')

        self.public_hosted_zone_name = self.add_parameter(Parameter(
            'PublicHostedZoneName', Type='String',
            Description='Route 53 public hosted zone name'
        ), 'PublicHostedZoneName')

        self.vpc_id = self.add_parameter(Parameter(
            'VpcId', Type='String',
            Description='VPC ID'
        ), 'VpcId')

        self.notification_topic_arn = self.add_parameter(Parameter(
            'GlobalNotificationsARN', Type='String',
            Description='ARN for an SNS topic to broadcast notifications'
        ), 'GlobalNotificationsARN')

        self.srat_catchment_api_url = self.add_parameter(Parameter(
            'SRATCatchmentAPIURL', Type='String',
            Description='URL for the SRAT Catchment API'
        ), 'SRATCatchmentAPIURL')

        self.srat_catchment_api_key = self.add_parameter(Parameter(
            'SRATCatchmentAPIKey', Type='String', NoEcho=True,
            Description='API key for the SRAT Catchment API'
        ), 'SRATCatchmentAPIKey')

        # Resources: security groups, load balancer, auto scaling, DNS.
        worker_lb_security_group, \
            worker_security_group = self.create_security_groups()
        worker_lb = self.create_load_balancer(worker_lb_security_group)

        self.create_auto_scaling_resources(
            worker_security_group,
            worker_lb)

        self.create_dns_records(worker_lb)

        # Outputs: load balancer DNS endpoint and its hosted zone ID.
        self.add_output(Output('WorkerLoadBalancerEndpoint',
                               Value=GetAtt(worker_lb, 'DNSName')))
        self.add_output(Output('WorkerLoadBalancerHostedZoneNameID',
                               Value=GetAtt(worker_lb,
                                            'CanonicalHostedZoneNameID')))
コード例 #22
0
                        "test": {
                            "command": 'echo "$CFNTEST" > text.txt',
                            "env": {
                                "CFNTEST": "I come from config1."
                            },
                            "cwd": "~",
                        }
                    }),
                config2=cloudformation.InitConfig(
                    commands={
                        "test": {
                            "command": 'echo "$CFNTEST" > text.txt',
                            "env": {
                                "CFNTEST": "I come from config2."
                            },
                            "cwd": "~",
                        }
                    }),
            )),
        Tags=Tags(Name="ops.cfninit", env="ops"),
    ))

# Surface the instance's public IP as a stack output.
public_ip_output = Output(
    "PublicIp",
    Description="Public IP of the newly created EC2 instance",
    Value=GetAtt(ec2_instance, "PublicIp"),
)
t.add_output(public_ip_output)

print(t.to_json())
コード例 #23
0
    ))

# Associate the API key with the usage plan.
usage_plan_key = t.add_resource(
    UsagePlanKey(
        "ExampleUsagePlanKey",
        KeyId=Ref(key),
        KeyType="API_KEY",
        UsagePlanId=Ref(usagePlan),
    ))

# Expose the stage endpoint and the API key as stack outputs.
endpoint_output = Output(
    "ApiEndpoint",
    Description="Endpoint for this stage of the api",
    Value=Join(
        "",
        [
            "https://",
            Ref(rest_api),
            ".execute-api.eu-west-1.amazonaws.com/",
            stage_name,
        ],
    ),
)
api_key_output = Output("ApiKey", Value=Ref(key), Description="API key")
t.add_output([endpoint_output, api_key_output])

print(t.to_json())
コード例 #24
0
ファイル: mock_blueprints.py プロジェクト: synfinatic/stacker
 def create_template(self):
     """Add two dummy wait-condition handles and a static DummyId output."""
     tpl = self.template
     tpl.add_resource(WaitConditionHandle("Dummy"))
     tpl.add_output(Output("DummyId", Value="dummy-1234"))
     tpl.add_resource(WaitConditionHandle("Dummy2"))
コード例 #25
0
        "ap-northeast-1": {
            "AMI": "ami-dcfa4edd"
        }
    })

# EC2 instance whose AMI is looked up per-region from the RegionMap mapping.
instance = t.add_resource(
    Instance(
        "Ec2Instance",
        ImageId=FindInMap("RegionMap", Ref("AWS::Region"), "AMI"),
        InstanceType="m1.small",
    ))

# Route 53 A record: <instance-id>.<region>.<zone>. pointing at the
# instance's public IP.
record_name = Join("", [
    Ref(instance), ".",
    Ref("AWS::Region"), ".",
    Ref(hostedzone), "."
])
myDNSRecord = t.add_resource(
    RecordSetType(
        "myDNSRecord",
        HostedZoneName=Join("", [Ref(hostedzone), "."]),
        Comment="DNS name for my instance.",
        Name=record_name,
        Type="A",
        TTL="900",
        ResourceRecords=[GetAtt("Ec2Instance", "PublicIp")],
    ))

t.add_output(Output("DomainName", Value=Ref(myDNSRecord)))

print(t.to_json())
コード例 #26
0
ファイル: mock_blueprints.py プロジェクト: synfinatic/stacker
    def create_template(self):
        """Build the IAM user, role and keys used by stacker functional tests.

        Grants both the user and the role the precise permissions stacker
        needs (S3 bucket/object management, change-set and stack operations
        scoped to the stacker namespace, SNS topic management), and outputs
        the user name, its access key pair and the role ARN.
        """
        t = self.template

        # Resource scopes referenced by the policy statements below.
        bucket_arn = Sub("arn:aws:s3:::${StackerBucket}*")
        objects_arn = Sub("arn:aws:s3:::${StackerBucket}*/*")
        cloudformation_scope = Sub(
            "arn:aws:cloudformation:*:${AWS::AccountId}:"
            "stack/${StackerNamespace}-*")
        sns_scope = Sub("arn:aws:sns:*:${AWS::AccountId}:"
                        "${StackerNamespace}-*")
        changeset_scope = "*"

        # This represents the precise IAM permissions that stacker itself
        # needs.
        stacker_policy = iam.Policy(
            PolicyName="Stacker",
            PolicyDocument=Policy(Statement=[
                Statement(Effect="Allow",
                          Resource=["*"],
                          Action=[awacs.s3.ListAllMyBuckets]),
                Statement(Effect="Allow",
                          Resource=[bucket_arn],
                          Action=[
                              awacs.s3.ListBucket,
                              awacs.s3.GetBucketLocation,
                              awacs.s3.CreateBucket,
                              awacs.s3.DeleteBucket,
                          ]),
                Statement(Effect="Allow",
                          Resource=[bucket_arn],
                          Action=[
                              awacs.s3.GetObject,
                              awacs.s3.GetObjectAcl,
                              awacs.s3.PutObject,
                              awacs.s3.PutObjectAcl,
                          ]),
                Statement(Effect="Allow",
                          Resource=[objects_arn],
                          Action=[
                              awacs.s3.DeleteObject,
                          ]),
                Statement(Effect="Allow",
                          Resource=[changeset_scope],
                          Action=[
                              awacs.cloudformation.DescribeChangeSet,
                              awacs.cloudformation.ExecuteChangeSet,
                              awacs.cloudformation.DeleteChangeSet,
                          ]),
                # Explicitly deny every CloudFormation action against this
                # very stack.
                Statement(Effect="Deny",
                          Resource=[Ref("AWS::StackId")],
                          Action=[awacs.cloudformation.Action("*")]),
                Statement(
                    Effect="Allow",
                    Resource=[cloudformation_scope],
                    Action=[
                        awacs.cloudformation.GetTemplate, awacs.cloudformation.
                        CreateChangeSet, awacs.cloudformation.DeleteChangeSet,
                        awacs.cloudformation.DeleteStack, awacs.cloudformation.
                        CreateStack, awacs.cloudformation.UpdateStack,
                        awacs.cloudformation.SetStackPolicy,
                        awacs.cloudformation.DescribeStacks,
                        awacs.cloudformation.DescribeStackEvents
                    ]),
                Statement(Effect="Allow",
                          Resource=[sns_scope],
                          Action=[
                              awacs.sns.CreateTopic, awacs.sns.DeleteTopic,
                              awacs.sns.GetTopicAttributes
                          ])
            ]))

        # Role assumable by any principal in this account.
        principal = AWSPrincipal(Ref("AWS::AccountId"))
        role = t.add_resource(
            iam.Role("FunctionalTestRole",
                     AssumeRolePolicyDocument=Policy(Statement=[
                         Statement(Effect="Allow",
                                   Action=[awacs.sts.AssumeRole],
                                   Principal=principal)
                     ]),
                     Policies=[stacker_policy]))

        # The user is additionally allowed to assume the role above.
        assumerole_policy = iam.Policy(
            PolicyName="AssumeRole",
            PolicyDocument=Policy(Statement=[
                Statement(Effect="Allow",
                          Resource=[GetAtt(role, "Arn")],
                          Action=[awacs.sts.AssumeRole])
            ]))

        user = t.add_resource(
            iam.User("FunctionalTestUser",
                     Policies=[stacker_policy, assumerole_policy]))

        key = t.add_resource(
            iam.AccessKey("FunctionalTestKey", Serial=1, UserName=Ref(user)))

        # Expose the credentials and role ARN as stack outputs.
        t.add_output(Output("User", Value=Ref(user)))
        t.add_output(Output("AccessKeyId", Value=Ref(key)))
        t.add_output(
            Output("SecretAccessKey",
                   Value=GetAtt("FunctionalTestKey", "SecretAccessKey")))
        t.add_output(Output("FunctionalTestRole", Value=GetAtt(role, "Arn")))
コード例 #27
0
    Path="/",
    Roles=[Ref("Role")]
))         

# EC2 instance bootstrapped with the user data and instance profile
# defined earlier in the script.
web_instance = ec2.Instance(
    "instance",
    ImageId="ami-a0cfeed8",
    InstanceType="t2.micro",
    SecurityGroups=[Ref("SecurityGroup")],
    KeyName=Ref("KeyPair"),
    UserData=ud,
    IamInstanceProfile=Ref("InstanceProfile"),
)
t.add_resource(web_instance)

# Outputs: raw public IP and the application URL on the configured port.
t.add_output(Output(
    "InstancePublicIp",
    Description="Public IP of our instance.",
    Value=GetAtt("instance", "PublicIp"),
))

t.add_output(Output(
    "WebUrl",
    Description="Application endpoint",
    Value=Join("", [
        "http://", GetAtt("instance", "PublicDnsName"),
        ":", ApplicationPort
    ]),
))

print(t.to_json())
コード例 #28
0
ファイル: EzElb_0_5.py プロジェクト: dboitnot/ez_elb
    def to_json(self):
        if self._json is not None:
            return self._json

        # Validity checks
        if len(self.subnet_ids) < 2:
            raise ValidationException(
                "Use .subnet_id() to specify at least two ELB subnets")
        if len(self.cert_ids) < 1:
            raise ValidationException(
                "Use .certificate_id() to specify at least one certificate")
        if not self._ecs_redirect and len(self.default_targets) < 1:
            raise ValidationException(
                "Use .default_target() to specify at least one default target or .ecs_redirect("
                ") to set up a redirect container")
        for (name, tp) in self.target_paths.iteritems():
            if len(set(map(lambda h: h.type, tp.hosts))) != 1:
                raise ValidationException(
                    "Inconsistent target types for %s. All hosts for a given path must have the "
                    "same type (ip or instance)." % name)

        # Export VPC ID
        self.template.add_output(
            Output("VpcIdExport",
                   Description="VPC of the ELB",
                   Value=self.vpc_id,
                   Export=Export(Sub("${AWS::StackName}-VpcId"))))

        # Build Security Group
        if self._custom_elb_sgs:
            elb_sgs = self._custom_elb_sgs
        else:
            elb_sg = SecurityGroup(
                "ElbSecurityGroup",
                GroupDescription=Sub("${AWS::StackName}-ElbSg"),
                Tags=self.tags_with(Name=Sub("${AWS::StackName}-ElbSg")),
                VpcId=self.vpc_id,
                SecurityGroupEgress=[
                    SecurityGroupRule(CidrIp="0.0.0.0/0", IpProtocol="-1")
                ],
                SecurityGroupIngress=self._sg_rules)
            self.template.add_resource(elb_sg)
            self.template.add_output(
                Output("ElbSecurityGroupOutput",
                       Description="Security group ID assigned to the ELB",
                       Value=Ref(elb_sg),
                       Export=Export(Sub("${AWS::StackName}-ElbSg"))))

            # Build Attachment Security Group
            inst_sg = SecurityGroup(
                "InstanceSecurityGroup",
                GroupDescription=Sub("${AWS::StackName}-InstSg"),
                Tags=self.tags_with(Name=Sub("${AWS::StackName}-InstSg")),
                VpcId=self.vpc_id,
                SecurityGroupEgress=[
                    SecurityGroupRule(CidrIp="0.0.0.0/0", IpProtocol="-1")
                ],
                SecurityGroupIngress=[
                    SecurityGroupRule(IpProtocol="-1",
                                      SourceSecurityGroupId=Ref(elb_sg))
                ])
            self.template.add_resource(inst_sg)
            self.template.add_output(
                Output("InstanceSecurityGroupOutput",
                       Description="Convenience SG to assign to instances",
                       Value=Ref(inst_sg),
                       Export=Export(Sub("${AWS::StackName}-InstSg"))))
            elb_sgs = [Ref("ElbSecurityGroup")]

        # Build ELB
        elb = LoadBalancer("ELB",
                           SecurityGroups=elb_sgs,
                           Scheme=self._elb_scheme,
                           Subnets=self.subnet_ids,
                           Tags=self.tags_with(Name=Ref("AWS::StackName")),
                           LoadBalancerAttributes=self.elb_attributes())
        if self._elb_name:
            elb.Name = self._elb_name
        self.template.add_resource(elb)
        self.template.add_output(
            Output("ElbArnOutput",
                   Description="ARN of the ELB",
                   Value=Ref(elb),
                   Export=Export(Sub("${AWS::StackName}-ElbArn"))))
        self.template.add_output(
            Output("ElbDnsOutput",
                   Description="DNS name of the ELB",
                   Value=GetAtt("ELB", "DNSName"),
                   Export=Export(Sub("${AWS::StackName}-ElbDns"))))

        # Build Default Target Group
        if self._ecs_redirect:
            default_tg_protocol = "HTTP"
        else:
            default_tg_protocol = self.default_targets[0].protocol
        default_tg = TargetGroup(
            "DefaultTargetGroup",
            Port=8080,
            Protocol=default_tg_protocol,
            Tags=self.tags_with(Name=Sub("${AWS::StackName}-Default")),
            VpcId=self.vpc_id,
            Targets=list(
                map(lambda h: TargetDescription(Id=h.host, Port=h.port),
                    self.default_targets)),
            HealthyThresholdCount=2,
            Matcher=Matcher(HttpCode="200-399"))
        self.name_target_group(default_tg)
        self.template.add_resource(default_tg)
        self.attach_alarm(default_tg)

        # Build Listener
        listener = Listener(
            "HttpsListener",
            Certificates=list(
                map(lambda i: Certificate(CertificateArn=i), self.cert_ids)),
            DefaultActions=[
                Action(Type="forward",
                       TargetGroupArn=Ref("DefaultTargetGroup"))
            ],
            LoadBalancerArn=Ref("ELB"),
            Port=443,
            Protocol="HTTPS")
        self.template.add_resource(listener)
        self.template.add_output(
            Output("HttpsListenerExport",
                   Description="HTTPS Listener ARN",
                   Value=Ref(listener),
                   Export=Export(Sub("${AWS::StackName}-HttpsListener"))))

        # Build HTTP redirect
        if len(self.http_redirect_targets) > 0:
            # Build Redirect Target Group
            http_tg = TargetGroup(
                "RedirectTargetGroup",
                Port=8080,
                Protocol=self.http_redirect_targets[0].protocol,
                Tags=self.tags_with(Name=Sub("${AWS::StackName}-Redirect")),
                VpcId=self.vpc_id,
                Targets=list(
                    map(lambda h: TargetDescription(Id=h.host, Port=h.port),
                        self.http_redirect_targets)),
                HealthyThresholdCount=2,
                Matcher=Matcher(HttpCode="200-399"))
            self.name_target_group(http_tg)
            self.template.add_resource(http_tg)
            self.attach_alarm(http_tg)

        if self._ecs_redirect or len(self.http_redirect_targets) > 0:
            if self._ecs_redirect:
                redirect_tg = "DefaultTargetGroup"
            else:
                redirect_tg = "RedirectTargetGroup"
            # Build Listener
            http_listener = Listener("HttpListener",
                                     DefaultActions=[
                                         Action(
                                             Type="forward",
                                             TargetGroupArn=Ref(redirect_tg))
                                     ],
                                     LoadBalancerArn=Ref("ELB"),
                                     Port=80,
                                     Protocol="HTTP")
            self.template.add_resource(http_listener)
            self.template.add_output(
                Output("HttpListenerExport",
                       Description="HTTP Listener ARN",
                       Value=Ref(http_listener),
                       Export=Export(Sub("${AWS::StackName}-HttpListener"))))

        # Build Target Groups & Rules
        for (name, tp) in self.target_paths.iteritems():
            name_an = alpha_numeric_name(name)
            tag_name = taggable_name(name)

            g = TargetGroup(
                "PathTg" + name_an,
                Port=tp.hosts[0].port,
                Protocol=tp.hosts[0].protocol,
                Tags=self.tags_with(Name="%s/%s" % (self.env_name, tag_name),
                                    TargetPath=tag_name),
                Targets=list(map(lambda h: h.to_target_desc(), tp.hosts)),
                VpcId=self.vpc_id,
                HealthCheckPath="/%s" % name,
                HealthyThresholdCount=2,
                Matcher=tp.health_check_matcher)
            self.name_target_group(g)

            # TODO: We should probably explicitly specify this for every TG. Not
            #       doing that now because it will cause lots of updates. Maybe
            #       in 0.4?
            if len(tp.hosts) > 0 and tp.hosts[0].type != "instance":
                g.TargetType = tp.hosts[0].type

            if self.sticky:
                g.TargetGroupAttributes = [
                    TargetGroupAttribute(Key="stickiness.enabled",
                                         Value="true"),
                    TargetGroupAttribute(Key="stickiness.type",
                                         Value="lb_cookie")
                ]
            self.template.add_resource(g)
            self.attach_alarm(g)
            self.template.add_resource(
                ListenerRule(
                    "PathRl" + name_an,
                    Actions=[Action(Type="forward", TargetGroupArn=Ref(g))],
                    Conditions=[
                        Condition(Field="path-pattern",
                                  Values=["/%s/*" % name])
                    ],
                    ListenerArn=Ref("HttpsListener"),
                    Priority=self.priority_hash(name)))
            self.template.add_resource(
                ListenerRule(
                    "PathRln" + name_an,
                    Actions=[Action(Type="forward", TargetGroupArn=Ref(g))],
                    Conditions=[
                        Condition(Field="path-pattern", Values=["/%s" % name])
                    ],
                    ListenerArn=Ref("HttpsListener"),
                    Priority=self.priority_hash(name)))

        # Build Alternate Listeners
        for al in self.alt_listeners:
            tg_name = "AltTg%d" % al.port
            tg_protocol = al.hosts[0].protocol
            tg = TargetGroup(
                tg_name,
                Port=9999,
                Protocol=tg_protocol,
                Tags=self.tags_with(Name=Sub("${AWS::StackName}-%s" %
                                             tg_name)),
                VpcId=self.vpc_id,
                Targets=list(
                    map(lambda h: TargetDescription(Id=h.host, Port=h.port),
                        al.hosts)),
                HealthyThresholdCount=2,
                Matcher=Matcher(HttpCode="200-399"))
            self.name_target_group(tg)
            self.template.add_resource(tg)
            self.attach_alarm(tg)

            listener = Listener("AltListener%d" % al.port,
                                DefaultActions=[
                                    Action(Type="forward",
                                           TargetGroupArn=Ref(tg_name))
                                ],
                                LoadBalancerArn=Ref("ELB"),
                                Port=al.port,
                                Protocol=al.protocol)

            if al.protocol == "HTTPS":
                listener.Certificates = list(
                    map(lambda i: Certificate(CertificateArn=i),
                        self.cert_ids))

            self.template.add_resource(listener)

        self._json = self.template.to_json()
        return self._json
コード例 #29
0
                    Join("", [
                        "arn:aws:codebuild:",
                        Ref('AWS::Region'), ":",
                        Ref('AWS::AccountId'), ":project/",
                        Ref(BuildProject)
                    ])
                ]
            }, {
                "Effect":
                "Allow",
                "Action": ["s3:ListBucket", "s3:PutObject", "s3:GetObject"],
                "Resource": ["*"]
            }]
        },
        Roles=[Ref(CodePipelineServiceRole)],
    ))

# Export the ECR repository URL (account.dkr.ecr.region.amazonaws.com/repo)
# so docker clients can locate the clair image repository.
repository_url = Join("", [
    Ref(AWS_ACCOUNT_ID), ".dkr.ecr.",
    Ref(AWS_REGION), ".amazonaws.com/",
    Ref(Repository)
])
t.add_output(
    Output(
        "RepositoryURL",
        Description="The docker repository URL",
        Value=repository_url,
    ))

print(t.to_json())
コード例 #30
0
ファイル: vpc.py プロジェクト: ggaugain/sceptre
 def add_outputs(self):
     """Register the VPC and Internet Gateway refs as stack outputs."""
     # Build the output list first, then hand it to the template in one call.
     outputs = [
         Output("VpcId", Value=Ref(self.vpc)),
         Output("IgwName", Value=Ref(self.igw)),
     ]
     self.out = self.template.add_output(outputs)