Example #1
0
    def s3(self, template):
        """
        Create an s3 resource configuration from the config file data.

        This will produce Bucket and BucketPolicy resources along with
        the bucket name as output; these are all added to the troposphere
        template.

        Args:
            template:
                The troposphere.Template object
        """
        # As there are no required fields, although we may not have any
        # subkeys we still need to be able to have a parent key 's3:' to
        # signify that we want to create an s3 bucket. In this case we
        # set up an empty (no options set) dictionary
        present_keys = {}
        if isinstance(self.data["s3"], dict):
            present_keys = self.data["s3"].keys()

        # If the static bucket name is manually set then use that,
        # otherwise use the <stackname>-<logical-resource-name>-<random>
        # default
        bucket = Bucket("StaticBucket", AccessControl="BucketOwnerFullControl")
        if "static-bucket-name" in present_keys:
            bucket.BucketName = self.data["s3"]["static-bucket-name"]

        # If a policy has been manually set then use it, otherwise set
        # a reasonable default of public 'Get' access
        if "policy" in present_keys:
            # Context manager closes the policy file promptly (the original
            # left the file object open).
            with open(self.data["s3"]["policy"]) as policy_file:
                policy = json.load(policy_file)
        else:
            arn = Join("", ["arn:aws:s3:::", Ref(bucket), "/*"])
            policy = {
                "Action": ["s3:GetObject"],
                "Resource": arn,
                "Effect": "Allow",
                "Principal": "*",
            }

        bucket_policy = BucketPolicy(
            "StaticBucketPolicy",
            Bucket=Ref(bucket),
            PolicyDocument={"Statement": [policy]},
        )
        # Add the bucket name to the list of cloudformation outputs
        template.add_output(Output(
            "StaticBucketName",
            Description="S3 bucket name",
            Value=Ref(bucket)))

        # Add the resources to the troposphere template.
        # BUG FIX: the original used map(template.add_resource, [...]); in
        # Python 3 map() returns a lazy iterator that was never consumed, so
        # neither resource was actually added to the template. An explicit
        # loop forces the calls to happen.
        for resource in (bucket, bucket_policy):
            template.add_resource(resource)
Example #2
0
    def _create_project_stack(self):
        """
        Create or update the project's base CloudFormation stack.

        Builds a troposphere template containing the artifact S3 bucket,
        an HTTP API, a default echo Lambda plus its role, the $default
        route/integration, and two auto-deployed stages ('v2' -- marked
        deprecated -- and 'api').  The rendered template is uploaded to S3
        and applied through a presigned URL: the stack is updated when it
        already exists, created otherwise.
        """
        # Probe for an existing stack; a "does not exist" ClientError means
        # we must create instead of update.  Any other error is re-raised.
        update = True
        try:
            self._cloudformation_client.describe_stacks(
                StackName=self._stack_name())
        except ClientError as e:
            if 'does not exist' not in str(e):
                raise e
            update = False

        self.info('Creating project stack')
        template = Template()
        template.set_version('2010-09-09')

        # Tunable stack parameters: Lambda memory size (default taken from
        # the AWS config) and the gateway-facing timeout in seconds.
        memory_size = template.add_parameter(
            Parameter(f'{self._stack_name()}MemorySize',
                      Type=NUMBER,
                      Default=self._aws_config.get('memory_sync', '3008')))

        timeout_gateway = template.add_parameter(
            Parameter(f'{self._stack_name()}GatewayTimeout',
                      Type=NUMBER,
                      Default='30'))

        # Private artifact bucket; objects under the 'tmp' prefix expire
        # after one day.
        template.add_resource(
            Bucket(inflection.camelize(inflection.underscore(self._bucket)),
                   BucketName=self._bucket,
                   AccessControl='Private',
                   LifecycleConfiguration=LifecycleConfiguration(Rules=[
                       LifecycleRule(
                           Prefix='tmp', Status='Enabled', ExpirationInDays=1)
                   ])))

        # HTTP (API Gateway v2) API fronting the default Lambda.
        api = template.add_resource(
            Api(self._rest_api_name(),
                Name=
                f'{inflection.humanize(self._project)} {inflection.humanize(self._env)} API',
                ProtocolType='HTTP'))

        role_title = f'{self._rest_api_name()}Role'
        self._add_role(role_title, template)

        # Placeholder Lambda that simply echoes the invoking event; the
        # real code is deployed separately.
        default_lambda = template.add_resource(
            Function(
                f'{self._rest_api_name()}Function',
                FunctionName=self._rest_api_name(),
                Code=Code(ZipFile='\n'.join(
                    ['def handler(event, context):', '    return event'])),
                Handler='index.handler',
                Role=GetAtt(role_title, 'Arn'),
                Runtime='python3.7',
                MemorySize=Ref(memory_size),
                Timeout=Ref(timeout_gateway)))

        # Proxy integration wiring the API to the Lambda by constructed ARN.
        integration = template.add_resource(
            Integration(self._integration_name(),
                        ApiId=Ref(api),
                        IntegrationType='AWS_PROXY',
                        PayloadFormatVersion='2.0',
                        IntegrationUri=Join('', [
                            'arn:aws:lambda:',
                            self._region,
                            ':',
                            self._account_id,
                            ':function:',
                            Ref(default_lambda),
                        ]),
                        DependsOn=f'{self._rest_api_name()}Function'))

        # Catch-all route pointing at the integration above.
        template.add_resource(
            Route(self._route_name(),
                  ApiId=Ref(api),
                  RouteKey='$default',
                  AuthorizationType='NONE',
                  Target=Join(
                      '/', ['integrations', Ref(integration)]),
                  DependsOn=[integration]))

        # Deprecated
        template.add_resource(
            Stage(f'{self._rest_api_name()}Stage',
                  StageName='v2',
                  ApiId=Ref(api),
                  AutoDeploy=True))

        # Deprecated
        template.add_resource(
            Deployment(f'{self._rest_api_name()}Deployment',
                       ApiId=Ref(api),
                       StageName='v2',
                       DependsOn=[
                           f'{self._rest_api_name()}Stage',
                           self._route_name(),
                           self._integration_name(),
                           self._rest_api_name(),
                       ]))

        # Current auto-deployed stage, served under 'api'.
        template.add_resource(
            Stage(f'{self._rest_api_name()}Stage1',
                  StageName='api',
                  ApiId=Ref(api),
                  AutoDeploy=True))

        template.add_resource(
            Deployment(f'{self._rest_api_name()}Deployment1',
                       ApiId=Ref(api),
                       StageName='api',
                       DependsOn=[
                           f'{self._rest_api_name()}Stage1',
                           self._route_name(),
                           self._integration_name(),
                           self._rest_api_name(),
                       ]))

        # Export the API id so dependent stacks can import it by name.
        template.add_output([
            Output(self._rest_api_reference(),
                   Export=Export(self._rest_api_reference()),
                   Value=Ref(api)),
        ])

        # Upload the rendered template and hand CloudFormation a presigned
        # URL to it (avoids inline template size limits).
        self._s3_client.put_object(Body=template.to_json(),
                                   Bucket=self._bucket,
                                   Key=self._template_key)
        url = self._s3_client.generate_presigned_url(ClientMethod='get_object',
                                                     Params={
                                                         'Bucket':
                                                         self._bucket,
                                                         'Key':
                                                         self._template_key
                                                     })

        if update:
            self._update_stack(self._stack_name(), url)
        else:
            self._create_stack(self._stack_name(), url)
Example #3
0
 def test_bucket_accesscontrol_bad_type(self):
     """A non-string AccessControl value must be rejected with TypeError."""
     self.assertRaises(
         TypeError,
         lambda: Bucket('b', AccessControl=123).validate())
Example #4
0
 def test_bucket_accesscontrol(self):
     """A documented canned-ACL string must pass validation."""
     authenticated_bucket = Bucket('b', AccessControl='AuthenticatedRead')
     authenticated_bucket.validate()
Example #5
0
# Converted from S3_Bucket.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/

from troposphere import Output, Ref, Template
from troposphere.s3 import Bucket, PublicRead

template = Template()

template.add_description(
    "AWS CloudFormation Sample Template S3_Bucket: Sample template showing "
    "how to create a publicly accessible S3 bucket. "
    "**WARNING** This template creates an Amazon EC2 instance. "
    "You will be billed for the AWS resources used if you create "
    "a stack from this template.")

# Publicly readable bucket that will hold the website content.
website_bucket = template.add_resource(
    Bucket("S3Bucket", AccessControl=PublicRead))

# Expose the generated bucket name as a stack output.
template.add_output(Output(
    "BucketName",
    Value=Ref(website_bucket),
    Description="Name of S3 bucket to hold website content"))

print(template.to_json())
Example #6
0
def flocker_docker_template(cluster_size, client_ami_map, node_ami_map):
    """
    Build a CloudFormation template for a Flocker + Docker + Docker Swarm
    cluster: one control node, ``cluster_size - 1`` agent nodes, and a
    separate client node that starts only after the cluster is up.

    :param int cluster_size: The number of nodes to create in the Flocker
        cluster (including control service node).
    :param dict client_ami_map: A map between AWS region name and AWS AMI ID
        for the client.
    :param dict node_ami_map: A map between AWS region name and AWS AMI ID
        for the node.
    :returns: a CloudFormation template for a Flocker + Docker + Docker Swarm
        cluster.
    """
    # Base JSON template.
    template = Template()

    # Keys corresponding to CloudFormation user Inputs.
    access_key_id_param = template.add_parameter(
        Parameter(
            "AmazonAccessKeyID",
            Description="Required: Your Amazon AWS access key ID",
            Type="String",
            NoEcho=True,
            AllowedPattern="[\w]+",
            MinLength="16",
            MaxLength="32",
        ))
    secret_access_key_param = template.add_parameter(
        Parameter(
            "AmazonSecretAccessKey",
            Description="Required: Your Amazon AWS secret access key",
            Type="String",
            NoEcho=True,
            MinLength="1",
        ))
    keyname_param = template.add_parameter(
        Parameter(
            "EC2KeyPair",
            Description=
            "Required: Name of an existing EC2 KeyPair to enable SSH "
            "access to the instance",
            Type="AWS::EC2::KeyPair::KeyName",
        ))
    template.add_parameter(
        Parameter(
            "S3AccessPolicy",
            Description="Required: Is current IAM user allowed to access S3? "
            "S3 access is required to distribute Flocker and Docker "
            "configuration amongst stack nodes. Reference: "
            "http://docs.aws.amazon.com/IAM/latest/UserGuide/"
            "access_permissions.html Stack creation will fail if user "
            "cannot access S3",
            Type="String",
            AllowedValues=["Yes"],
        ))
    volumehub_token = template.add_parameter(
        Parameter(
            "VolumeHubToken",
            Description=("Optional: Your Volume Hub token. "
                         "You'll find the token at "
                         "https://volumehub.clusterhq.com/v1/token."),
            Type="String",
            Default="",
        ))

    # Region -> AMI lookup tables used by FindInMap when launching
    # client and node instances.
    template.add_mapping('RegionMapClient',
                         {k: {
                             "AMI": v
                         }
                          for k, v in client_ami_map.items()})
    template.add_mapping('RegionMapNode',
                         {k: {
                             "AMI": v
                         }
                          for k, v in node_ami_map.items()})

    # Select a random AvailabilityZone within given AWS Region.
    zone = Select(0, GetAZs(""))

    # S3 bucket to hold {Flocker, Docker, Swarm} configuration for distribution
    # between nodes.
    s3bucket = Bucket('ClusterConfig', DeletionPolicy='Retain')
    template.add_resource(s3bucket)

    # Create SecurityGroup for cluster instances.
    instance_sg = template.add_resource(
        ec2.SecurityGroup(
            "InstanceSecurityGroup",
            GroupDescription=(
                "Enable ingress access on all protocols and ports."),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol=protocol,
                    FromPort="0",
                    ToPort="65535",
                    CidrIp="0.0.0.0/0",
                ) for protocol in ('tcp', 'udp')
            ]))

    # Base for post-boot {Flocker, Docker, Swarm} configuration on the nodes.
    # This is a shell-script preamble: each Ref is substituted by
    # CloudFormation before the script runs on the instance.
    base_user_data = [
        '#!/bin/bash\n',
        'aws_region="',
        Ref("AWS::Region"),
        '"\n',
        'aws_zone="',
        zone,
        '"\n',
        'access_key_id="',
        Ref(access_key_id_param),
        '"\n',
        'secret_access_key="',
        Ref(secret_access_key_param),
        '"\n',
        's3_bucket="',
        Ref(s3bucket),
        '"\n',
        'stack_name="',
        Ref("AWS::StackName"),
        '"\n',
        'volumehub_token="',
        Ref(volumehub_token),
        '"\n',
        'node_count="{}"\n'.format(cluster_size),
        'set -ex\n',
    ] + _sibling_lines("common.sh") + [
        'retry_command apt-get update\n',
    ]

    # XXX Flocker agents are indexed from 1 while the nodes overall are indexed
    # from 0.
    flocker_agent_number = 1

    # Gather WaitConditions
    wait_condition_names = []

    # Node 0 becomes the control node; every other index is an agent.
    for i in range(cluster_size):
        if i == 0:
            node_name = CONTROL_NODE_NAME
        else:
            node_name = AGENT_NODE_NAME_TEMPLATE.format(index=i)

        # Create an EC2 instance for the {Agent, Control} Node.
        ec2_instance = ec2.Instance(node_name,
                                    ImageId=FindInMap("RegionMapNode",
                                                      Ref("AWS::Region"),
                                                      "AMI"),
                                    InstanceType="m3.large",
                                    KeyName=Ref(keyname_param),
                                    SecurityGroups=[Ref(instance_sg)],
                                    AvailabilityZone=zone,
                                    Tags=Tags(Name=node_name))

        # WaitCondition and corresponding Handler to signal completion
        # of {Flocker, Docker, Swarm} configuration on the node.
        wait_condition_handle = WaitConditionHandle(
            INFRA_WAIT_HANDLE_TEMPLATE.format(node=node_name))
        template.add_resource(wait_condition_handle)
        wait_condition = WaitCondition(
            INFRA_WAIT_CONDITION_TEMPLATE.format(node=node_name),
            Handle=Ref(wait_condition_handle),
            Timeout=NODE_CONFIGURATION_TIMEOUT,
        )
        template.add_resource(wait_condition)

        # Gather WaitConditions
        wait_condition_names.append(wait_condition.name)

        user_data = base_user_data[:]
        user_data += [
            'node_number="{}"\n'.format(i),
            'node_name="{}"\n'.format(node_name),
            'wait_condition_handle="',
            Ref(wait_condition_handle),
            '"\n',
        ]

        # Setup S3 utilities to push/pull node-specific data to/from S3 bucket.
        user_data += _sibling_lines(S3_SETUP)

        if i == 0:
            # Control Node configuration.
            control_service_instance = ec2_instance
            user_data += ['flocker_node_type="control"\n']
            user_data += _sibling_lines(FLOCKER_CONFIGURATION_GENERATOR)
            user_data += _sibling_lines(DOCKER_SWARM_CA_SETUP)
            user_data += _sibling_lines(DOCKER_SETUP)

            # Setup Swarm 1.0.1
            user_data += _sibling_lines(SWARM_MANAGER_SETUP)
            template.add_output([
                Output(
                    "ControlNodeIP",
                    Description="Public IP of Flocker Control and "
                    "Swarm Manager.",
                    Value=GetAtt(ec2_instance, "PublicIp"),
                )
            ])
        else:
            # Agent Node configuration.
            # Agents boot only after the control node resource exists.
            ec2_instance.DependsOn = control_service_instance.name
            user_data += [
                'flocker_node_type="agent"\n',
                'flocker_agent_number="{}"\n'.format(flocker_agent_number)
            ]
            flocker_agent_number += 1
            user_data += _sibling_lines(DOCKER_SETUP)

            # Setup Swarm 1.0.1
            user_data += _sibling_lines(SWARM_NODE_SETUP)
            template.add_output([
                Output(
                    "AgentNode{}IP".format(i),
                    Description=(
                        "Public IP of Agent Node for Flocker and Swarm."),
                    Value=GetAtt(ec2_instance, "PublicIp"),
                )
            ])

        user_data += _sibling_lines(FLOCKER_CONFIGURATION_GETTER)
        user_data += _sibling_lines(VOLUMEHUB_SETUP)
        user_data += _sibling_lines(SIGNAL_CONFIG_COMPLETION)
        ec2_instance.UserData = Base64(Join("", user_data))
        template.add_resource(ec2_instance)

    # Client Node creation.
    client_instance = ec2.Instance(CLIENT_NODE_NAME,
                                   ImageId=FindInMap("RegionMapClient",
                                                     Ref("AWS::Region"),
                                                     "AMI"),
                                   InstanceType="m3.medium",
                                   KeyName=Ref(keyname_param),
                                   SecurityGroups=[Ref(instance_sg)],
                                   AvailabilityZone=zone,
                                   Tags=Tags(Name=CLIENT_NODE_NAME))
    wait_condition_handle = WaitConditionHandle(CLIENT_WAIT_HANDLE)
    template.add_resource(wait_condition_handle)
    wait_condition = WaitCondition(
        CLIENT_WAIT_CONDITION,
        Handle=Ref(wait_condition_handle),
        Timeout=NODE_CONFIGURATION_TIMEOUT,
    )
    template.add_resource(wait_condition)

    # Client Node {Flockerctl, Docker-compose} configuration.
    user_data = base_user_data[:]
    user_data += [
        'wait_condition_handle="',
        Ref(wait_condition_handle),
        '"\n',
        'node_number="{}"\n'.format("-1"),
    ]
    user_data += _sibling_lines(S3_SETUP)
    user_data += _sibling_lines(CLIENT_SETUP)
    user_data += _sibling_lines(SIGNAL_CONFIG_COMPLETION)
    client_instance.UserData = Base64(Join("", user_data))

    # Start Client Node after Control Node and Agent Nodes are
    # up and running Flocker, Docker, Swarm stack.
    client_instance.DependsOn = wait_condition_names
    template.add_resource(client_instance)

    # List of Output fields upon successful creation of the stack.
    template.add_output([
        Output(
            "ClientNodeIP",
            Description="Public IP address of the client node.",
            Value=GetAtt(client_instance, "PublicIp"),
        )
    ])
    template.add_output(
        Output(
            "ClientConfigDockerSwarmHost",
            Value=Join("", [
                "export DOCKER_HOST=tcp://",
                GetAtt(control_service_instance, "PublicIp"), ":2376"
            ]),
            Description="Client config: Swarm Manager's DOCKER_HOST setting."))
    template.add_output(
        Output("ClientConfigDockerTLS",
               Value="export DOCKER_TLS_VERIFY=1",
               Description="Client config: Enable TLS client for Swarm."))
    return template.to_json()
Example #7
0
from troposphere.codepipeline import (Actions, ActionTypeId, ArtifactStore,
                                      InputArtifacts, OutputArtifacts,
                                      Pipeline, Stages)

from troposphere.iam import Role
from troposphere.iam import Policy as IAMPolicy

from troposphere.s3 import Bucket, VersioningConfiguration

t = Template()

t.add_description("Effective DevOps in AWS: Pipeline")

t.add_resource(
    Bucket("S3Bucket",
           VersioningConfiguration=VersioningConfiguration(Status="Enabled")))

t.add_resource(
    Role("PipelineRole",
         AssumeRolePolicyDocument=Policy(Statement=[
             Statement(Effect=Allow,
                       Action=[AssumeRole],
                       Principal=Principal("Service",
                                           ["codepipeline.amazonaws.com"]))
         ]),
         Path="/",
         Policies=[
             IAMPolicy(PolicyName="HelloworldCodePipeline",
                       PolicyDocument={
                           "Statement": [{
                               "Effect": "Allow",
from troposphere import Output, Ref, Template
from troposphere.s3 import Bucket, PublicRead

template = Template("Create S3 Bucket for XKE")
template.set_version()

# Publicly readable test bucket with an explicit, fixed name.
test_bucket = template.add_resource(Bucket(
    "TestBucket",
    BucketName="xke-test-bucket",
    AccessControl=PublicRead,
))

# Surface the bucket name as a stack output.
template.add_output(Output(
    "BucketName",
    Value=Ref(test_bucket),
    Description="Name of S3 bucket"))

print(template.to_json())
Example #9
0
 def test_resource_depends_on_list(self):
     """Resources listed in DependsOn must be stored as their titles."""
     first = Bucket("B1")
     second = Bucket("B2")
     dependent = Bucket("B3", DependsOn=[first, second])
     self.assertEqual(first.title, dependent.DependsOn[0])
     self.assertEqual(second.title, dependent.DependsOn[1])
t.set_description(
    "AWS CloudFormation Sample Template "
    "S3_Website_Bucket_With_Retain_On_Delete: Sample template showing how to "
    "create a publicly accessible S3 bucket configured for website access "
    "with a deletion policy of retail on delete. "
    "**WARNING** This template creates an Amazon EC2 instance. "
    "You will be billed for the AWS resources used if you create "
    "a stack from this template."
)

s3bucket = t.add_resource(
    Bucket(
        "S3Bucket",
        AccessControl=PublicRead,
        WebsiteConfiguration=WebsiteConfiguration(
            IndexDocument="index.html", ErrorDocument="error.html"
        ),
    )
)
# XXX - Add "DeletionPolicy" : "Retain" to the resource

t.add_output(
    [
        Output(
            "WebsiteURL",
            Value=GetAtt(s3bucket, "WebsiteURL"),
            Description="URL for website hosted on S3",
        ),
        Output(
            "S3BucketSecureURL",
Example #11
0
from troposphere import Template, Output, Ref, Export, Join, AWS_STACK_NAME, GetAtt, iam
from troposphere.awslambda import Environment
from troposphere.dynamodb import Table, AttributeDefinition, KeySchema, StreamSpecification
from troposphere.iam import ManagedPolicy, Role
from troposphere.logs import LogGroup
from troposphere.s3 import Bucket
from troposphere.serverless import Function
from troposphere.sqs import Queue

template = Template(Description='Core resources for spunt.be')
template.set_transform('AWS::Serverless-2016-10-31')

lambda_code_bucket = template.add_resource(Bucket('LambdaCodeBucket', ))

# Events table
video_events_table = template.add_resource(
    Table(
        'VideoEventsTable',
        BillingMode='PAY_PER_REQUEST',
        AttributeDefinitions=[
            AttributeDefinition(
                AttributeName='videoId',
                AttributeType='S',
            ),
            AttributeDefinition(
                AttributeName='timestamp',
                AttributeType='S',
            )
        ],
        KeySchema=[
            KeySchema(
Example #12
0
    Type=constants.STRING,
    Default='spunt.be',
))

rewrite_assets_lambda_code_key = template.add_parameter(Parameter(
    'RewriteAssets',
    Type=constants.STRING,
    Default='lambda-code/frontend/rewrite_assets.zip',
))

template.add_parameter_to_group(rewrite_assets_lambda_code_key, 'Lambda Keys')

frontend_bucket = template.add_resource(Bucket(
    "FrontendBucket",
    AccessControl='PublicRead',  # Maybe remove this later on
    WebsiteConfiguration=WebsiteConfiguration(
        IndexDocument='index.html',
        ErrorDocument='index.html',
    ),
))

cloudfront_certificate = template.add_resource(Certificate(
    "CloudFrontCertificate",
    DomainName=Ref(domain_name),
    DomainValidationOptions=[DomainValidationOption(
        DomainName=Ref(domain_name),
        ValidationDomain=ImportValue(Join('-', [Ref(dns_stack), 'HostedZoneName'])),
    )],
    ValidationMethod='DNS',
))

readonly_function_role = template.add_resource(Role(
Example #13
0
ec2_security_group = SecurityGroup(
    "ec2securitygroupingress",
    GroupDescription="Allow inbound TCP from the ALB",
    VpcId=Ref(vpc_id),
    SecurityGroupIngress=[
        SecurityGroupIngress("albsecuritygroupingress",
                             IpProtocol="tcp",
                             FromPort="0",
                             ToPort="65535",
                             SourceSecurityGroupId=Ref(alb_security_group))
    ],
    DependsOn=Ref(alb_security_group))
template.add_resource(alb_security_group)
template.add_resource(ec2_security_group)

logs_bucket = Bucket("LogsBucket", DeletionPolicy="Retain")
logs_bucket_policy = BucketPolicy(
    "LogsBucketPolicy",
    Bucket=Ref(logs_bucket),
    PolicyDocument={
        "Version":
        "2012-10-17",
        "Statement": [{
            "Sid":
            "Stmt1429136633762",
            "Action": ["s3:PutObject"],
            "Effect":
            "Allow",
            "Resource":
            Join("", ['s3:::', Ref(logs_bucket), "/alb/*"]),
            "Principal": {
from awslambdacontinuousdelivery.python.test import getTest
from awslambdacontinuousdelivery.notifications.sns import getEmailTopic 
from awslambdacontinuousdelivery.notifications import addFailureNotifications

template = Template()
# AWS will substitute this with the stack name during deployment
stack_name = Sub("${AWS::StackName}")
source_code = "SourceCode"
deploy_pkg_artifact = "FunctionDeployPackage"
cf_artifact = "CfOutputTemplate"
pipeline_stages = [] # list holding all stages, order matters!
stages = ["Gamma"]

s3 = template.add_resource(
  Bucket("ArtifactStoreS3Location"
        , AccessControl = "Private"
  )
)

pipeline_role = template.add_resource(
  createCodepipelineRole("PipelineRole"))
source = getGitHub(template, source_code)
pipeline_stages.append(source)

#unit_tests = getUnittest(template, source_code)
#pipeline_stages.append(unit_tests)

build_stage = getBuild(
  template, source_code, deploy_pkg_artifact, cf_artifact, stages)
pipeline_stages.append(build_stage)
Example #15
0
 def test_BogusAttribute(self):
     """An unknown keyword on a resource must raise AttributeError."""
     template = Template()
     with self.assertRaises(AttributeError):
         template.add_resource(Bucket("S3Bucket", Bogus="Retain"))
Example #16
0
 def test_resource_depends_on_attr(self):
     """A single resource passed to DependsOn is stored as its title."""
     upstream = Bucket("B1")
     downstream = Bucket("B2", DependsOn=upstream)
     self.assertEqual(upstream.title, downstream.DependsOn)
Example #17
0
    def add_resources(self):
        """ Add All Cloudformation Resources. This will include vpc, igw, and any other network
        resources """
        self.vpc = self.template.add_resource(
            ec2.VPC(
                "VPC",
                CidrBlock=Ref(self.VpcCidr),
                EnableDnsSupport=True,
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-VPC"),
            ))

        self.RESTPubSubnet1 = self.template.add_resource(
            ec2.Subnet(
                "RESTPubSubnet1",
                CidrBlock=Ref(self.RESTPubSub1Cidr),
                VpcId=Ref(self.vpc),
                AvailabilityZone="us-east-1a",
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-RESTPubSubnet1"),
            ))

        self.RESTPubSubnet2 = self.template.add_resource(
            ec2.Subnet(
                "RESTPubSubnet2",
                VpcId=Ref(self.vpc),
                CidrBlock=Ref(self.RESTPubSub2Cidr),
                AvailabilityZone="us-east-1b",
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-RESTPubSubnet2"),
            ))

        self.RESTPrivSubnet1 = self.template.add_resource(
            ec2.Subnet(
                "RESTPrivSubnet1",
                VpcId=Ref(self.vpc),
                CidrBlock=Ref(self.RESTPrivSub1Cidr),
                AvailabilityZone="us-east-1a",
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-RESTPrivSubnet1"),
            ))

        self.RESTPrivSubnet2 = self.template.add_resource(
            ec2.Subnet(
                "RESTPrivSubnet2",
                CidrBlock=Ref(self.RESTPrivSub2Cidr),
                VpcId=Ref(self.vpc),
                AvailabilityZone="us-east-1b",
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-RESTPrivSubnet2"),
            ))

        self.RESTIGW = self.template.add_resource(
            ec2.InternetGateway(
                "RESTIGW",
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-RESTIGW"),
            ))

        self.RESTIGWAttachment = self.template.add_resource(
            ec2.VPCGatewayAttachment(
                "RESTIGWAttachment",
                VpcId=Ref(self.vpc),
                InternetGatewayId=Ref(self.RESTIGW),
            ))

        self.RESTEIP1 = self.template.add_resource(
            ec2.EIP(
                "RESTEIP1",
                Domain="vpc",
            ))

        self.RESTEIP2 = self.template.add_resource(
            ec2.EIP(
                "RESTEIP2",
                Domain="vpc",
            ))

        self.RESTNAT1 = self.template.add_resource(
            ec2.NatGateway(
                "NAT",
                AllocationId=GetAtt(self.RESTEIP1, "AllocationId"),
                SubnetId=Ref(self.RESTPubSubnet1),
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-RESTNAT1"),
            ))

        self.RESTNAT2 = self.template.add_resource(
            ec2.NatGateway(
                "NAT2",
                AllocationId=GetAtt(self.RESTEIP2, "AllocationId"),
                SubnetId=Ref(self.RESTPubSubnet2),
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-RESTNAT2"),
            ))

        self.RESTPrivRT1 = self.template.add_resource(
            ec2.RouteTable(
                "RESTPrivRT1",
                VpcId=Ref(self.vpc),
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-RESTPRIVRT1"),
            ))

        self.RESTPrivRT2 = self.template.add_resource(
            ec2.RouteTable(
                "RESTPrivRT2",
                VpcId=Ref(self.vpc),
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-RESTPRIVRT2"),
            ))

        self.RESTNatRoute = self.template.add_resource(
            ec2.Route(
                "RESTNatRoute",
                RouteTableId=Ref(self.RESTPrivRT1),
                DestinationCidrBlock="0.0.0.0/0",
                NatGatewayId=Ref(self.RESTNAT1),
            ))

        self.RESTNat2Route = self.template.add_resource(
            ec2.Route(
                "RESTNatRoute2",
                RouteTableId=Ref(self.RESTPrivRT2),
                DestinationCidrBlock="0.0.0.0/0",
                NatGatewayId=Ref(self.RESTNAT2),
            ))

        self.RESTPrivRT1Association = self.template.add_resource(
            ec2.SubnetRouteTableAssociation(
                "RESTPrivRT1Association",
                SubnetId=Ref(self.RESTPrivSubnet1),
                RouteTableId=Ref(self.RESTPrivRT1),
            ))

        self.RESTPrivRT2Association = self.template.add_resource(
            ec2.SubnetRouteTableAssociation(
                "RESTPrivRT2Association",
                SubnetId=Ref(self.RESTPrivSubnet2),
                RouteTableId=Ref(self.RESTPrivRT2),
            ))

        self.RESTPubRT1 = self.template.add_resource(
            ec2.RouteTable(
                "RESTPubRT1",
                VpcId=Ref(self.vpc),
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-RESTPUBRT1"),
            ))

        self.RESTPubRT2 = self.template.add_resource(
            ec2.RouteTable(
                "RESTPubRT2",
                VpcId=Ref(self.vpc),
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-RESTPUBRT2"),
            ))

        self.RESTPubRT1IGWattachment = self.template.add_resource(
            ec2.Route(
                "RESTPubRT1IGWAttachment",
                DependsOn=["RESTIGWAttachment"],
                RouteTableId=Ref(self.RESTPubRT1),
                DestinationCidrBlock="0.0.0.0/0",
                GatewayId=Ref(self.RESTIGW),
            ))

        self.RESTPubRT2IGWattachment = self.template.add_resource(
            ec2.Route(
                "RESTPubRT2IGWAttachment",
                DependsOn=["RESTIGWAttachment"],
                RouteTableId=Ref(self.RESTPubRT2),
                DestinationCidrBlock="0.0.0.0/0",
                GatewayId=Ref(self.RESTIGW),
            ))

        self.RESTPubRT1Association = self.template.add_resource(
            ec2.SubnetRouteTableAssociation(
                "RESTPubRT1Associate",
                SubnetId=Ref(self.RESTPubSubnet1),
                RouteTableId=Ref(self.RESTPubRT1),
            ))

        self.RESTPubRT2Asocation = self.template.add_resource(
            ec2.SubnetRouteTableAssociation(
                "RESTPubR2Associate",
                SubnetId=Ref(self.RESTPubSubnet2),
                RouteTableId=Ref(self.RESTPubRT2),
            ))

        self.VPCPeeringBetweenSharedVPCAndClientVPC = self.template.add_resource(
            ec2.VPCPeeringConnection(
                "VPCPeeringBetweenSharedVPCAndClientVPC",
                DependsOn=["RESTPrivRT1", "RESTPrivRT2"],
                VpcId=Ref(self.SharedServicesVpcId),
                PeerVpcId=Ref(self.vpc),
                Tags=self.base_tags +
                Tags(Name=self.environment_parameters["ClientEnvironmentKey"] +
                     "-SSTOCLIENTVPCPEER"),
            ))

        self.PeeringRouteForClientVPCPriv1 = self.template.add_resource(
            ec2.Route(
                "PeeringRouteForClientVPCPriv1",
                DependsOn=["VPCPeeringBetweenSharedVPCAndClientVPC"],
                RouteTableId=Ref(self.RESTPrivRT1),
                DestinationCidrBlock=Ref(self.SharedServicesVpcCidrBlock),
                VpcPeeringConnectionId=Ref(
                    self.VPCPeeringBetweenSharedVPCAndClientVPC),
            ))

        self.PeeringRouteForClientVPCPriv2 = self.template.add_resource(
            ec2.Route(
                "PeeringRouteForClientVPCPriv2",
                DependsOn=["VPCPeeringBetweenSharedVPCAndClientVPC"],
                RouteTableId=Ref(self.RESTPrivRT2),
                DestinationCidrBlock=Ref(self.SharedServicesVpcCidrBlock),
                VpcPeeringConnectionId=Ref(
                    self.VPCPeeringBetweenSharedVPCAndClientVPC),
            ))

        self.PeeringRouteForSharedServicesVPCPriv1 = self.template.add_resource(
            ec2.Route(
                "PeeringRouteForSharedServicesVPCPriv1",
                DependsOn=["VPCPeeringBetweenSharedVPCAndClientVPC"],
                RouteTableId=Ref(self.SharedServicesPrivateRouteTable1),
                DestinationCidrBlock=Ref(self.VpcCidr),
                VpcPeeringConnectionId=Ref(
                    self.VPCPeeringBetweenSharedVPCAndClientVPC),
            ))

        self.PeeringRouteForSharedServicesVPCPriv2 = self.template.add_resource(
            ec2.Route(
                "PeeringRouteForSharedServicesVPCPriv2",
                DependsOn=["VPCPeeringBetweenSharedVPCAndClientVPC"],
                RouteTableId=Ref(self.SharedServicesPrivateRouteTable2),
                DestinationCidrBlock=Ref(self.VpcCidr),
                VpcPeeringConnectionId=Ref(
                    self.VPCPeeringBetweenSharedVPCAndClientVPC),
            ))

        self.EnvironmentArtifactsBucket = self.template.add_resource(
            Bucket(
                "EnvironmentArtifactsBucket",
                BucketName=(
                    self.environment_parameters["ClientEnvironmentKey"] +
                    "-environment-artifacts").lower(),
                AccessControl="BucketOwnerRead",
                VersioningConfiguration=VersioningConfiguration(
                    Status="Enabled", ),
            ))

        self.BootstrapRepositorySSMParameter = self.template.add_resource(
            SSMParameter(
                "BootstrapRepositorySSMParameter",
                Description="The Bootstrap Repository",
                Name=self.environment_parameters["ClientEnvironmentKey"] +
                "-bootstrapRepository",
                Type="String",
                Value=(self.environment_parameters["ClientEnvironmentKey"] +
                       "-environment-artifacts").lower(),
            ))
                DomainName='robkenis.com',
                HostedZoneId=ImportValue(
                    Join('-', [Ref(dns_stack), 'HostedZoneId'])),
            ),
            DomainValidationOption(
                DomainName='www.robkenis.com',
                HostedZoneId=ImportValue(
                    Join('-', [Ref(dns_stack), 'HostedZoneId'])),
            ),
        ],
        Tags=Tags({'Name': Ref(AWS_STACK_NAME)}),
    ))

# S3 bucket used as the website origin; kept private (no public ACL access),
# so content is only reachable through the CloudFront origin access identity.
website_origin_bucket = Bucket(
    'WebsiteOrigin',
    AccessControl='Private',
)
s3_website_origin = template.add_resource(website_origin_bucket)

# CloudFront origin access identity for the website origin bucket; the
# comment field is set to the stack name for easy identification in the console.
_oai_config = CloudFrontOriginAccessIdentityConfig(Comment=Ref(AWS_STACK_NAME))
origin_access_identity = template.add_resource(
    CloudFrontOriginAccessIdentity(
        'WebsiteOriginAccessIdentity',
        CloudFrontOriginAccessIdentityConfig=_oai_config,
    ))

template.add_resource(
    BucketPolicy(
        'WebsiteOriginPolicy',
        Bucket=Ref(s3_website_origin),
        PolicyDocument=Policy(Statement=[
            Statement(
Beispiel #19
0
def create_template():
    template = Template(Description=(
        "Static website hosted with S3 and CloudFront. "
        "https://github.com/schlarpc/overengineered-cloudfront-s3-static-website"
    ))

    partition_config = add_mapping(
        template,
        "PartitionConfig",
        {
            "aws": {
                # the region with the control plane for CloudFront, IAM, Route 53, etc
                "PrimaryRegion":
                "us-east-1",
                # assume that Lambda@Edge replicates to all default enabled regions, and that
                # future regions will be opt-in. generated with AWS CLI:
                # aws ec2 describe-regions --all-regions --query "Regions[?OptInStatus=='opt-in-not-required'].RegionName|sort(@)"
                "DefaultRegions": [
                    "ap-northeast-1",
                    "ap-northeast-2",
                    "ap-northeast-3",
                    "ap-south-1",
                    "ap-southeast-1",
                    "ap-southeast-2",
                    "ca-central-1",
                    "eu-central-1",
                    "eu-north-1",
                    "eu-west-1",
                    "eu-west-2",
                    "eu-west-3",
                    "sa-east-1",
                    "us-east-1",
                    "us-east-2",
                    "us-west-1",
                    "us-west-2",
                ],
            },
            # this doesn't actually work, because Lambda@Edge isn't supported in aws-cn
            "aws-cn": {
                "PrimaryRegion": "cn-north-1",
                "DefaultRegions": ["cn-north-1", "cn-northwest-1"],
            },
        },
    )

    acm_certificate_arn = template.add_parameter(
        Parameter(
            "AcmCertificateArn",
            Description=
            "Existing ACM certificate to use for serving TLS. Overrides HostedZoneId.",
            Type="String",
            AllowedPattern="(arn:[^:]+:acm:[^:]+:[^:]+:certificate/.+|)",
            Default="",
        ))

    hosted_zone_id = template.add_parameter(
        Parameter(
            "HostedZoneId",
            Description=
            "Existing Route 53 zone to use for validating a new TLS certificate.",
            Type="String",
            AllowedPattern="(Z[A-Z0-9]+|)",
            Default="",
        ))

    dns_names = template.add_parameter(
        Parameter(
            "DomainNames",
            Description=
            "Comma-separated list of additional domain names to serve.",
            Type="CommaDelimitedList",
            Default="",
        ))

    tls_protocol_version = template.add_parameter(
        Parameter(
            "TlsProtocolVersion",
            Description=
            "CloudFront TLS security policy; see https://amzn.to/2DR91Xq for details.",
            Type="String",
            Default="TLSv1.2_2019",
        ))

    log_retention_days = template.add_parameter(
        Parameter(
            "LogRetentionDays",
            Description=
            "Days to keep CloudFront, S3, and Lambda logs. 0 means indefinite retention.",
            Type="Number",
            AllowedValues=[0] + CLOUDWATCH_LOGS_RETENTION_OPTIONS,
            Default=365,
        ))

    default_ttl_seconds = template.add_parameter(
        Parameter(
            "DefaultTtlSeconds",
            Description="Cache time-to-live when not set by S3 object headers.",
            Type="Number",
            Default=int(datetime.timedelta(minutes=5).total_seconds()),
        ))

    enable_price_class_hack = template.add_parameter(
        Parameter(
            "EnablePriceClassHack",
            Description="Cut your bill in half with this one weird trick.",
            Type="String",
            Default="false",
            AllowedValues=["true", "false"],
        ))

    retention_defined = add_condition(template, "RetentionDefined",
                                      Not(Equals(Ref(log_retention_days), 0)))

    using_price_class_hack = add_condition(
        template, "UsingPriceClassHack",
        Equals(Ref(enable_price_class_hack), "true"))

    using_acm_certificate = add_condition(
        template, "UsingAcmCertificate",
        Not(Equals(Ref(acm_certificate_arn), "")))

    using_hosted_zone = add_condition(template, "UsingHostedZone",
                                      Not(Equals(Ref(hosted_zone_id), "")))

    using_certificate = add_condition(
        template,
        "UsingCertificate",
        Or(Condition(using_acm_certificate), Condition(using_hosted_zone)),
    )

    should_create_certificate = add_condition(
        template,
        "ShouldCreateCertificate",
        And(Condition(using_hosted_zone),
            Not(Condition(using_acm_certificate))),
    )

    using_dns_names = add_condition(template, "UsingDnsNames",
                                    Not(Equals(Select(0, Ref(dns_names)), "")))

    is_primary_region = "IsPrimaryRegion"
    template.add_condition(
        is_primary_region,
        Equals(Region, FindInMap(partition_config, Partition,
                                 "PrimaryRegion")),
    )

    precondition_region_is_primary = template.add_resource(
        WaitConditionHandle(
            "PreconditionIsPrimaryRegionForPartition",
            Condition=is_primary_region,
        ))

    log_ingester_dlq = template.add_resource(
        Queue(
            "LogIngesterDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
        ))

    log_ingester_role = template.add_resource(
        Role(
            "LogIngesterRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[GetAtt(log_ingester_dlq, "Arn")],
                            )
                        ],
                    ),
                )
            ],
        ))

    log_ingester = template.add_resource(
        Function(
            "LogIngester",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(log_ingest.handler.__name__),
            Code=Code(ZipFile=inspect.getsource(log_ingest)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(log_ingester_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(log_ingester_dlq, "Arn")),
        ))

    log_ingester_permission = template.add_resource(
        Permission(
            "LogIngesterPermission",
            FunctionName=GetAtt(log_ingester, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="s3.amazonaws.com",
            SourceAccount=AccountId,
        ))

    log_bucket = template.add_resource(
        Bucket(
            "LogBucket",
            # S3 requires this ACL (regardless of bucket policy) or s3:PutBucketLogging fails.
            # When the CloudFront distribution is created, it adds an additional bucket ACL.
            # That ACL is not possible to model in CloudFormation.
            AccessControl="LogDeliveryWrite",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                LifecycleRule(ExpirationInDays=1, Status="Enabled"),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=1),
                    Status="Enabled",
                ),
            ]),
            NotificationConfiguration=NotificationConfiguration(
                LambdaConfigurations=[
                    LambdaConfigurations(Event="s3:ObjectCreated:*",
                                         Function=GetAtt(log_ingester, "Arn"))
                ]),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # if we use KMS, we can't read the logs
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
            DependsOn=[log_ingester_permission],
        ))

    log_ingester_log_group = template.add_resource(
        LogGroup(
            "LogIngesterLogGroup",
            LogGroupName=Join(
                "", ["/aws/lambda/", Ref(log_ingester)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    log_ingester_policy = template.add_resource(
        PolicyType(
            "LogIngesterPolicy",
            Roles=[Ref(log_ingester_role)],
            PolicyName="IngestLogPolicy",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    Region,
                                    AccountId,
                                    "log-group",
                                    "/aws/cloudfront/*",
                                ],
                            ),
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    Region,
                                    AccountId,
                                    "log-group",
                                    "/aws/s3/*",
                                ],
                            ),
                            GetAtt(log_ingester_log_group, "Arn"),
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(log_bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))

    bucket = template.add_resource(
        Bucket(
            "ContentBucket",
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                # not supported by CFN yet:
                # LifecycleRule(
                # Transitions=[
                # LifecycleRuleTransition(
                # StorageClass='INTELLIGENT_TIERING',
                # TransitionInDays=1,
                # ),
                # ],
                # Status="Enabled",
                # ),
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=7),
                    Status="Enabled",
                )
            ]),
            LoggingConfiguration=LoggingConfiguration(
                DestinationBucketName=Ref(log_bucket), LogFilePrefix="s3/"),
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            # Origin Access Identities can't use KMS
                            SSEAlgorithm="AES256"))
                ]),
            OwnershipControls=OwnershipControls(Rules=[
                OwnershipControlsRule(ObjectOwnership="BucketOwnerPreferred")
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
        ))

    origin_access_identity = template.add_resource(
        CloudFrontOriginAccessIdentity(
            "CloudFrontIdentity",
            CloudFrontOriginAccessIdentityConfig=
            CloudFrontOriginAccessIdentityConfig(
                Comment=GetAtt(bucket, "Arn")),
        ))

    bucket_policy = template.add_resource(
        BucketPolicy(
            "ContentBucketPolicy",
            Bucket=Ref(bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "CanonicalUser",
                            GetAtt(origin_access_identity,
                                   "S3CanonicalUserId"),
                        ),
                        Action=[s3.GetObject],
                        Resource=[Join("", [GetAtt(bucket, "Arn"), "/*"])],
                    ),
                ],
            ),
        ))

    # Not strictly necessary, as ACLs should take care of this access. However, CloudFront docs
    # state "In some circumstances [...] S3 resets permissions on the bucket to the default value",
    # and this allows logging to work without any ACLs in place.
    log_bucket_policy = template.add_resource(
        BucketPolicy(
            "LogBucketPolicy",
            Bucket=Ref(log_bucket),
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join(
                                "/",
                                [GetAtt(log_bucket, "Arn"), "cloudfront", "*"])
                        ],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "delivery.logs.amazonaws.com"),
                        Action=[s3.ListBucket],
                        Resource=[Join("/", [GetAtt(log_bucket, "Arn")])],
                    ),
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service", "s3.amazonaws.com"),
                        Action=[s3.PutObject],
                        Resource=[
                            Join("/", [GetAtt(log_bucket, "Arn"), "s3", "*"])
                        ],
                    ),
                ],
            ),
        ))

    certificate_validator_dlq = template.add_resource(
        Queue(
            "CertificateValidatorDLQ",
            MessageRetentionPeriod=int(
                datetime.timedelta(days=14).total_seconds()),
            KmsMasterKeyId="alias/aws/sqs",
            Condition=should_create_certificate,
        ))

    certificate_validator_role = template.add_resource(
        Role(
            "CertificateValidatorRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal("Service", "lambda.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="DLQPolicy",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            Statement(
                                Effect=Allow,
                                Action=[sqs.SendMessage],
                                Resource=[
                                    GetAtt(certificate_validator_dlq, "Arn")
                                ],
                            )
                        ],
                    ),
                )
            ],
            # TODO scope down
            ManagedPolicyArns=[
                "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole",
                "arn:aws:iam::aws:policy/AmazonRoute53FullAccess",
                "arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly",
            ],
            Condition=should_create_certificate,
        ))

    certificate_validator_function = template.add_resource(
        Function(
            "CertificateValidatorFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.{}".format(certificate_validator.handler.__name__),
            Code=Code(ZipFile=inspect.getsource(certificate_validator)),
            MemorySize=256,
            Timeout=300,
            Role=GetAtt(certificate_validator_role, "Arn"),
            DeadLetterConfig=DeadLetterConfig(
                TargetArn=GetAtt(certificate_validator_dlq, "Arn")),
            Environment=Environment(
                Variables={
                    certificate_validator.EnvVars.HOSTED_ZONE_ID.name:
                    Ref(hosted_zone_id)
                }),
            Condition=should_create_certificate,
        ))

    certificate_validator_log_group = template.add_resource(
        LogGroup(
            "CertificateValidatorLogGroup",
            LogGroupName=Join(
                "", ["/aws/lambda/",
                     Ref(certificate_validator_function)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
            Condition=should_create_certificate,
        ))

    certificate_validator_rule = template.add_resource(
        Rule(
            "CertificateValidatorRule",
            EventPattern={
                "detail-type": ["AWS API Call via CloudTrail"],
                "detail": {
                    "eventSource": ["acm.amazonaws.com"],
                    "eventName": ["AddTagsToCertificate"],
                    "requestParameters": {
                        "tags": {
                            "key": [certificate_validator_function.title],
                            "value":
                            [GetAtt(certificate_validator_function, "Arn")],
                        }
                    },
                },
            },
            Targets=[
                Target(
                    Id="certificate-validator-lambda",
                    Arn=GetAtt(certificate_validator_function, "Arn"),
                )
            ],
            DependsOn=[certificate_validator_log_group],
            Condition=should_create_certificate,
        ))

    certificate_validator_permission = template.add_resource(
        Permission(
            "CertificateValidatorPermission",
            FunctionName=GetAtt(certificate_validator_function, "Arn"),
            Action="lambda:InvokeFunction",
            Principal="events.amazonaws.com",
            SourceArn=GetAtt(certificate_validator_rule, "Arn"),
            Condition=should_create_certificate,
        ))

    certificate = template.add_resource(
        Certificate(
            "Certificate",
            DomainName=Select(0, Ref(dns_names)),
            SubjectAlternativeNames=Ref(
                dns_names),  # duplicate first name works fine
            ValidationMethod="DNS",
            Tags=Tags(
                **{
                    certificate_validator_function.title:
                    GetAtt(certificate_validator_function, "Arn")
                }),
            DependsOn=[certificate_validator_permission],
            Condition=should_create_certificate,
        ))

    edge_hook_role = template.add_resource(
        Role(
            "EdgeHookRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect="Allow",
                        Principal=Principal(
                            "Service",
                            [
                                "lambda.amazonaws.com",
                                "edgelambda.amazonaws.com"
                            ],
                        ),
                        Action=[sts.AssumeRole],
                    )
                ],
            ),
        ))

    edge_hook_function = template.add_resource(
        Function(
            "EdgeHookFunction",
            Runtime=PYTHON_RUNTIME,
            Handler="index.handler",
            Code=Code(ZipFile=inspect.getsource(edge_hook)),
            MemorySize=128,
            Timeout=3,
            Role=GetAtt(edge_hook_role, "Arn"),
        ))
    edge_hook_function_hash = (hashlib.sha256(
        json.dumps(edge_hook_function.to_dict(),
                   sort_keys=True).encode("utf-8")).hexdigest()[:10].upper())

    edge_hook_version = template.add_resource(
        Version(
            "EdgeHookVersion" + edge_hook_function_hash,
            FunctionName=GetAtt(edge_hook_function, "Arn"),
        ))

    replica_log_group_name = Join(
        "/",
        [
            "/aws/lambda",
            Join(
                ".",
                [
                    FindInMap(partition_config, Partition, "PrimaryRegion"),
                    Ref(edge_hook_function),
                ],
            ),
        ],
    )

    edge_hook_role_policy = template.add_resource(
        PolicyType(
            "EdgeHookRolePolicy",
            PolicyName="write-logs",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[logs.CreateLogStream, logs.PutLogEvents],
                        Resource=[
                            Join(
                                ":",
                                [
                                    "arn",
                                    Partition,
                                    "logs",
                                    "*",
                                    AccountId,
                                    "log-group",
                                    replica_log_group_name,
                                    "log-stream",
                                    "*",
                                ],
                            ),
                        ],
                    ),
                ],
            ),
            Roles=[Ref(edge_hook_role)],
        ))

    stack_set_administration_role = template.add_resource(
        Role(
            "StackSetAdministrationRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal("Service",
                                            "cloudformation.amazonaws.com"),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
        ))

    # IAM role assumed when stack instances of the StackSet below are
    # created; it trusts the administration role defined above.
    stack_set_execution_role = template.add_resource(
        Role(
            "StackSetExecutionRole",
            AssumeRolePolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Principal=Principal(
                            "AWS", GetAtt(stack_set_administration_role,
                                          "Arn")),
                        Action=[sts.AssumeRole],
                    ),
                ],
            ),
            Policies=[
                PolicyProperty(
                    PolicyName="create-stackset-instances",
                    PolicyDocument=PolicyDocument(
                        Version="2012-10-17",
                        Statement=[
                            # Read-only calls made while reconciling the
                            # stack instances.
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.DescribeStacks,
                                    logs.DescribeLogGroups,
                                ],
                                Resource=["*"],
                            ),
                            # stack instances communicate with the CFN service via SNS
                            # NOTE(review): NotResource grants sns:Publish to
                            # everything EXCEPT this account's own topics,
                            # i.e. only the CFN service topics -- confirm the
                            # inversion is intentional.
                            Statement(
                                Effect=Allow,
                                Action=[sns.Publish],
                                NotResource=[
                                    Join(
                                        ":",
                                        [
                                            "arn", Partition, "sns", "*",
                                            AccountId, "*"
                                        ],
                                    )
                                ],
                            ),
                            # Manage the replica log group (and its retention
                            # policy) in any region of this account.
                            Statement(
                                Effect=Allow,
                                Action=[
                                    logs.CreateLogGroup,
                                    logs.DeleteLogGroup,
                                    logs.PutRetentionPolicy,
                                    logs.DeleteRetentionPolicy,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn",
                                            Partition,
                                            "logs",
                                            "*",
                                            AccountId,
                                            "log-group",
                                            replica_log_group_name,
                                            "log-stream",
                                            "",
                                        ],
                                    ),
                                ],
                            ),
                            # Create/update/delete the per-region stacks,
                            # scoped to names "StackSet-<this stack>-*".
                            Statement(
                                Effect=Allow,
                                Action=[
                                    cloudformation.CreateStack,
                                    cloudformation.DeleteStack,
                                    cloudformation.UpdateStack,
                                ],
                                Resource=[
                                    Join(
                                        ":",
                                        [
                                            "arn",
                                            Partition,
                                            "cloudformation",
                                            "*",
                                            AccountId,
                                            Join(
                                                "/",
                                                [
                                                    "stack",
                                                    Join(
                                                        "-",
                                                        [
                                                            "StackSet",
                                                            StackName, "*"
                                                        ],
                                                    ),
                                                ],
                                            ),
                                        ],
                                    )
                                ],
                            ),
                        ],
                    ),
                ),
            ],
        ))

    # Standalone policy letting the administration role assume the execution
    # role above; attached separately so it can reference the execution
    # role's ARN after both roles exist.
    stack_set_administration_role_policy = template.add_resource(
        PolicyType(
            "StackSetAdministrationRolePolicy",
            PolicyName="assume-execution-role",
            PolicyDocument=PolicyDocument(
                Version="2012-10-17",
                Statement=[
                    Statement(
                        Effect=Allow,
                        Action=[sts.AssumeRole],
                        Resource=[GetAtt(stack_set_execution_role, "Arn")],
                    ),
                ],
            ),
            Roles=[Ref(stack_set_administration_role)],
        ))

    # Self-managed StackSet that deploys the Lambda@Edge replica log group
    # into this account across the partition's default regions.
    edge_log_groups = template.add_resource(
        StackSet(
            "EdgeLambdaLogGroupStackSet",
            AdministrationRoleARN=GetAtt(stack_set_administration_role, "Arn"),
            ExecutionRoleName=Ref(stack_set_execution_role),
            StackSetName=Join("-", [StackName, "EdgeLambdaLogGroup"]),
            PermissionModel="SELF_MANAGED",
            Description="Multi-region log groups for Lambda@Edge replicas",
            Parameters=[
                StackSetParameter(
                    ParameterKey="LogGroupName",
                    ParameterValue=replica_log_group_name,
                ),
                StackSetParameter(
                    ParameterKey="LogRetentionDays",
                    ParameterValue=Ref(log_retention_days),
                ),
            ],
            # Deploy to all regions at once; abort on the first failure.
            OperationPreferences=OperationPreferences(
                FailureToleranceCount=0,
                MaxConcurrentPercentage=100,
            ),
            StackInstancesGroup=[
                StackInstances(
                    DeploymentTargets=DeploymentTargets(Accounts=[AccountId]),
                    Regions=FindInMap(partition_config, Partition,
                                      "DefaultRegions"),
                )
            ],
            # The nested template is embedded inline rather than uploaded.
            TemplateBody=create_log_group_template().to_json(indent=None),
            # Instances can only be created once the admin role can actually
            # assume the execution role.
            DependsOn=[stack_set_administration_role_policy],
        ))

    # Minimal second distribution, created only under the price-class-hack
    # condition; its domain name is exposed as the DNS target instead of the
    # main distribution's (see the DistributionDnsTarget output).
    price_class_distribution = template.add_resource(
        Distribution(
            "PriceClassDistribution",
            DistributionConfig=DistributionConfig(
                Comment="Dummy distribution used for price class hack",
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    ViewerProtocolPolicy="allow-all",
                    ForwardedValues=ForwardedValues(QueryString=False),
                ),
                Enabled=True,
                Origins=[
                    Origin(Id="default",
                           DomainName=GetAtt(bucket, "DomainName"))
                ],
                IPV6Enabled=True,
                ViewerCertificate=ViewerCertificate(
                    CloudFrontDefaultCertificate=True),
                PriceClass="PriceClass_All",
            ),
            Condition=using_price_class_hack,
        ))

    # Main CloudFront distribution serving the content bucket through an
    # origin access identity, with an origin-request Lambda@Edge hook.
    distribution = template.add_resource(
        Distribution(
            "ContentDistribution",
            DistributionConfig=DistributionConfig(
                Enabled=True,
                # Custom domain aliases only when DNS names were supplied.
                Aliases=If(using_dns_names, Ref(dns_names), NoValue),
                Logging=Logging(Bucket=GetAtt(log_bucket, "DomainName"),
                                Prefix="cloudfront/"),
                DefaultRootObject="index.html",
                Origins=[
                    Origin(
                        Id="default",
                        DomainName=GetAtt(bucket, "DomainName"),
                        S3OriginConfig=S3OriginConfig(
                            OriginAccessIdentity=Join(
                                "",
                                [
                                    "origin-access-identity/cloudfront/",
                                    Ref(origin_access_identity),
                                ],
                            )),
                    )
                ],
                DefaultCacheBehavior=DefaultCacheBehavior(
                    TargetOriginId="default",
                    Compress=True,
                    ForwardedValues=ForwardedValues(QueryString=False),
                    ViewerProtocolPolicy="redirect-to-https",
                    DefaultTTL=Ref(default_ttl_seconds),
                    LambdaFunctionAssociations=[
                        LambdaFunctionAssociation(
                            EventType="origin-request",
                            LambdaFunctionARN=Ref(edge_hook_version),
                        )
                    ],
                ),
                HttpVersion="http2",
                IPV6Enabled=True,
                # Certificate precedence: explicit ACM ARN, then the
                # hosted-zone-issued certificate, then the CloudFront default.
                ViewerCertificate=ViewerCertificate(
                    AcmCertificateArn=If(
                        using_acm_certificate,
                        Ref(acm_certificate_arn),
                        If(using_hosted_zone, Ref(certificate), NoValue),
                    ),
                    SslSupportMethod=If(using_certificate, "sni-only",
                                        NoValue),
                    CloudFrontDefaultCertificate=If(using_certificate, NoValue,
                                                    True),
                    MinimumProtocolVersion=Ref(tls_protocol_version),
                ),
                PriceClass=If(using_price_class_hack, "PriceClass_100",
                              "PriceClass_All"),
            ),
            DependsOn=[
                bucket_policy,
                log_ingester_policy,
                edge_log_groups,
                precondition_region_is_primary,
            ],
        ))

    # Log groups named after the distribution and bucket; retention is only
    # applied when the log-retention-days parameter is defined.
    distribution_log_group = template.add_resource(
        LogGroup(
            "DistributionLogGroup",
            LogGroupName=Join(
                "", ["/aws/cloudfront/", Ref(distribution)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    bucket_log_group = template.add_resource(
        LogGroup(
            "BucketLogGroup",
            LogGroupName=Join("", ["/aws/s3/", Ref(bucket)]),
            RetentionInDays=If(retention_defined, Ref(log_retention_days),
                               NoValue),
        ))

    # Stack outputs: distribution identity/domain, the DNS target (which is
    # the dummy distribution when the price class hack is active), a ready
    # URL, and the content bucket's ARN.
    template.add_output(Output("DistributionId", Value=Ref(distribution)))

    template.add_output(
        Output("DistributionDomain", Value=GetAtt(distribution, "DomainName")))

    template.add_output(
        Output(
            "DistributionDnsTarget",
            Value=If(
                using_price_class_hack,
                GetAtt(price_class_distribution, "DomainName"),
                GetAtt(distribution, "DomainName"),
            ),
        ))

    template.add_output(
        Output(
            "DistributionUrl",
            Value=Join("",
                       ["https://",
                        GetAtt(distribution, "DomainName"), "/"]),
        ))

    template.add_output(Output("ContentBucketArn", Value=GetAtt(bucket,
                                                                "Arn")))

    return template
Beispiel #20
0
 def test_max_resources(self):
     """Filling a template with 200 buckets succeeds; the 201st raises."""
     template = Template()
     for number in range(1, 201):
         template.add_resource(Bucket(str(number)))
     self.assertRaises(ValueError, template.add_resource, Bucket("201"))
Beispiel #21
0
def createArtifactStoreS3Location():
    """Return a private-ACL Bucket resource named ArtifactStoreS3Location."""
    artifact_store = Bucket(
        "ArtifactStoreS3Location",
        AccessControl="Private",
    )
    return artifact_store
Beispiel #22
0
# Lifecycle rule for everything under /only-this-sub-dir: current versions
# tier to STANDARD_IA after 60 days and expire after ~10 years; non-current
# versions tier to STANDARD_IA, then GLACIER, and expire after a year.
lifecycle_rule = LifecycleRule(
    Id="S3BucketRule001",
    Prefix="/only-this-sub-dir",
    Status="Enabled",
    # Current object versions.
    ExpirationInDays=3650,
    Transitions=[
        LifecycleRuleTransition(
            StorageClass="STANDARD_IA",
            TransitionInDays=60,
        ),
    ],
    # Non-current (superseded) object versions.
    NoncurrentVersionExpirationInDays=365,
    NoncurrentVersionTransitions=[
        NoncurrentVersionTransition(
            StorageClass="STANDARD_IA",
            TransitionInDays=30,
        ),
        NoncurrentVersionTransition(
            StorageClass="GLACIER",
            TransitionInDays=120,
        ),
    ],
)

# Publicly readable, versioned bucket carrying the rule above.
s3bucket = t.add_resource(Bucket(
    "S3Bucket",
    AccessControl=PublicRead,
    VersioningConfiguration=VersioningConfiguration(
        Status="Enabled",
    ),
    LifecycleConfiguration=LifecycleConfiguration(Rules=[lifecycle_rule]),
))
Beispiel #23
0
                               "dynamodb:UpdateItem",
                               "dynamodb:Scan",
                               "dynamodb:BatchWriteItem",
                           ],
                           "Resource": [GetAtt(dev_db, "Arn")]
                       },
                       {
                           "Effect": "Allow",
                           "Action": "dynamodb:ListTables",
                           "Resource": "*",
                       },
                   ],
               }))

# Bucket holding the test-status file, plus an output exposing its name.
status_bucket = Bucket("CkanTestStatus", BucketName="ckan-test-status")
s3_bucket = t.add_resource(status_bucket)

t.add_output(
    Output(
        "TestCkanStatus",
        Description="Name of S3 bucket to hold test status file",
        Value=Ref(s3_bucket),
    ))

t.add_resource(
    PolicyType(
        "S3TestBucket",
        PolicyName="S3TestBucketAccess",
        Groups=[Ref(queue_dev_group)],
        PolicyDocument={
            "Version":
            "2012-10-17",
            "Statement": [
    Ref, 
    Template 
) 

from troposphere.s3 import Bucket, BucketPolicy

from troposphere.cloudtrail import Trail

# Template enabling CloudTrail with its logs delivered to an S3 bucket.
t = Template()

t.add_description("Effective DevOps in AWS: Turn on CloudTrail and log to S3")

# Keep the audit-log bucket around even if this stack is deleted.
t.add_resource(
    Bucket(
        "S3Bucket",
        DeletionPolicy="Retain",
    ))

t.add_resource(BucketPolicy( 
    "BucketPolicy", 
    Bucket=Ref("S3Bucket"), 
    PolicyDocument={ 
        "Statement": [{ 
            "Action": "s3:GetBucketAcl", 
            "Effect": "Allow", 
            "Principal": { 
                "Service": "cloudtrail.amazonaws.com" 
            }, 
            "Resource": Join("", [ 
                "arn:aws:s3:::", Ref("S3Bucket") 
            ]) 
Beispiel #25
0
                    "PUT",
                    "HEAD",
                    "GET",
                ],
                AllowedHeaders=[
                    "*",
                ],
            )
        ], ),
)

# Publicly readable bucket for static assets and media, sharing the common
# bucket configuration.
assets_bucket = template.add_resource(
    Bucket("AssetsBucket", AccessControl=PublicRead, **common_bucket_conf))

# Expose the asset bucket's domain name as a stack output.
template.add_output(
    Output(
        "AssetsBucketDomainName",
        Description="Assets bucket domain name",
        Value=GetAtt(assets_bucket, "DomainName"),
    ))

# Create an S3 bucket that holds user uploads or other non-public files
private_assets_bucket = template.add_resource(
    Bucket(
        "PrivateAssetsBucket",
        AccessControl=Private,
        **common_bucket_conf,
Beispiel #26
0
 def test_bucket_accesscontrol_bad_string(self):
     """An AccessControl string outside the canned ACLs must be rejected."""
     self.assertRaises(
         ValueError,
         lambda: Bucket('b', AccessControl='FooBar').validate())
Beispiel #27
0
# Names and document versions shared by the pipeline resources below.
PROJECT_NAME = "TrainingsHomepageProject"
BUILD_NAME = "TrainingsHomepageBuild"
ARTIFACT_BUCKET = "buildartifactsbucket"
WEBSITE_BUCKET = "awstrainingsbytecracer"
GIT_REPO = "TrainingsHomepage"
BRANCH_NAME = "master"
IAM_VERSION = "2012-10-17"
CFN_VERSION = "2010-09-09"

t = Template()
t.add_version(CFN_VERSION)

# Publicly readable bucket configured for static-website hosting
# (index.html / 404.html).  Note: this is the website bucket, not the
# build-artifact bucket.
WebsiteBucket = t.add_resource(
    Bucket(WEBSITE_BUCKET,
           AccessControl="PublicRead",
           WebsiteConfiguration=WebsiteConfiguration(
               IndexDocument="index.html", ErrorDocument="404.html")))

# Generate the bucketpolicy to allow user to access your static website
BucketPolicyStaticWebsite = t.add_resource(
    BucketPolicy(
        "BucketPolicyStaticWebsite",
        PolicyDocument={
            "Version":
            IAM_VERSION,
            "Statement": [{
                "Sid":
                "ReadOnly",
                "Effect":
                "Allow",
                "Principal":
Beispiel #28
0
 def test_bucket_accesscontrol_ref(self):
     """A Ref to a parameter is a valid AccessControl value."""
     acl_param = Parameter('acl', Type='String', Default='Private')
     bucket = Bucket('b', AccessControl=Ref(acl_param))
     bucket.validate()
Beispiel #29
0
def create_primary_template():
    """Build the root stack template.

    Declares an ECR artifact repository and an encrypted artifact bucket,
    then wires a chain of nested stacks (deployment id, availability zones,
    VPC, EIP allocator, EFS, the Lambda function itself, and an image
    tagger).  Every nested stack is conditional on the ImageDigest parameter
    being non-empty, so an initial deploy with the default empty digest
    creates only the repository and bucket.
    """
    template = Template(
        Description="Root stack for VERY STRONG Lambda function")

    # Digest of the container image to deploy; empty on the bootstrap pass.
    image_digest = template.add_parameter(
        Parameter("ImageDigest", Type="String", Default=""))

    is_image_digest_defined = "IsImageDigestDefined"
    template.add_condition(is_image_digest_defined,
                           Not(Equals(Ref(image_digest), "")))

    # ECR repository; lifecycle policy expires all but the 3 newest untagged
    # images.
    artifact_repository = template.add_resource(
        Repository(
            "ArtifactRepository",
            ImageTagMutability="MUTABLE",
            LifecyclePolicy=LifecyclePolicy(LifecyclePolicyText=json.dumps(
                {
                    "rules": [{
                        "rulePriority": 1,
                        "selection": {
                            "tagStatus": "untagged",
                            "countType": "imageCountMoreThan",
                            "countNumber": 3,
                        },
                        "action": {
                            "type": "expire",
                        },
                    }]
                },
                indent=None,
                sort_keys=True,
                separators=(",", ":"),
            )),
        ))

    # "<account>.dkr.ecr.<region>.<suffix>/<repo>" registry URL, and the
    # digest-pinned image URI passed to the nested stacks.
    artifact_repository_url = Join(
        "/",
        [
            Join(
                ".",
                [
                    AccountId,
                    "dkr",
                    "ecr",
                    Region,
                    URLSuffix,
                ],
            ),
            Ref(artifact_repository),
        ],
    )
    image_uri = Join("@", [artifact_repository_url, Ref(image_digest)])

    # Fully locked-down bucket for nested templates/artifacts: KMS-encrypted
    # (account S3 alias key), abandoned multipart uploads cleaned after 3
    # days, all public access blocked.
    artifact_bucket = template.add_resource(
        Bucket(
            "ArtifactBucket",
            BucketEncryption=BucketEncryption(
                ServerSideEncryptionConfiguration=[
                    ServerSideEncryptionRule(
                        BucketKeyEnabled=True,
                        ServerSideEncryptionByDefault=
                        ServerSideEncryptionByDefault(
                            SSEAlgorithm="aws:kms",
                            KMSMasterKeyID=Join(":", [
                                "arn", Partition, "kms", Region, AccountId,
                                "alias/aws/s3"
                            ]),
                        ),
                    )
                ], ),
            LifecycleConfiguration=LifecycleConfiguration(Rules=[
                LifecycleRule(
                    AbortIncompleteMultipartUpload=
                    AbortIncompleteMultipartUpload(DaysAfterInitiation=3, ),
                    Status="Enabled",
                ),
            ], ),
            PublicAccessBlockConfiguration=PublicAccessBlockConfiguration(
                BlockPublicAcls=True,
                BlockPublicPolicy=True,
                IgnorePublicAcls=True,
                RestrictPublicBuckets=True,
            ),
        ))

    # Nested stacks below address their templates through
    # common.get_template_s3_url — presumably this also stages the rendered
    # template in the artifact bucket; confirm in `common`.
    deployment_id_stack = template.add_resource(
        Stack(
            "DeploymentId",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), deployment_id.create_template()),
            Parameters={
                "ArtifactBucket": Ref(artifact_bucket),
            },
            Condition=is_image_digest_defined,
        ))

    availability_zones_stack = template.add_resource(
        Stack(
            "AvailabilityZones",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), availability_zones.create_template()),
            Parameters={
                "DeploymentId": GetAtt(deployment_id_stack, "Outputs.Value"),
                "ImageUri": image_uri,
            },
            Condition=is_image_digest_defined,
        ))

    vpc_stack = template.add_resource(
        Stack(
            "Vpc",
            TemplateURL=common.get_template_s3_url(Ref(artifact_bucket),
                                                   vpc.create_template()),
            Parameters={
                "AvailabilityZones":
                GetAtt(availability_zones_stack, "Outputs.AvailabilityZones"),
            },
            Condition=is_image_digest_defined,
        ))

    lambda_eip_allocator_stack = template.add_resource(
        Stack(
            "LambdaEipAllocator",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), lambda_eip_allocator.create_template()),
            Parameters={
                "DeploymentId": GetAtt(deployment_id_stack, "Outputs.Value"),
                "VpcId": GetAtt(vpc_stack, "Outputs.VpcId"),
                "ImageUri": image_uri,
            },
            Condition=is_image_digest_defined,
        ))

    elastic_file_system_stack = template.add_resource(
        Stack(
            "ElasticFileSystem",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), elastic_file_system.create_template()),
            Parameters={
                "VpcId":
                GetAtt(vpc_stack, "Outputs.VpcId"),
                "SubnetIds":
                GetAtt(vpc_stack, "Outputs.SubnetIds"),
                "AvailabilityZones":
                GetAtt(availability_zones_stack, "Outputs.AvailabilityZones"),
            },
            Condition=is_image_digest_defined,
        ))

    # The function stack waits on the EIP allocator explicitly in addition
    # to its parameter-implied dependencies.
    lambda_function_stack = template.add_resource(
        Stack(
            "LambdaFunction",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), lambda_function.create_template()),
            Parameters={
                "DeploymentId":
                GetAtt(deployment_id_stack, "Outputs.Value"),
                "VpcId":
                GetAtt(vpc_stack, "Outputs.VpcId"),
                "SubnetIds":
                GetAtt(vpc_stack, "Outputs.SubnetIds"),
                "FileSystemAccessPointArn":
                GetAtt(elastic_file_system_stack, "Outputs.AccessPointArn"),
                "ImageUri":
                image_uri,
            },
            DependsOn=[lambda_eip_allocator_stack],
            Condition=is_image_digest_defined,
        ))

    # Runs last: DependsOn lists every resource added to the template so
    # far, so the deployed digest is only tagged once everything succeeded.
    image_tagger_stack = template.add_resource(
        Stack(
            "ImageTagger",
            TemplateURL=common.get_template_s3_url(
                Ref(artifact_bucket), image_tagger.create_template()),
            Parameters={
                "DeploymentId": GetAtt(deployment_id_stack, "Outputs.Value"),
                "ArtifactRepository": Ref(artifact_repository),
                "DesiredImageTag": "current-cloudformation",
                "ImageDigest": Ref(image_digest),
                "ImageUri": image_uri,
            },
            DependsOn=list(template.resources),
            Condition=is_image_digest_defined,
        ))

    template.add_output(Output(
        "ArtifactBucket",
        Value=Ref(artifact_bucket),
    ))

    template.add_output(
        Output(
            "ArtifactRepositoryUrl",
            Value=artifact_repository_url,
        ))

    return template
Beispiel #30
0
    CreateLifeCycleRules("Christina", "/Christina/Pics", "3650", "Enabled"))
# Per-person picture rules: ~10-year expiry on each /<name>/Pics prefix.
for person in ("Jorge", "Hazel"):
    lifeCycleRules.append(
        CreateLifeCycleRules(person, "/" + person + "/Pics", "3650",
                             "Enabled"))

# NOTE: a disabled "Misc" rule with STANDARD_IA (90d) and GLACIER (365d)
# transitions was sketched here; re-enable via CreateLifecycleRuleTransition
# if needed.

lifecycle_configuration = s3.LifecycleConfiguration(Rules=lifeCycleRules)

template.add_resource(
    Bucket("JHSBucket",
           BucketName=Ref(bucketName),
           LifecycleConfiguration=lifecycle_configuration))

# Output built attribute-by-attribute on an Output object...
bucket_output = Output("JHSBucketName")
bucket_output.Description = "This is the name of personal bucket"
bucket_output.Value = Ref(bucketName)
template.add_output(bucket_output)

# ...and one passed fully-formed in a single call.
template.add_output(
    Output("DirectOutput",
           Description="This is an alternative form of adding outputs",
           Value=Join(
               "",
               ["BucketName is ", Ref(bucketName), " On S3"])))
Beispiel #31
0
 def test_depends_on_helper_with_resource(self):
     """depends_on_helper unwraps a resource object into its title string."""
     bucket = Bucket("Bucket1")
     self.assertEqual("Bucket1", depends_on_helper(bucket))