Example #1
def test_bucket(stack: Stack) -> None:
    """Test bucket creation."""
    stack.s3_bucket = "cfn_bucket"
    stack.s3_key = "templates/"

    topic_test = Topic(name="test-topic")
    queue_test = Queue(name="test-queue")
    lambda_test = Py38Function(
        name="mypylambda",
        description="this is a test",
        role="somearn",
        code_dir="my_code_dir",
        handler="app.main",
    )

    stack.add(topic_test)
    stack.add(lambda_test)
    stack.add(queue_test)

    bucket = Bucket(name="test-bucket")
    bucket.add_notification_configuration(event="s3:ObjectCreated:*",
                                          target=topic_test,
                                          permission_suffix="TpUpload")
    bucket.add_notification_configuration(event="s3:ObjectCreated:*",
                                          target=lambda_test,
                                          permission_suffix="TpUpload")
    bucket.add_notification_configuration(event="s3:ObjectCreated:*",
                                          target=queue_test,
                                          permission_suffix="FileEvent")
    stack.add(bucket)

    with open(os.path.join(TEST_DIR, "bucket.json")) as fd:
        expected_template = json.load(fd)

    assert stack.export()["Resources"] == expected_template
Example #2
def test_bucket(stack: Stack) -> None:
    """Test bucket creation.

    Note that a bucket policy is also created when a Bucket is instantiated.
    """
    stack.add(Bucket(name="test-bucket"))
    assert stack.export()["Resources"] == EXPECTED_BUCKET
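
To illustrate the note about the implicit bucket policy, here is a minimal sketch, assuming (as the other examples here do) that stack.export() returns a standard CloudFormation template body; the test name and assertions are illustrative, not taken from the source:

def test_bucket_creates_policy(stack: Stack) -> None:
    """Sketch: a Bucket should emit both the bucket and its bucket policy."""
    stack.add(Bucket(name="test-bucket"))
    # Collect the CloudFormation resource types produced by the construct
    resource_types = {res["Type"] for res in stack.export()["Resources"].values()}
    assert "AWS::S3::Bucket" in resource_types
    assert "AWS::S3::BucketPolicy" in resource_types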
Example #3
    def __init__(
        self,
        name: str,
        aliases: list[str],
        bucket_name: str,
        certificate_arn: str,
        default_ttl: int = 86400,
        lambda_edge_function_arns: Optional[list[str]] = None,
        root_object: str = "index.html",
        r53_route_from: Optional[list[Tuple[str, str]]] = None,
    ):
        """Initialize a S3WebsiteCFDistribution.

        :param name: function name
        :param aliases: CNAMEs (alternate domain names), if any, for this
            distribution.
        :param bucket_name: name of the bucket to create to host the website
        :param certificate_arn: Amazon Resource Name (ARN) of the ACM
            certificate for aliases. Cloudfront only supports ceritificates
            stored in us-east-1.
        :param default_ttl: The default amount of time, in seconds, that you
            want objects to stay in the CloudFront cache before CloudFront sends
            another request to the origin to see if the object has been updated
        :param lambda_edge_function_arns: ARNs of Lambda@Edge functions to
            associate with the cloudfront distribution default cache behaviour
        :param root_object: The object that you want CloudFront to request from
            your origin
        :param r53_route_from: list of (hosted_zone_id, domain_id) for which to
            create route53 records
        """
        self.name = name
        self.aliases = aliases
        self.bucket = Bucket(name=bucket_name)
        self.certificate_arn = certificate_arn
        self.default_ttl = default_ttl
        self.lambda_edge_function_arns = lambda_edge_function_arns
        self.root_object = root_object
        self.r53_route_from = r53_route_from
        self._origin_access_identity = None
        self._cache_policy = None
Example #4
def test_bucket_multi_encryption(stack: Stack) -> None:
    """Test bucket accepting multiple types of encryptions and without default."""
    bucket = Bucket(
        name="test-bucket",
        default_bucket_encryption=None,
        authorized_encryptions=[
            EncryptionAlgorithm.AES256, EncryptionAlgorithm.KMS
        ],
    )
    stack.add(bucket)

    with open(os.path.join(TEST_DIR, "bucket_multi_encryption.json")) as fd:
        expected_template = json.load(fd)

    assert stack.export()["Resources"] == expected_template
Example #5
    def resources(self, stack: Stack) -> List[AWSObject]:
        """Build and return objects associated with the configuration recorder.

        Return a configuration recorder and a delivery channel with its s3 bucket
        """
        aws_objects = []

        config_role = iam.ServiceLinkedRole.from_dict(
            "AWSServiceRoleForConfig",
            {"AWSServiceName": "config.amazonaws.com"})
        aws_objects.append(config_role)

        # Add the config recorder
        recording_group = config.RecordingGroup(
            AllSupported=True, IncludeGlobalResourceTypes=True)

        aws_objects.append(
            config.ConfigurationRecorder(
                name_to_id("ConfigRecorder"),
                Name="ConfigRecorder",
                RecordingGroup=recording_group,
                RoleARN=Join(
                    ":",
                    [
                        "arn",
                        "aws",
                        "iam:",
                        AccountId,
                        ("role/aws-service-role/"
                         "config.amazonaws.com/AWSServiceRoleForConfig"),
                    ],
                ),
                DependsOn=config_role.title,
            ))

        # Create an S3 bucket for the delivery
        bucket = Bucket(name=self.bucket_name)
        bucket.policy_statements += [
            PolicyStatement(
                action="s3:GetBucketAcl",
                effect="Allow",
                principal={"Service": "config.amazonaws.com"},
                resource=bucket.arn,
            ),
            PolicyStatement(
                action="s3:PutObject",
                effect="Allow",
                condition={
                    "StringEquals": {
                        "s3:x-amz-acl": "bucket-owner-full-control"
                    }
                },
                principal={"Service": "config.amazonaws.com"},
                resource=Join(
                    "", [bucket.arn, "/AWSLogs/", AccountId, "/Config/*"]),
            ),
        ]

        aws_objects.extend(bucket.resources(stack=stack))

        # Create the delivery channel to the S3 bucket
        aws_objects.append(
            config.DeliveryChannel(
                name_to_id("DeliveryChannel"),
                Name="DeliveryChannel",
                S3BucketName=bucket.ref,
            ))

        return aws_objects
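
For context, a hedged sketch of how the construct above could be exercised, reusing the add_construct and export pattern from the other examples on this page (the test name and assertions are illustrative, not taken from the source):

def test_configuration_recorder(stack: Stack) -> None:
    """Sketch: render the ConfigurationRecorder construct into a stack."""
    stack.add_construct([ConfigurationRecorder(bucket_name="config-bucket-example")])
    resource_types = {res["Type"] for res in stack.export()["Resources"].values()}
    # At minimum the recorder, its delivery channel and the delivery bucket
    # should be part of the generated template
    assert "AWS::Config::ConfigurationRecorder" in resource_types
    assert "AWS::Config::DeliveryChannel" in resource_types
    assert "AWS::S3::Bucket" in resource_types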
Example #6
def build_and_deploy_tstacks() -> None:
    """Build and deploy two simple troposphere stacks.

    Two stacks are deployed in two different regions. The us stack defines only a
    secure bucket. The eu stack defines secure s3 buckets, a role to add objects to
    the eu bucket, and an AWS Config recorder with rules that check s3 bucket
    security configuration across both regions.
    """
    sessions = {
        "eu": Session(regions=["eu-west-1"]),
        "us": Session(regions=["us-east-1"]),
    }
    stack = {}
    for region in ("eu", "us"):
        stack[region] = Stack(
            f"e3-example-{region}",
            sessions[region],
            opts={"Capabilities": ["CAPABILITY_NAMED_IAM"]},
        )

    # Add a secure s3 bucket in each region
    stack["eu"].add_construct([Bucket(name="e3-l1-example")])
    stack["us"].add_construct([Bucket(name="e3-l2-example")])

    # Define a new IAM role that will be used to access the e3-l1-example bucket
    stack["eu"].add_construct(
        [
            Role(
                name="L1WriteRole",
                description="Role to write to l1 buckets",
                principal={"Service": "ecs-tasks.amazonaws.com"},
            )
        ]
    )

    # Define a new IAM policy allowing PutObject on the e3-l1-example bucket
    # and attach it to the L1WriteRole role
    stack["eu"].add_construct(
        [
            S3AccessManagedPolicy(
                name="S3WriteAccess",
                buckets=["e3-l1-example"],
                action=["s3:PutObject"],
                roles=[Ref(stack["eu"]["L1WriteRole"])],
            )
        ]
    )

    # Add AWS Config rules to check s3 bucket security configuration.
    # This should only be defined in one region
    for region in ("eu",):
        stack[region].add_construct(
            [ConfigurationRecorder(bucket_name="config-bucket-example")]
        )

    for region in ("eu",):
        stack[region].add_construct(
            [
                S3BucketPublicWriteProhibited,
                S3BucketPublicReadProhibited,
                S3BucketServerSideEncryptionEnabled,
                S3BucketSSLRequestsOnly,
                IAMUserNoPoliciesCheck,
            ]
        )

    # Deploy stacks
    for region in ("eu", "us"):
        stack[region].deploy()
Example #7
class S3WebsiteDistribution(Construct):
    """Set a Cloudfront distribution in front of a website hosted in S3.

    It also provides a lambda invalidating cloudfront cache when s3 objects
    are updated.
    """
    def __init__(
        self,
        name: str,
        aliases: list[str],
        bucket_name: str,
        certificate_arn: str,
        default_ttl: int = 86400,
        lambda_edge_function_arns: Optional[list[str]] = None,
        root_object: str = "index.html",
        r53_route_from: Optional[list[Tuple[str, str]]] = None,
    ):
        """Initialize a S3WebsiteCFDistribution.

        :param name: function name
        :param aliases: CNAMEs (alternate domain names), if any, for this
            distribution.
        :param bucket_name: name of the bucket to create to host the website
        :param certificate_arn: Amazon Resource Name (ARN) of the ACM
            certificate for aliases. Cloudfront only supports ceritificates
            stored in us-east-1.
        :param default_ttl: The default amount of time, in seconds, that you
            want objects to stay in the CloudFront cache before CloudFront sends
            another request to the origin to see if the object has been updated
        :param lambda_edge_function_arns: ARNs of Lambda@Edge functions to
            associate with the cloudfront distribution default cache behaviour
        :param root_object: The object that you want CloudFront to request from
            your origin
        :param r53_route_from: list of (hosted_zone_id, domain_id) for which to
            create route53 records
        """
        self.name = name
        self.aliases = aliases
        self.bucket = Bucket(name=bucket_name)
        self.certificate_arn = certificate_arn
        self.default_ttl = default_ttl
        self.lambda_edge_function_arns = lambda_edge_function_arns
        self.root_object = root_object
        self.r53_route_from = r53_route_from
        self._origin_access_identity = None
        self._cache_policy = None

    def add_oai_access_to_bucket(self) -> None:
        """Add policy granting cloudfront OAI read access to the bucket."""
        cf_principal = {
            "CanonicalUser":
            GetAtt(self.origin_access_identity, "S3CanonicalUserId")
        }
        self.bucket.policy_statements.extend([
            Allow(
                action="s3:GetObject",
                resource=self.bucket.all_objects_arn,
                principal=cf_principal,
            ),
            Allow(
                action="s3:ListBucket",
                resource=self.bucket.arn,
                principal=cf_principal,
            ),
        ])

    @property
    def cache_policy(self) -> cloudfront.CachePolicy:
        """Return cloudfront distribution cache policy."""
        if self._cache_policy is None:
            forwarded_to_origin = cloudfront.ParametersInCacheKeyAndForwardedToOrigin(
                CookiesConfig=cloudfront.CacheCookiesConfig(
                    CookieBehavior="none"),
                EnableAcceptEncodingBrotli="true",
                EnableAcceptEncodingGzip="true",
                HeadersConfig=cloudfront.CacheHeadersConfig(
                    HeaderBehavior="none"),
                QueryStringsConfig=cloudfront.CacheQueryStringsConfig(
                    QueryStringBehavior="none"),
            )
            self._cache_policy = cloudfront.CachePolicy(
                name_to_id(f"{self.name}-cloudfront-cache-policy"),
                CachePolicyConfig=cloudfront.CachePolicyConfig(
                    Comment=f"{self.name} s3 website cloudfront cache policy",
                    DefaultTTL=self.default_ttl,
                    MaxTTL=31536000,
                    MinTTL=1,
                    Name="s3-cache-policy",
                    ParametersInCacheKeyAndForwardedToOrigin=
                    forwarded_to_origin,
                ),
            )
        return self._cache_policy

    @property
    def distribution(self) -> cloudfront.Distribution:
        """Return cloudfront distribution with bucket as origin."""
        origin = cloudfront.Origin(
            S3OriginConfig=cloudfront.S3OriginConfig(OriginAccessIdentity=Join(
                "",
                [
                    "origin-access-identity/cloudfront/",
                    Ref(self.origin_access_identity),
                ],
            )),
            DomainName=f"{self.bucket.name}.s3.amazonaws.com",
            Id="S3Origin",
        )
        cache_params = {
            "AllowedMethods": ["GET", "HEAD", "OPTIONS"],
            "CachePolicyId": Ref(self.cache_policy),
            "TargetOriginId": "S3Origin",
            "ViewerProtocolPolicy": "redirect-to-https",
        }
        if self.lambda_edge_function_arns:
            cache_params["LambdaFunctionAssociations"] = [
                cloudfront.LambdaFunctionAssociation(
                    EventType="viewer-request", LambdaFunctionARN=lambda_arn)
                for lambda_arn in self.lambda_edge_function_arns
            ]

        default_cache_behavior = cloudfront.DefaultCacheBehavior(
            **cache_params)
        return cloudfront.Distribution(
            name_to_id(self.name),
            DistributionConfig=cloudfront.DistributionConfig(
                Aliases=self.aliases,
                DefaultRootObject=self.root_object,
                DefaultCacheBehavior=default_cache_behavior,
                Enabled="True",
                HttpVersion="http2",
                Origins=[origin],
                ViewerCertificate=cloudfront.ViewerCertificate(
                    AcmCertificateArn=self.certificate_arn,
                    SslSupportMethod="sni-only"),
            ),
        )

    @property
    def origin_access_identity(
            self) -> cloudfront.CloudFrontOriginAccessIdentity:
        """Return cloudformation access identity.

        It is needed to be used as principal for s3 bucket access policy.
        """
        if self._origin_access_identity is None:
            cf_oai_config = cloudfront.CloudFrontOriginAccessIdentityConfig(
                Comment=f"{self.name} Cloudfront origin access identity")
            self._origin_access_identity = cloudfront.CloudFrontOriginAccessIdentity(
                name_to_id(f"{self.name}-cloudfront-oai"),
                CloudFrontOriginAccessIdentityConfig=cf_oai_config,
            )
        return self._origin_access_identity

    def add_cache_invalidation(self, stack: Stack) -> list[AWSObject]:
        """Return resources invalidating cache when objects are pushed to s3.

        A lambda is called on each s3 object update to invalidate the CloudFront
        cache for the updated object.
        """
        lambda_name = f"{self.name}-cache-invalidation-lambda"
        lambda_policy = ManagedPolicy(
            name=f"{lambda_name}-policy",
            description=f"managed policy used by {lambda_name}",
            path=f"/{stack.name}/",
            statements=[
                Allow(
                    action=["cloudfront:CreateInvalidation"],
                    resource=Join(
                        "",
                        [
                            "arn:aws:cloudfront::", AccountId,
                            ":distribution ", self.id
                        ],
                    ),
                )
            ],
        )
        lambda_role = Role(
            name=f"{lambda_name}-role",
            description=f"role assumed by {lambda_name}",
            path=f"/{stack.name}/",
            trust=Trust(services=["lambda"]),
            managed_policy_arns=[lambda_policy.arn],
        )

        # Get first part of invalidation lambda code from a file
        with open(
                os.path.join(
                    os.path.dirname(os.path.abspath(__file__)),
                    "data",
                    "lambda_invalidate_head.py",
                )) as lf:
            lambda_code = lf.read().splitlines()

        # Complete it with the part depending on the distribution id
        lambda_code.extend([
            "    client.create_invalidation(",
            Sub(
                "        DistributionId='${distribution_id}',",
                distribution_id=self.id,
            ),
            "        InvalidationBatch={",
            "            'Paths': {'Quantity': 1, 'Items': path},",
            "            'CallerReference': str(time.time()),",
            "        },",
            "    )",
        ])
        lambda_function = Function(
            name_to_id(lambda_name),
            description=(f"lambda invalidating cloudfront cache when "
                         f"{self.bucket.name} objects are updated"),
            handler="invalidate.handler",
            role=lambda_role,
            code_zipfile=Join("\n", lambda_code),
            runtime="python3.9",
        )

        sns_topic = Topic(name=f"{self.name}-invalidation-topic")
        sns_topic.add_lambda_subscription(
            function=lambda_function,
            delivery_policy={"throttlePolicy": {
                "maxReceivesPerSecond": 10
            }},
        )
        # Trigger the invalidation when a file is updated
        self.bucket.add_notification_configuration(event="s3:ObjectCreated:*",
                                                   target=sns_topic,
                                                   permission_suffix=self.name)

        result = [
            resource for construct in (lambda_policy, lambda_role,
                                       lambda_function, sns_topic)
            for resource in construct.resources(stack)
        ]
        return result

    @property
    def domain_name(self) -> GetAtt:
        """Return cloudfront distribution domain name."""
        return GetAtt(name_to_id(self.name), "DomainName")

    @property
    def id(self) -> Ref:
        """Return cloudfront distribution id."""
        return Ref(name_to_id(self.name))

    def resources(self, stack: Stack) -> list[AWSObject]:
        """Return list of AWSObject associated with the construct."""
        # Add bucket policy granting read access to the cloudfront distribution
        self.add_oai_access_to_bucket()

        result = [
            *self.bucket.resources(stack),
            self.cache_policy,
            self.distribution,
            self.origin_access_identity,
        ]

        # Add a lambda invalidating cloudfront cache when bucket objects are modified
        result.extend(self.add_cache_invalidation(stack))

        # Add route53 records if needed
        if self.r53_route_from:
            for zone_id, domain in self.r53_route_from:
                result.append(
                    route53.RecordSetType(
                        name_to_id(f"{self.name}-{domain}-r53-rset"),
                        AliasTarget=route53.AliasTarget(
                            DNSName=self.domain_name,
                            # Z2FDTNDATAQYW2 is always the hosted zone ID when you
                            # create an alias record that routes traffic to a
                            # CloudFront distribution
                            HostedZoneId="Z2FDTNDATAQYW2",
                        ),
                        Name=domain,
                        HostedZoneId=zone_id,
                        Type="A",
                    ))
        return result
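
A hedged usage sketch for the construct above; the certificate ARN, hosted zone ID and domain are placeholders, and the add_construct/export calls follow the pattern of the other examples on this page rather than anything guaranteed by this snippet:

def test_s3_website_distribution(stack: Stack) -> None:
    """Sketch: render an S3WebsiteDistribution into a stack."""
    stack.add_construct(
        [
            S3WebsiteDistribution(
                name="my-website",
                aliases=["www.example.com"],
                bucket_name="my-website-bucket",
                # Placeholder ARN: CloudFront only accepts ACM certificates
                # stored in us-east-1
                certificate_arn="arn:aws:acm:us-east-1:123456789012:certificate/placeholder",
                r53_route_from=[("Z0123456789ABCDEFGHIJ", "www.example.com")],
            )
        ]
    )
    resource_types = {res["Type"] for res in stack.export()["Resources"].values()}
    # The distribution and the Route 53 alias record should be rendered
    assert "AWS::CloudFront::Distribution" in resource_types
    assert "AWS::Route53::RecordSet" in resource_types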
Example #8
def test_add_and_get_item() -> None:
    """Test adding a construct and retrieving an AWSObject from a stack."""
    stack = Stack("test-stack", "this is a test stack")
    stack.add(Bucket("my-bucket"))
    my_bucket = stack["my-bucket"]
    assert my_bucket