Example #1
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create the Lambda function to receive the request
        # The source code is in './src' directory
        lambda_fn = lambda_.Function(
            self, "MyFunction",
            runtime=lambda_.Runtime.PYTHON_3_9,
            handler="index.handler",
            code=lambda_.Code.from_asset(os.path.join(DIRNAME, "src")),
            environment={
                "env_var1": "value 1",
                "env_var2": "value 2",
            }
        )

        # Create the HTTP API with CORS
        http_api = _apigw.HttpApi(
            self, "MyHttpApi",
            cors_preflight=_apigw.CorsPreflightOptions(
                allow_methods=[_apigw.CorsHttpMethod.GET],
                allow_origins=["*"],
                max_age=Duration.days(10),
            )
        )

        # Add a route to GET /
        http_api.add_routes(
            path="/",
            methods=[_apigw.HttpMethod.GET],
            integration=_integrations.HttpLambdaIntegration("LambdaProxyIntegration", handler=lambda_fn),
        )

        # Outputs
        CfnOutput(self, "API Endpoint", description="API Endpoint", value=http_api.api_endpoint)
Example #2
 def _setup_mysql_serverless(self) -> None:
     port = 3306
     database = "test"
     schema = "test"
     aurora_mysql = rds.ServerlessCluster(
         self,
         "aws-data-wrangler-aurora-cluster-mysql-serverless",
         removal_policy=RemovalPolicy.DESTROY,
         engine=rds.DatabaseClusterEngine.aurora_mysql(
             version=rds.AuroraMysqlEngineVersion.VER_5_7_12,
         ),
         cluster_identifier="mysql-serverless-cluster-wrangler",
         default_database_name=database,
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         scaling=rds.ServerlessScalingOptions(
             auto_pause=Duration.minutes(5),
             min_capacity=rds.AuroraCapacityUnit.ACU_1,
             max_capacity=rds.AuroraCapacityUnit.ACU_1,
         ),
         backup_retention=Duration.days(1),
         vpc=self.vpc,
         vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT),
         subnet_group=self.rds_subnet_group,
         security_groups=[self.db_security_group],
         enable_data_api=True,
     )
     secret = secrets.Secret(
         self,
         "aws-data-wrangler-mysql-serverless-secret",
         secret_name="aws-data-wrangler/mysql-serverless",
         description="MySQL serverless credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "mysql",
                     "host": aurora_mysql.cluster_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": aurora_mysql.cluster_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "MysqlServerlessSecretArn", value=secret.secret_arn)
     CfnOutput(self, "MysqlServerlessClusterArn", value=aurora_mysql.cluster_arn)
     CfnOutput(self, "MysqlServerlessAddress", value=aurora_mysql.cluster_endpoint.hostname)
     CfnOutput(self, "MysqlServerlessPort", value=str(port))
     CfnOutput(self, "MysqlServerlessDatabase", value=database)
     CfnOutput(self, "MysqlServerlessSchema", value=schema)
Example #3
    def __init__(self, scope: Construct, id: str, vpc, asg_security_groups,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create an Aurora cluster with 2 instances using the CDK high-level API
        # Secrets Manager auto-generates and stores the password; don't put the password directly in CDK code
        # db_Aurora_cluster = rds.DatabaseCluster(self, "MyAurora",
        #                                         default_database_name="MyAurora",
        #                                         engine=rds.DatabaseClusterEngine.aurora_mysql(
        #                                             version=rds.AuroraMysqlEngineVersion.VER_5_7_12
        #                                         ),
        #                                         instance_props=rds.InstanceProps(
        #                                             vpc=vpc,
        #                                             vpc_subnets=ec2.SubnetSelection(subnet_group_name="DB"),
        #                                             instance_type=ec2.InstanceType(instance_type_identifier="t2.small")
        #                                         ),
        #                                         instances=2,
        #                                         parameter_group=rds.ParameterGroup.from_parameter_group_name(
        #                                             self, "para-group-aurora",
        #                                             parameter_group_name="default.aurora-mysql5.7"
        #                                         ),
        #                                         )
        # for asg_sg in asg_security_groups:
        #     db_Aurora_cluster.connections.allow_default_port_from(asg_sg, "EC2 Autoscaling Group access Aurora")

        # Alternatively, create MySQL RDS with CDK High Level API
        db_mysql_easy = rds.DatabaseInstance(
            self,
            "MySQL_DB_easy",
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7_30),
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                              ec2.InstanceSize.SMALL),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_group_name="DB"),
            multi_az=True,
            allocated_storage=100,
            storage_type=rds.StorageType.GP2,
            cloudwatch_logs_exports=["audit", "error", "general", "slowquery"],
            deletion_protection=False,
            delete_automated_backups=False,
            backup_retention=Duration.days(7),
            parameter_group=rds.ParameterGroup.from_parameter_group_name(
                self,
                "para-group-mysql",
                parameter_group_name="default.mysql5.7"))
        for asg_sg in asg_security_groups:
            db_mysql_easy.connections.allow_default_port_from(
                asg_sg, "EC2 Autoscaling Group access MySQL")
Example #4
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Code from zhxinyua: create a VPC, S3 endpoint, bastion host, EC2 instance, EBS volume, a CloudWatch Events rule to stop EC2, and AWS Backup for EC2

        # create a new VPC
        vpc_new = aws_ec2.Vpc(self, "VpcFromCDK", cidr="10.0.0.0/16")
        vpc_new.add_gateway_endpoint(
            "S3Endpoint",
            service=aws_ec2.GatewayVpcEndpointAwsService.S3,
            # Attach the endpoint to the public subnets
            subnets=[
                aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.PUBLIC)
            ])

        # Only allow a specific range of IPs to connect to the bastion.
        # BastionHostLinux supports two ways to connect: SSM and EC2 Instance Connect.
        # EC2 Instance Connect is not supported in the China regions.
        host_bastion = aws_ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc_new,
            subnet_selection=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC))

        # Use your own IP range to access this bastion instead of 1.2.3.4/32
        host_bastion.allow_ssh_access_from(aws_ec2.Peer.ipv4("1.2.3.4/32"))

        # Use Amazon Linux as the OS
        amzn_linux = aws_ec2.MachineImage.latest_amazon_linux(
            generation=aws_ec2.AmazonLinuxGeneration.AMAZON_LINUX,
            edition=aws_ec2.AmazonLinuxEdition.STANDARD,
            virtualization=aws_ec2.AmazonLinuxVirt.HVM,
            storage=aws_ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Security group
        my_security_group = aws_ec2.SecurityGroup(
            self,
            "SecurityGroup",
            vpc=vpc_new,
            description="SecurityGroup from CDK",
            security_group_name="CDK SecurityGroup",
            allow_all_outbound=True,
        )

        my_security_group.add_ingress_rule(aws_ec2.Peer.ipv4('10.0.0.0/16'),
                                           aws_ec2.Port.tcp(22),
                                           "allow ssh access from the VPC")

        # Set up a web server instance in a public subnet
        work_server = aws_ec2.Instance(
            self,
            "WebInstance",
            instance_type=aws_ec2.InstanceType("Write an EC2 instance type"),
            machine_image=amzn_linux,
            vpc=vpc_new,
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC),
            security_group=my_security_group,
            key_name="Your SSH key pair name")

        # Allow web connections from anywhere
        work_server.connections.allow_from_any_ipv4(aws_ec2.Port.tcp(80),
                                                    "allow http from world")
        work_server.connections.allow_from_any_ipv4(aws_ec2.Port.tcp(443),
                                                    "allow https from world")

        # Attach a second EBS volume to the web instance
        work_server.instance.add_property_override(
            "BlockDeviceMappings", [{
                "DeviceName": "/dev/sdb",
                "Ebs": {
                    "VolumeSize": "30",
                    "VolumeType": "gp2",
                    "DeleteOnTermination": "true"
                }
            }])
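        # Alternatively (a sketch, not from the original source): the same second
        # volume can be declared with the high-level `block_devices` prop on
        # aws_ec2.Instance, avoiding the raw CloudFormation override:
        # work_server = aws_ec2.Instance(
        #     self, "WebInstance", ...,
        #     block_devices=[
        #         aws_ec2.BlockDevice(
        #             device_name="/dev/sdb",
        #             volume=aws_ec2.BlockDeviceVolume.ebs(
        #                 30,
        #                 volume_type=aws_ec2.EbsDeviceVolumeType.GP2,
        #                 delete_on_termination=True,
        #             ),
        #         )
        #     ])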

        # CloudWatch Events rule to stop the instances every day at 15:00 UTC
        # The AwsApi target invokes the AWS API via the JavaScript SDK
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_events_targets/AwsApi.html
        stop_EC2 = AwsApi(
            service="EC2",
            action="stopInstances",
            parameters={
                "InstanceIds":
                [work_server.instance_id, host_bastion.instance_id]
            })

        Rule(self,
             "ScheduleRule",
             schedule=Schedule.cron(minute="0", hour="15"),
             targets=[stop_EC2])

        # AWS backup part
        # create a BackupVault
        vault = backup.BackupVault(self,
                                   "BackupVault",
                                   backup_vault_name="CDK_Backup_Vault")

        # create a BackupPlan
        plan = backup.BackupPlan(self,
                                 "AWS-Backup-Plan",
                                 backup_plan_name="CDK_Backup")

        # Add backup resources, demonstrating two selection styles for the two resources
        plan.add_selection(
            "Selection",
            resources=[
                backup.BackupResource.from_ec2_instance(work_server),
                backup.BackupResource.from_tag("Name", "BastionHost")
            ])

        # Configure the backup rule details
        plan.add_rule(
            backup.BackupPlanRule(
                backup_vault=vault,
                rule_name="CDK_Backup_Rule",
                schedule_expression=Schedule.cron(minute="0",
                                                  hour="16",
                                                  day="1",
                                                  month="1-12"),
                delete_after=Duration.days(130),
                move_to_cold_storage_after=Duration.days(10)))

        # Output information after deployment
        CfnOutput(self,
                  "BastionHost_information",
                  value=host_bastion.instance_public_ip,
                  description="BastionHost's Public IP")
        CfnOutput(self,
                  "WebHost_information",
                  value=work_server.instance_public_ip,
                  description="Web server's Public IP")
Example #5
 def _setup_mysql(self) -> None:
     port = 3306
     database = "test"
     schema = "test"
     aurora_mysql = rds.DatabaseCluster(
         self,
         "aws-data-wrangler-aurora-cluster-mysql",
         removal_policy=RemovalPolicy.DESTROY,
         engine=rds.DatabaseClusterEngine.aurora_mysql(
             version=rds.AuroraMysqlEngineVersion.VER_5_7_12,
         ),
         cluster_identifier="mysql-cluster-wrangler",
         instances=1,
         default_database_name=database,
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         port=port,
         backup=rds.BackupProps(retention=Duration.days(1)),
         instance_props=rds.InstanceProps(
             vpc=self.vpc,
             security_groups=[self.db_security_group],
             publicly_accessible=True,
         ),
         subnet_group=self.rds_subnet_group,
         s3_import_buckets=[self.bucket],
         s3_export_buckets=[self.bucket],
     )
     glue.Connection(
         self,
         "aws-data-wrangler-mysql-glue-connection",
         description="Connect to Aurora (MySQL).",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-mysql",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:mysql://{aurora_mysql.cluster_endpoint.hostname}:{port}/{database}",
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     glue.Connection(
         self,
         "aws-data-wrangler-mysql-glue-connection-ssl",
         description="Connect to Aurora (MySQL) with SSL.",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-mysql-ssl",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:mysql://{aurora_mysql.cluster_endpoint.hostname}:{port}/{database}",
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
             "JDBC_ENFORCE_SSL": "true",
             "CUSTOM_JDBC_CERT": "s3://rds-downloads/rds-combined-ca-bundle.pem",
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     secrets.Secret(
         self,
         "aws-data-wrangler-mysql-secret",
         secret_name="aws-data-wrangler/mysql",
         description="MySQL credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "mysql",
                     "host": aurora_mysql.cluster_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": aurora_mysql.cluster_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "MysqlAddress", value=aurora_mysql.cluster_endpoint.hostname)
     CfnOutput(self, "MysqlPort", value=str(port))
     CfnOutput(self, "MysqlDatabase", value=database)
     CfnOutput(self, "MysqlSchema", value=schema)
Example #6
 def _setup_postgresql(self) -> None:
     port = 3306
     database = "postgres"
     schema = "public"
     pg = rds.ParameterGroup(
         self,
         "aws-data-wrangler-postgresql-params",
         engine=rds.DatabaseClusterEngine.aurora_postgres(
             version=rds.AuroraPostgresEngineVersion.VER_11_13,
         ),
         parameters={
             "apg_plan_mgmt.capture_plan_baselines": "off",
         },
     )
     aurora_pg = rds.DatabaseCluster(
         self,
         "aws-data-wrangler-aurora-cluster-postgresql",
         removal_policy=RemovalPolicy.DESTROY,
         engine=rds.DatabaseClusterEngine.aurora_postgres(
             version=rds.AuroraPostgresEngineVersion.VER_11_13,
         ),
         cluster_identifier="postgresql-cluster-wrangler",
         instances=1,
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         port=port,
         backup=rds.BackupProps(retention=Duration.days(1)),
         parameter_group=pg,
         s3_import_buckets=[self.bucket],
         s3_export_buckets=[self.bucket],
         instance_props=rds.InstanceProps(
             vpc=self.vpc,
             security_groups=[self.db_security_group],
             publicly_accessible=True,
         ),
         subnet_group=self.rds_subnet_group,
     )
     glue.Connection(
         self,
         "aws-data-wrangler-postgresql-glue-connection",
         description="Connect to Aurora (PostgreSQL).",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-postgresql",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:postgresql://{aurora_pg.cluster_endpoint.hostname}:{port}/{database}",
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     secrets.Secret(
         self,
         "aws-data-wrangler-postgresql-secret",
         secret_name="aws-data-wrangler/postgresql",
         description="Postgresql credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "postgresql",
                     "host": aurora_pg.cluster_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": aurora_pg.cluster_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "PostgresqlAddress", value=aurora_pg.cluster_endpoint.hostname)
     CfnOutput(self, "PostgresqlPort", value=str(port))
     CfnOutput(self, "PostgresqlDatabase", value=database)
     CfnOutput(self, "PostgresqlSchema", value=schema)
Example #7
    def __init__(self, scope: Construct, construct_id: str,
                 **kwargs: str) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.vpc = ec2.Vpc(
            self,
            "aws-data-wrangler-vpc",
            cidr="11.19.224.0/19",
            enable_dns_hostnames=True,
            enable_dns_support=True,
        )
        Tags.of(self.vpc).add("Name", "aws-data-wrangler")
        self.key = kms.Key(
            self,
            id="aws-data-wrangler-key",
            description="Aws Data Wrangler Test Key.",
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    sid="Enable IAM User Permissions",
                    effect=iam.Effect.ALLOW,
                    actions=["kms:*"],
                    principals=[iam.AccountRootPrincipal()],
                    resources=["*"],
                )
            ]),
        )
        kms.Alias(
            self,
            "aws-data-wrangler-key-alias",
            alias_name="alias/aws-data-wrangler-key",
            target_key=self.key,
        )
        self.bucket = s3.Bucket(
            self,
            id="aws-data-wrangler",
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True,
            ),
            lifecycle_rules=[
                s3.LifecycleRule(
                    id="CleaningUp",
                    enabled=True,
                    expiration=Duration.days(1),
                    abort_incomplete_multipart_upload_after=Duration.days(1),
                ),
            ],
            versioned=True,
        )
        glue_db = glue.Database(
            self,
            id="aws_data_wrangler_glue_database",
            database_name="aws_data_wrangler",
            location_uri=f"s3://{self.bucket.bucket_name}",
        )
        log_group = logs.LogGroup(
            self,
            id="aws_data_wrangler_log_group",
            retention=logs.RetentionDays.ONE_MONTH,
        )
        log_stream = logs.LogStream(
            self,
            id="aws_data_wrangler_log_stream",
            log_group=log_group,
        )
        CfnOutput(self, "Region", value=self.region)
        CfnOutput(
            self,
            "VPC",
            value=self.vpc.vpc_id,
            export_name="aws-data-wrangler-base-VPC",
        )
        CfnOutput(
            self,
            "PublicSubnet1",
            value=self.vpc.public_subnets[0].subnet_id,
            export_name="aws-data-wrangler-base-PublicSubnet1",
        )
        CfnOutput(
            self,
            "PublicSubnet2",
            value=self.vpc.public_subnets[1].subnet_id,
            export_name="aws-data-wrangler-base-PublicSubnet2",
        )
        CfnOutput(
            self,
            "PrivateSubnet",
            value=self.vpc.private_subnets[0].subnet_id,
            export_name="aws-data-wrangler-base-PrivateSubnet",
        )
        CfnOutput(
            self,
            "KmsKeyArn",
            value=self.key.key_arn,
            export_name="aws-data-wrangler-base-KmsKeyArn",
        )
        CfnOutput(
            self,
            "BucketName",
            value=self.bucket.bucket_name,
            export_name="aws-data-wrangler-base-BucketName",
        )
        CfnOutput(self, "GlueDatabaseName", value=glue_db.database_name)
        CfnOutput(self, "LogGroupName", value=log_group.log_group_name)
        CfnOutput(self, "LogStream", value=log_stream.log_stream_name)
Example #8
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        # Pop the custom argument so only standard Stack kwargs reach the base class
        lambda_dir = kwargs.pop("lambda_dir")
        super().__init__(scope, construct_id, **kwargs)

        # Note: A dead-letter queue is optional but it helps capture any failed messages
        dlq = sqs.Queue(self,
                        id="dead_letter_queue_id",
                        retention_period=Duration.days(7))
        dead_letter_queue = sqs.DeadLetterQueue(max_receive_count=1, queue=dlq)

        upload_queue = sqs.Queue(self,
                                 id="sample_queue_id",
                                 visibility_timeout=Duration.seconds(30),
                                 dead_letter_queue=dead_letter_queue)

        sqs_subscription = sns_subs.SqsSubscription(upload_queue,
                                                    raw_message_delivery=True)

        upload_event_topic = sns.Topic(self, id="sample_sns_topic_id")

        # This binds the SNS Topic to the SQS Queue
        upload_event_topic.add_subscription(sqs_subscription)

        # Note: Lifecycle Rules are optional but are included here to keep costs
        #       low by cleaning up old files or moving them to lower cost storage options
        s3_bucket = s3.Bucket(
            self,
            id="sample_bucket_id",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            versioned=True,
            lifecycle_rules=[
                s3.LifecycleRule(
                    enabled=True,
                    expiration=Duration.days(365),
                    transitions=[
                        s3.Transition(
                            storage_class=s3.StorageClass.INFREQUENT_ACCESS,
                            transition_after=Duration.days(30)),
                        s3.Transition(storage_class=s3.StorageClass.GLACIER,
                                      transition_after=Duration.days(90)),
                    ])
            ])

        # Note: If you don't specify a filter, all uploads will trigger an event.
        #       Modify the event type to handle other object operations.
        # This binds the S3 bucket to the SNS Topic
        s3_bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED_PUT,
            s3n.SnsDestination(upload_event_topic),
            s3.NotificationKeyFilter(prefix="uploads", suffix=".csv"))

        function = _lambda.Function(
            self,
            "lambda_function",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="lambda_function.handler",
            code=_lambda.Code.from_asset(path=lambda_dir))

        # This binds the lambda to the SQS Queue
        invoke_event_source = lambda_events.SqsEventSource(upload_queue)
        function.add_event_source(invoke_event_source)

        # Examples of CloudFormation outputs
        CfnOutput(
            self,
            "UploadFileToS3Example",
            value="aws s3 cp <local-path-to-file> s3://{}/".format(
                s3_bucket.bucket_name),
            description=
            "Upload a file to S3 (using AWS CLI) to trigger the SQS chain",
        )
        CfnOutput(
            self,
            "UploadSqsQueueUrl",
            value=upload_queue.queue_url,
            description="Link to the SQS Queue triggered on S3 uploads",
        )
        CfnOutput(
            self,
            "LambdaFunctionName",
            value=function.function_name,
        )
        CfnOutput(
            self,
            "LambdaFunctionLogGroupName",
            value=function.log_group.log_group_name,
        )
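The stack expects a `lambda_function.py` with a `handler` function inside `lambda_dir`. A minimal sketch of what that handler might look like: because `raw_message_delivery=True` is set on the SNS subscription, each SQS record body is the S3 event notification itself:

import json


def handler(event, context):
    # Each SQS record wraps one S3 event notification (raw message delivery)
    for record in event["Records"]:
        s3_event = json.loads(record["body"])
        for s3_record in s3_event.get("Records", []):
            bucket = s3_record["s3"]["bucket"]["name"]
            key = s3_record["s3"]["object"]["key"]
            print(f"New upload: s3://{bucket}/{key}")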
Example #9
def create_static_site(scope: InfraStack, hosted_zone: route53.HostedZone):
    bucket = s3.Bucket(
        scope,
        id=scope.context.construct_id("s3"),
        bucket_name=f'{scope.context.construct_id("s3")}-bucket',
        versioned=True,
        block_public_access=s3.BlockPublicAccess(
            restrict_public_buckets=True,
            block_public_policy=True,
            block_public_acls=True,
        ),
        removal_policy=RemovalPolicy.RETAIN,
        auto_delete_objects=False,
        lifecycle_rules=[
            s3.LifecycleRule(noncurrent_version_expiration=Duration.days(7))
        ],
    )

    prod_domain_name = "gingerbeans.tech"
    lower_domain_name = f"{scope.context.env_name}.gingerbeans.tech"
    domain_name = (prod_domain_name if scope.context.env_name == PRODUCTION
                   else lower_domain_name)

    my_certificate = acm.DnsValidatedCertificate(
        scope,
        scope.context.construct_id("certificate"),
        domain_name=domain_name,
        hosted_zone=hosted_zone,
        region="us-east-1",
    )

    origin_access_identity = cloudfront.OriginAccessIdentity(
        scope, scope.context.construct_id("cfOriginAccessIdentity"))
    cf_s3_access = iam.PolicyStatement()
    cf_s3_access.add_actions("s3:GetBucket*")
    cf_s3_access.add_actions("s3:GetObject*")
    cf_s3_access.add_actions("s3:List*")
    cf_s3_access.add_resources(bucket.bucket_arn)
    cf_s3_access.add_resources(f"{bucket.bucket_arn}/*")
    cf_s3_access.add_canonical_user_principal(
        origin_access_identity.
        cloud_front_origin_access_identity_s3_canonical_user_id)
    bucket.add_to_resource_policy(cf_s3_access)

    distro = cloudfront.Distribution(
        scope,
        scope.context.construct_id("dist"),
        default_behavior=cloudfront.BehaviorOptions(origin=origins.S3Origin(
            bucket=bucket, origin_access_identity=origin_access_identity)),
        default_root_object="index.html",
        domain_names=[domain_name],
        certificate=my_certificate,
    )

    route53.AaaaRecord(
        scope,
        scope.context.construct_id("AAAA"),
        record_name=domain_name,
        target=route53.RecordTarget.from_alias(
            targets.CloudFrontTarget(distro)),
        zone=hosted_zone,
    )

    route53.ARecord(
        scope,
        scope.context.construct_id("A"),
        record_name=domain_name,
        target=route53.RecordTarget.from_alias(
            targets.CloudFrontTarget(distro)),
        zone=hosted_zone,
    )

    s3_deployment.BucketDeployment(
        scope,
        scope.context.construct_id("s3_deployment"),
        sources=[s3_deployment.Source.asset("../gb-ui/build")],
        destination_key_prefix="/",
        destination_bucket=bucket,
        distribution=distro,
        distribution_paths=["/*"],
    )
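Note that `acm.DnsValidatedCertificate` is deprecated in recent aws-cdk-lib releases. When the stack itself deploys to us-east-1 (where CloudFront requires its certificates), a sketch of the stable replacement follows; for stacks in other regions the deprecated construct remains the simpler cross-region option:

my_certificate = acm.Certificate(
    scope,
    scope.context.construct_id("certificate"),
    domain_name=domain_name,
    validation=acm.CertificateValidation.from_dns(hosted_zone),
)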