Example #1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        self.wwwBucket = s3.Bucket(
            self,
            "HtmlBucket",
            access_control=s3.BucketAccessControl.PUBLIC_READ,
            bucket_name="music-suite-www-dev",
            website_index_document="index.html",
            website_error_document="404.html")

        s3_deployment.BucketDeployment(
            self,
            "HtmlDeployment",
            destination_bucket=self.wwwBucket,
            sources=[s3_deployment.Source.asset("html")])

        self.publicReadStatement = iam.PolicyStatement()
        self.publicReadStatement.add_actions("s3:GetObject")
        self.publicReadStatement.add_resources("%s/*" %
                                               self.wwwBucket.bucket_arn)
        self.publicReadStatement.add_principals(iam.AnyPrincipal())

        self.wwwBucket.add_to_resource_policy(self.publicReadStatement)
Example #2
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        #s3 bucket
        static_bucket = aws_s3.Bucket(
            self,
            "StaticBucket",
            versioned=True,
            public_read_access=True,
            website_index_document="index.html",
            website_error_document="404.html",
            removal_policy=core.RemovalPolicy.DESTROY
        )

        #import html files
        add_assets = aws_s3_deployment.BucketDeployment(
            self,
            "AssetsDeploy",
            sources=[
                aws_s3_deployment.Source.asset(
                    "deployments/assets"
                )
            ],
            destination_bucket=static_bucket
        )
Example #3
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create s3 bucket
        bucket = s3.Bucket(self,
                           "cdkdemobucket",
                           public_read_access=True,
                           bucket_name="cdkdemobucket")

        # upload local file to s3 bucket
        deploy = s3deploy.BucketDeployment(
            self,
            'DeployLocal',
            sources=[s3deploy.Source.asset('resources/s3')],
            destination_bucket=bucket)

        # create lambda function
        handler = lambda_.Function(
            self,
            "toppage",
            runtime=lambda_.Runtime.PYTHON_3_7,
            code=lambda_.Code.asset("resources/webapp/artifact"),
            handler="webapp.main",
            environment=dict(BUCKET=bucket.bucket_name))

        bucket.grant_read_write(handler)

        #api gateway
        api = apigateway.RestApi(self, "demo page", rest_api_name="demo page")
        get_top_page = apigateway.LambdaIntegration(
            handler,
            request_templates={"application/json": '{ "statusCode": "200" }'})

        api.root.add_method("GET", get_top_page)
Example #4
    def __init__(self, scope: cdk.Construct, construct_id: str,
        user_pool=None,
        web_bucket=None,
        api_gateway=None,
        **kwargs) -> None:

        super().__init__(scope, construct_id, **kwargs)

        dirname = os.path.dirname(__file__)
        client = cognito.UserPoolClient(self, "UserPoolClient",
            user_pool=user_pool,
            auth_flows={
                "user_password": True
            },
            o_auth={
                "flows": {
                    "implicit_code_grant": True
                },
                "callback_urls": ["{}/web/token.html".format(api_gateway.url)]
            }
        )
        directory = Path("{}/target".format(dirname))
        directory.mkdir(parents=True, exist_ok=True)
        for child in directory.iterdir():
            os.remove(str(child))
        with open("{}/target/client_id.json".format(dirname), "w") as text_file:
            text_file.write(json.dumps({'clientId': client.user_pool_client_id}))
        src_dir = Path("{}/src".format(dirname))
        for child in src_dir.iterdir():
            shutil.copy(str(child), str(directory))

        s3_deployment.BucketDeployment(self, "WebDeployment",
            destination_bucket=web_bucket,
            sources=[s3_deployment.Source.asset(str(directory))]
        )
Example #5
    def S3_FRONTENDDEPLOY(self):
        bucket = s3.Bucket(
            self,
            "FrontendBucket",
            removal_policy=core.RemovalPolicy.DESTROY,
            versioned=True,
        )

        distribution = cloudfront.Distribution(
            self,
            "FrontendDistribution",
            # TODO: The domain and cert info should be env vars
            domain_names=["www.seanfischer.io"],
            certificate=certificates.Certificate.from_certificate_arn(
                self,
                "DomainCertificateEast1",
                "arn:aws:acm:us-east-1:261392311630:certificate/859dc9d1-7a5f-4474-bcad-bcba1607a5ed",
            ),
            default_root_object="index.html",
            default_behavior=cloudfront.BehaviorOptions(
                origin=origins.S3Origin(bucket),
                viewer_protocol_policy=cloudfront.ViewerProtocolPolicy.
                REDIRECT_TO_HTTPS,
            ),
        )

        s3deploy.BucketDeployment(
            self,
            "FrontendS3Deployment",
            sources=[s3deploy.Source.asset("./src")],
            destination_bucket=bucket,
            distribution=distribution,
        )

        return bucket
Example #6
    def __init__(self, app: core.App, cfn_name: str, stack_env):
        super().__init__(scope=app, id=f"{cfn_name}-{stack_env}")

        s3_bucket = s3.Bucket(scope=self,
                              id=f"{cfn_name}-{stack_env}-bucket",
                              website_index_document="index.html")

        # upload files in `./html` to the bucket defined above
        _ = s3_deploy.BucketDeployment(
            scope=self,
            id=f"{cfn_name}-{stack_env}-deployments",
            sources=[s3_deploy.Source.asset("./html")],
            destination_bucket=s3_bucket,
            destination_key_prefix=self.ORIGIN_PATH)

        # set S3 as origin
        s3_origin = origin.S3Origin(bucket=s3_bucket,
                                    origin_path=self.ORIGIN_PATH)
        # create CloudFront Distribution
        _ = cloud_front.Distribution(
            scope=self,
            id=f"{cfn_name}-{stack_env}",
            default_behavior=cloud_front.BehaviorOptions(
                # may warn here due to a type mismatch
                origin=s3_origin))
Example #7
    def __init__(self, scope: core.Construct, construct_id: str, public_bucket_name, api) -> None:
        super().__init__(scope, construct_id)

        public_bucket = aws_s3.Bucket(self,
                                      'public_bucket',
                                      bucket_name=public_bucket_name,
                                      public_read_access=True,
                                      removal_policy=core.RemovalPolicy.DESTROY,
                                      website_index_document='index.html')

        # TODO paste deployment url here, getting it from the passed api entity doesn't work
        api_url = ''

        static_template_file_name = 'static_upload_site/index-template.html'

        with open(static_template_file_name, 'r') as f:
            src = Template(f.read())
            prepared_site_content = src.substitute({"api_url": api_url})

        with open('static_upload_site/index.html', 'w') as f:
            f.write(prepared_site_content)

        # static site hosted on s3 allowing uploads
        static_upload_site = aws_s3_deployment.BucketDeployment(
            self,
            "deployStaticWebsite",
            sources=[aws_s3_deployment.Source.asset("static_upload_site")],
            destination_bucket=public_bucket
        )
Example #8
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        bucket_name = 'devassoc-storage'
        bucket = s3.Bucket(self,
                           'bucket',
                           bucket_name=bucket_name,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           auto_delete_objects=True)
        s3deploy.BucketDeployment(
            self,
            'DeployFiles',
            destination_bucket=bucket,
            sources=[
                s3deploy.Source.asset('./study_guide_exercises/polly_file')
            ],
            storage_class=s3deploy.StorageClass.ONEZONE_IA)

        core.CfnOutput(self, 'new-bucket', value=bucket.bucket_name)

        encrypt_enforce_bucket_name = 'devassoc-encrypted-storage'
        encrypt_enforce_bucket = s3.Bucket(
            self,
            'encrypt-enforced-bucket',
            bucket_name=encrypt_enforce_bucket_name,
            removal_policy=core.RemovalPolicy.DESTROY,
            auto_delete_objects=True)
        deny_incorrect_statement = {
            "Sid": "DenyIncorrectEncryption",
            "Effect": "Deny",
            "Principal": "*",
            "Action": "s3:PutObject",
            "Resource": f"{encrypt_enforce_bucket.bucket_arn}/*",
            "Condition": {
                "StringNotEquals": {
                    "s3:x-amz-server-side-encryption": "AES256"
                }
            }
        }
        encrypt_enforce_bucket.add_to_resource_policy(
            iam.PolicyStatement.from_json(deny_incorrect_statement))
        deny_missing_statement = {
            "Sid": "DenyMissingEncryption",
            "Effect": "Deny",
            "Principal": "*",
            "Action": "s3:PutObject",
            "Resource": f"{encrypt_enforce_bucket.bucket_arn}/*",
            "Condition": {
                "Null": {
                    "s3:x-amz-server-side-encryption": True
                }
            }
        }
        encrypt_enforce_bucket.add_to_resource_policy(
            iam.PolicyStatement.from_json(deny_missing_statement))

        core.CfnOutput(self,
                       'new-encrypt-enforce-bucket',
                       value=encrypt_enforce_bucket.bucket_name)
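With both deny statements attached, any PutObject to the second bucket has to carry the AES256 server-side-encryption header. A minimal sketch of a compliant upload with boto3, assuming the requested bucket name devassoc-encrypted-storage was actually available at deploy time:

import boto3

s3_client = boto3.client("s3")

# Succeeds: the AES256 header satisfies both the StringNotEquals and the Null condition.
s3_client.put_object(
    Bucket="devassoc-encrypted-storage",
    Key="example.txt",
    Body=b"hello",
    ServerSideEncryption="AES256",
)

# Omitting ServerSideEncryption, or sending a different algorithm, is rejected by the bucket policy.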
Example #9
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        # Upload application code to S3 bucket
        artifact_bucket = s3.Bucket(
            self,
            id,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            encryption=s3.BucketEncryption.KMS_MANAGED,
            removal_policy=core.RemovalPolicy.DESTROY,
            auto_delete_objects=True,
            server_access_logs_prefix="bucketAccessLog")
        code_path = path.dirname(path.abspath(__file__))
        s3deploy.BucketDeployment(
            self,
            "DeployCode",
            sources=[
                s3deploy.Source.asset(code_path + "/../../deployment/app_code")
            ],
            destination_bucket=artifact_bucket,
            destination_key_prefix="app_code")
        self._code_bucket = artifact_bucket.bucket_name
Example #10
    def _deploy_glue_job_code(self, context: DataJobContext,
                              glue_job_name: str,
                              path_to_glue_job: str) -> str:
        """deploy the code of this glue job to the deployment bucket
        (can be found in the glue context object)"""
        glue_job_dir, glue_job_file_name = GlueJob._get_glue_job_dir_and_file_name(
            path_to_glue_job=path_to_glue_job)
        logger.debug(f"deploying glue job folder {glue_job_dir}")
        aws_s3_deployment.BucketDeployment(
            self,
            f"{glue_job_name}-CodeDeploy",
            sources=[
                # we can either sync dirs or zip files.
                # To keep it easy for now we agreed to sync the full dir.
                # todo - sync only the glue job itself.
                aws_s3_deployment.Source.asset(glue_job_dir)
            ],
            destination_bucket=context.deployment_bucket,
            destination_key_prefix=glue_job_name,
        )

        return GlueJob._create_s3_url_for_job(
            context=context,
            glue_job_id=glue_job_name,
            glue_job_file_name=glue_job_file_name,
        )
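The _create_s3_url_for_job helper isn't shown in this snippet. A minimal sketch of what it might look like, assuming it simply joins the deployment bucket with the key prefix used in the BucketDeployment above (only the names come from the snippet; the body is an assumption):

    @staticmethod
    def _create_s3_url_for_job(context: DataJobContext, glue_job_id: str,
                               glue_job_file_name: str) -> str:
        # Assumption: the script lands under <glue_job_id>/<file name> because the
        # BucketDeployment above uses destination_key_prefix=glue_job_name.
        return f"s3://{context.deployment_bucket.bucket_name}/{glue_job_id}/{glue_job_file_name}"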
Example #11
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        
        bucket = _s3.Bucket(
            self, '_s3-site-bucket',
            website_index_document='index.html',
            public_read_access=True,
            removal_policy=core.RemovalPolicy.DESTROY
        )
        
        deployment = _deployment.BucketDeployment(
            self, '_s3-asset',
            destination_bucket=bucket,
            sources=[_deployment.Source.asset('./static/')]
        )
        
        cloudfrontorigin = _cloudfront_origins.S3Origin(
            bucket,
        )
        
        cloudfront = _cloudfront.Distribution(
            self, 'cloudfront_dist',
            default_behavior=_cloudfront.BehaviorOptions(
                origin=cloudfrontorigin)
        )
        
        core.CfnOutput(self, 'Cloudfront-DomainName', value=cloudfront.distribution_domain_name)
Example #12
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = sqs.Queue(self,
                          "sqsqueue",
                          visibility_timeout=core.Duration.seconds(300))

        topic = sns.Topic(self, "snstopic")

        topic.add_subscription(subs.SqsSubscription(queue))

        bucket = s3.Bucket(
            self,
            "s3Bucket",
            encryption=s3.BucketEncryption.KMS_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        bucket.add_event_notification(s3.EventType.OBJECT_CREATED_PUT,
                                      s3n.SnsDestination(topic))

        s3deploy.BucketDeployment(
            self,
            "DeployFile",
            sources=[s3deploy.Source.asset("./assets")],
            destination_bucket=bucket,
            retain_on_delete=False,
        )
Example #13
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        """Deploy the frontend to an S3 bucket
           
           Sets up:
           - An S3 bucket with public access, static website hosting, and a bucket
             policy that allows anonymous GetObject calls
        """
        super().__init__(app, id, **kwargs)

        bucket = _s3.Bucket(self,
                            "guestbook",
                            public_read_access=True,
                            website_index_document="index.html")

        bucket_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=["s3:GetObject"],
            principals=[_iam.AnyPrincipal()],
            resources=[bucket.bucket_arn + "/*"])

        bucket.add_to_resource_policy(bucket_policy)

        _s3_deploy.BucketDeployment(self,
                                    "DeployWebsite",
                                    sources=[_s3_deploy.Source.asset("./src")],
                                    destination_bucket=bucket)

        core.CfnOutput(self,
                       'BucketDomainName',
                       value=bucket.bucket_website_domain_name)
Example #14
    def __init__(self,scope: core.Construct, id: str, **kwargs,) -> None:
        super().__init__(scope, id, **kwargs)

        # Upload application code to S3 bucket
        self.artifact_bucket = s3.Bucket(self, id,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            encryption=s3.BucketEncryption.KMS_MANAGED,
            removal_policy=core.RemovalPolicy.RETAIN,
            access_control = s3.BucketAccessControl.LOG_DELIVERY_WRITE,
            versioned=True  # required by CodeBuild
        )

        proj_dir=os.path.split(os.environ['VIRTUAL_ENV'])[0]
        self.deploy=s3deploy.BucketDeployment(self, "DeployCode",
            sources=[s3deploy.Source.asset(proj_dir+'/deployment/app_code')],
            destination_bucket= self.artifact_bucket,
            destination_key_prefix="app_code"
        )
        self.bucket_name = self.artifact_bucket.bucket_name

        # Override cfn_nag rules for S3 access logging
        self.artifact_bucket.node.default_child.add_metadata('cfn_nag',{
            "rules_to_suppress": [
                {
                    "id": "W35",
                    "reason": "bucket access logging blocks bucket removal; disabled for now"
                },
                {
                    "id": "W51",
                    "reason": "bucket access is controlled at the IAM level"
                }
            ]
        })
Example #15
 def __init__(self, scope: cdk.Construct, construct_id: str,
              rest_api: apigw.Resource, **kwargs) -> None:
     super().__init__(scope, construct_id, **kwargs)
     asset_bucket = s3.Bucket(self,
                              'AssetBucket',
                              website_index_document='index.html',
                              public_read_access=True,
                              removal_policy=cdk.RemovalPolicy.DESTROY)
     s3_deployment.BucketDeployment(
         self,
         'BucketDeployment',
         sources=[s3_deployment.Source.asset('ui-static')],
         destination_bucket=asset_bucket,
         retain_on_delete=False)
     cdk.CfnOutput(self,
                   'BucketDomain',
                   value=asset_bucket.bucket_website_domain_name)
     s3_integration_role = iam.Role(self,
                                    'AwsIntegrationRole',
                                    assumed_by=iam.ServicePrincipal(
                                        service='apigateway.amazonaws.com'))
     asset_bucket.grant_read(s3_integration_role)
     self.ui_resource = self._add_get_integration(rest_api, asset_bucket,
                                                  s3_integration_role)
     self._add_item_integration(self.ui_resource, asset_bucket,
                                s3_integration_role)
Example #16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # S3 bucket for website content
        websiteBucket = aws_s3.Bucket(
            self,
            "websiteBucket",
            public_read_access=True,
            website_index_document="index.html",
            access_control=aws_s3.BucketAccessControl.PUBLIC_READ)

        # S3 bucket for uploaded image and video content. This is set up for uploading
        # content that is to be viewed by the public. It can't be the same bucket as the
        # website if you're using aws_s3_deployment, since aws_s3_deployment clears the
        # bucket out before deploying.
        imagesBucket = aws_s3.Bucket(
            self,
            "imagesBucket",
            public_read_access=True,
            access_control=aws_s3.BucketAccessControl.PUBLIC_READ)
        # Used to enable uploading to the S3 bucket directly from client side code. Pretty unnecessary otherwise, unless you want to get
        # bucket listings from client side code
        imagesBucket.add_cors_rule(allowed_methods=[
            aws_s3.HttpMethods.GET, aws_s3.HttpMethods.PUT,
            aws_s3.HttpMethods.HEAD, aws_s3.HttpMethods.POST,
            aws_s3.HttpMethods.DELETE
        ],
                                   allowed_origins=["*"],
                                   allowed_headers=["*"],
                                   exposed_headers=["ETag"])

        # CloudFront distribution. Creates a combined front for the two buckets. The second bucket must have a folder called /media
        # (the name can be changed) containing all the content you want distributed. That content will appear under /media in the
        # distribution. The one pitfall is that subfolders of the website bucket (e.g. /admin) don't redirect to their
        # index.html properly: if /admin contains an index.html, navigating to /admin returns a 404 but /admin/index.html works
        # fine.
        distribution = aws_cloudfront.CloudFrontWebDistribution(
            self,
            "S3BucketDistribution",
            origin_configs=[
                aws_cloudfront.SourceConfiguration(
                    s3_origin_source=aws_cloudfront.S3OriginConfig(
                        s3_bucket_source=websiteBucket),
                    behaviors=[
                        aws_cloudfront.Behavior(is_default_behavior=True)
                    ]),
                aws_cloudfront.SourceConfiguration(
                    s3_origin_source=aws_cloudfront.S3OriginConfig(
                        s3_bucket_source=imagesBucket),
                    behaviors=[
                        aws_cloudfront.Behavior(path_pattern="/media/*")
                    ])
            ])

        # Code to automatically deploy the frontend code to the website bucket
        deployment = aws_s3_deployment.BucketDeployment(
            self,
            "deployStaticWebsite",
            sources=[aws_s3_deployment.Source.asset("PATH_TO_FRONTEND_CODE")],
            destination_bucket=websiteBucket,
            distribution=distribution)
Example #17
 def create_bucket_deployment(self):
     assets_directory = self.gather_assets()
     return s3deploy.BucketDeployment(
         self,
         self.config.get("stack_name") + "_deploy",
         sources=[s3deploy.Source.asset(assets_directory)],
         destination_bucket=self.bucket_assets,
     )
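Neither gather_assets nor self.bucket_assets is defined in this snippet. A hypothetical sketch of a gather_assets that stages the files to deploy into a temporary directory and returns its path (the assets_path config key is an assumption):

import shutil
import tempfile

def gather_assets(self):
    # Hypothetical: copy the configured asset directory into a staging folder so
    # Source.asset() gets a stable, self-contained path to zip and upload.
    staging_dir = tempfile.mkdtemp(prefix="assets-")
    shutil.copytree(self.config.get("assets_path"), staging_dir, dirs_exist_ok=True)
    return staging_dir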
Example #18
    def __init__(self, scope: core.Construct, _id: str, bucket_para,
                 **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)

        self.ddb_file_list = ddb.Table(
            self,
            "s3_migrate_ddb",
            partition_key=ddb.Attribute(name="Key",
                                        type=ddb.AttributeType.STRING),
            billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

        self.sqs_queue_DLQ = sqs.Queue(
            self,
            "s3_migrate_sqs_DLQ",
            visibility_timeout=core.Duration.hours(1),
            retention_period=core.Duration.days(14))
        self.sqs_queue = sqs.Queue(self,
                                   "s3_migrate_sqs_queue",
                                   visibility_timeout=core.Duration.hours(1),
                                   retention_period=core.Duration.days(14),
                                   dead_letter_queue=sqs.DeadLetterQueue(
                                       max_receive_count=24,
                                       queue=self.sqs_queue_DLQ))
        self.ssm_bucket_para = ssm.StringParameter(self,
                                                   "s3_migrate_bucket_para",
                                                   string_value=json.dumps(
                                                       bucket_para, indent=4))

        # You need to manually set up ssm_credential_para in SSM Parameter Store before deploying this CDK stack.
        # The parameter is imported here; the version number must match exactly.
        self.ssm_credential_para = ssm.StringParameter.from_secure_string_parameter_attributes(
            self,
            "ssm_parameter_credentials",
            parameter_name=ssm_parameter_credentials,
            version=1)

        # Create a new S3 bucket; new objects in this bucket will trigger SQS jobs.
        # This is not for an existing S3 bucket: the jobsender scans the existing bucket and creates the SQS jobs itself.
        self.s3bucket = s3.Bucket(self, "s3_migrate_bucket")
        self.s3bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED, s3n.SqsDestination(self.sqs_queue))

        # Deploy code
        self.s3_deploy = s3.Bucket(self, "s3_migrate_deploybucket")
        s3d.BucketDeployment(self,
                             "deploy_code",
                             sources=[s3d.Source.asset("./code")],
                             destination_bucket=self.s3_deploy)

        core.CfnOutput(self,
                       'NewS3Bucket_MigrateObjects',
                       value=self.s3bucket.bucket_name)
        core.CfnOutput(self,
                       'NewS3Bucket_deploy_code',
                       value=self.s3_deploy.bucket_name)
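Creating the SecureString parameter that from_secure_string_parameter_attributes imports is a manual, one-time step. A minimal sketch with boto3; the parameter name and value are placeholders, and the Version returned by put_parameter is the number that must match the version= argument above:

import json
import boto3

ssm_client = boto3.client("ssm")

response = ssm_client.put_parameter(
    Name="s3_migrate_credentials",  # placeholder; must match ssm_parameter_credentials
    Value=json.dumps({"aws_access_key_id": "...", "aws_secret_access_key": "..."}),
    Type="SecureString",
    Overwrite=True,
)
print(response["Version"])  # use this exact version number in the CDK stack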
Example #19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        account_id = core.Aws.ACCOUNT_ID

        # Bucket to store the frontend app

        FlaskFrontendBucket = s3.Bucket(self, 'FlaskFrontendWebsite',
            #access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            bucket_name=account_id+'-'+env_name+'-frontend',
            access_control=s3.BucketAccessControl.PUBLIC_READ,
            # block_public_access=s3.BlockPublicAccess(
            #     block_public_acls=True,
            #     block_public_policy=True,
            #     ignore_public_acls=True,
            #     restrict_public_buckets=True,
            # ),
            removal_policy=core.RemovalPolicy.DESTROY
        )
        
        policy_statement = iam.PolicyStatement(
            actions=["s3:GetObject"],
            resources=[f"{FlaskFrontendBucket.bucket_arn}/*"],
        )

        policy_statement.add_any_principal()

        static_site_policy_document = iam.PolicyDocument(
            statements=[policy_statement]
        )

        FlaskFrontendBucket.add_to_resource_policy(policy_statement)

        
        # The Origin Access Identity is a way to allow CloudFront
        # Access to the Website Bucket
        origin_access_identity = cfn.OriginAccessIdentity(
            self, "OriginAccessIdentity",
            comment="Allows Read-Access from CloudFront"
        )

        # We tell the website bucket to allow access from CloudFront
        FlaskFrontendBucket.grant_read(origin_access_identity)

        s3_deploy.BucketDeployment(self, "DeployFlaskFrontendWebsite",
            sources=[s3_deploy.Source.asset("./static-content")],
            destination_bucket=FlaskFrontendBucket,
           # destination_key_prefix=""
        )

        core.CfnOutput(self, 'S3FlaskFrontendExport',
            value = FlaskFrontendBucket.bucket_name,
            export_name='FlaskFrontendBucket'
        )
Example #20
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create an S3 bucket
        static_site_assets_bkt = _s3.Bucket(
            self,
            "assetsBucket",
            versioned=True,
            # public_read_access=True,
            # website_index_document="index.html",
            # website_error_document="404.html",
            removal_policy=core.RemovalPolicy.DESTROY)

        # Add assets to static site bucket
        add_assets_to_site = _s3_deployment.BucketDeployment(
            self,
            "deployStaticSiteAssets",
            sources=[
                _s3_deployment.Source.asset("advanced_use_cases/static_assets")
            ],
            destination_bucket=static_site_assets_bkt)

        # Create OAI for Cloudfront
        static_site_oai = _cloudfront.OriginAccessIdentity(
            self,
            "staticSiteOai",
            comment=f"OAI for static site from stack:{core.Aws.STACK_NAME}")

        # Deploy Cloudfront Configuration: Connecting OAI with static asset bucket
        cf_source_configuration = _cloudfront.SourceConfiguration(
            s3_origin_source=_cloudfront.S3OriginConfig(
                s3_bucket_source=static_site_assets_bkt,
                origin_access_identity=static_site_oai),
            behaviors=[
                _cloudfront.Behavior(
                    is_default_behavior=True,
                    compress=True,
                    allowed_methods=_cloudfront.CloudFrontAllowedMethods.ALL,
                    cached_methods=_cloudfront.CloudFrontAllowedCachedMethods.
                    GET_HEAD)
            ])

        # Create Cloudfront Distribution
        static_site_distribution = _cloudfront.CloudFrontWebDistribution(
            self,
            "staticSiteCfDistribution",
            comment="CDN for static website",
            origin_configs=[cf_source_configuration],
            price_class=_cloudfront.PriceClass.PRICE_CLASS_100)

        # Output Cloudfront Url
        output_1 = core.CfnOutput(
            self,
            "CloudfrontUrl",
            value=f"{static_site_distribution.domain_name}",
            description="The domain name of the static site")
Example #21
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        bucket = s3.Bucket(self, "Bucket", website_index_document="index.html")

        config = {"comment": "mythical-mysfits"}

        origin = cloudfront.CfnCloudFrontOriginAccessIdentity(
            self,
            "BucketOrigin",
            cloud_front_origin_access_identity_config=config)

        identity = iam.CanonicalUserPrincipal(
            canonical_user_id=origin.attr_s3_canonical_user_id)

        bucket.grant_read(identity)

        cloudfront_behaviour = cloudfront.Behavior(
            max_ttl=core.Duration.seconds(60),
            allowed_methods=cloudfront.CloudFrontAllowedMethods.
            GET_HEAD_OPTIONS,
            is_default_behavior=True)
        cloudfront_distribution = cloudfront.CloudFrontWebDistribution(
            self,
            "CloudFront",
            viewer_protocol_policy=cloudfront.ViewerProtocolPolicy.ALLOW_ALL,
            price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
            origin_configs=[
                cloudfront.SourceConfiguration(
                    behaviors=[cloudfront_behaviour],
                    origin_path="/web",
                    s3_origin_source=cloudfront.S3OriginConfig(
                        s3_bucket_source=bucket,
                        origin_access_identity_id=origin.ref),
                )
            ],
        )

        contentDir = os.path.realpath("../web/")
        source = deployment.Source.asset(contentDir)
        deployment.BucketDeployment(
            self,
            "DeployWebsite",
            sources=[source],
            destination_key_prefix="web/",
            destination_bucket=bucket,
            distribution=cloudfront_distribution,
            retain_on_delete=False,
        )

        core.CfnOutput(
            self,
            "CloudFrontURL",
            description="The CloudFront distribution URL",
            value="http://{}".format(cloudfront_distribution.domain_name),
        )
Example #22
    def resolve(self, scope: core.Construct) -> Dict[str, Any]:
        # If the same deployment is used multiple times, retain only the first instantiation
        if self._bucket_deployment is None:
            # Convert BucketDeploymentProps to dict
            deployment_props = vars(self._deployment_props)['_values']
            self._bucket_deployment = s3_deployment.BucketDeployment(
                scope, f'{self._id}_BucketDeployment'
                if self._id else 'BucketDeployment', **deployment_props)

        return {'S3Path': self.s3_path}
Example #23
    def __init__(self, scope: core.Construct, id: str, ssh_key_name: str,
                 **kw) -> None:
        super().__init__(scope, id, **kw)

        # start the web server (EC2 instance) in the default VPC
        vpc = ec2.Vpc.from_lookup(self, 'default_vpc', is_default=True)

        # create a security group for the web server that allows HTTP and SSH traffic
        sg = ec2.SecurityGroup(self,
                               'web_server_security_group',
                               vpc=vpc,
                               description='Allow HTTP and SSH access',
                               allow_all_outbound=True)
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22), 'SSH')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80), 'HTTP')
        sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(443), 'HTTPS')

        # create a bucket with all the web app components so they can be loaded
        # onto the EC2 instance later
        app_contents_bucket = s3.Bucket(
            self,
            'cdk_lamp_server',
            public_read_access=True,
            removal_policy=core.RemovalPolicy.DESTROY)
        s3_deploy.BucketDeployment(
            self,
            'deploy_webapp',
            sources=[s3_deploy.Source.asset('./web_app_contents')],
            destination_bucket=app_contents_bucket)

        # create an EC2 instance to work as a web server
        init_script = EC2_INIT_SCRIPT % dict(
            SOURCE_BUCKET_NAME=app_contents_bucket.bucket_name)
        web_server = ec2.Instance(
            self,
            'web_server',
            instance_name='cdk_web_app',
            instance_type=ec2.InstanceType('t2.micro'),
            machine_image=ec2.AmazonLinuxImage(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=sg,
            user_data=ec2.UserData.custom(init_script),
            key_name=ssh_key_name)

        # grant access to the EC2 instance
        app_contents_bucket.grant_read(web_server)

        # show the web_server public address as an output
        core.CfnOutput(self,
                       'web-server-address',
                       description='Web server public address',
                       value=web_server.instance_public_dns_name)
Example #24
    def prepare_s3_assets(self):
        asset_bucket = s3.Bucket(self, "AssetBucket")

        s3deploy.BucketDeployment(
            self,
            "DeployAsset",
            sources=[s3deploy.Source.asset("./locust")],
            destination_bucket=asset_bucket,
        )

        return asset_bucket
Example #25
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        #s3 bucket
        static_bucket = aws_s3.Bucket(
            self,
            "StaticBucket",
            versioned=True,
            removal_policy=core.RemovalPolicy.DESTROY)

        #import html files
        add_assets = aws_s3_deployment.BucketDeployment(
            self,
            "AssetsDeploy",
            sources=[aws_s3_deployment.Source.asset("deployments/assets")],
            destination_bucket=static_bucket)

        #add cloudfront origin access identity
        cloudfront_assets = aws_cloudfront.OriginAccessIdentity(
            self,
            "CloudfrontAssets",
            comment=f"CloudFront Assets for:{core.Aws.STACK_NAME}")

        #cloudfront configuration
        cloudfront_config = aws_cloudfront.SourceConfiguration(
            s3_origin_source=aws_cloudfront.S3OriginConfig(
                s3_bucket_source=static_bucket,
                origin_access_identity=cloudfront_assets),
            behaviors=[
                aws_cloudfront.Behavior(
                    is_default_behavior=True,
                    compress=True,
                    allowed_methods=aws_cloudfront.CloudFrontAllowedMethods.
                    ALL,
                    cached_methods=aws_cloudfront.
                    CloudFrontAllowedCachedMethods.GET_HEAD)
            ])

        #cloudfront distribution
        cloudfront_distribution = aws_cloudfront.CloudFrontWebDistribution(
            self,
            "CloudfrontDistribution",
            comment="CDN for static web",
            origin_configs=[cloudfront_config],
            price_class=aws_cloudfront.PriceClass.PRICE_CLASS_100)

        #cloudfront url
        cloudfront_output = core.CfnOutput(
            self,
            "cloudfrontURL",
            value=f"{cloudfront_distribution.domain_name}",
            description="Static web page url")
Example #26
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        web_app_root = os.path.abspath('./web')

        bucket = _s3.Bucket(self,
                            'Bucket',
                            website_index_document='index.html')

        origin = cloudfront.OriginAccessIdentity(self,
                                                 'BucketOrigin',
                                                 comment='mythical-mysfits')

        bucket.grant_read(
            _iam.CanonicalUserPrincipal(
                origin.cloud_front_origin_access_identity_s3_canonical_user_id)
        )

        cdn = cloudfront.CloudFrontWebDistribution(
            self,
            'CloudFront',
            viewer_protocol_policy=cloudfront.ViewerProtocolPolicy.ALLOW_ALL,
            price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
            origin_configs=[
                cloudfront.SourceConfiguration(
                    behaviors=[
                        cloudfront.Behavior(
                            is_default_behavior=True,
                            max_ttl=cdk.Duration.seconds(31536000),
                            allowed_methods=cloudfront.
                            CloudFrontAllowedMethods.GET_HEAD_OPTIONS)
                    ],
                    origin_path='/web',
                    s3_origin_source=cloudfront.S3OriginConfig(
                        s3_bucket_source=bucket,
                        origin_access_identity=origin))
            ])

        s3deploy.BucketDeployment(
            self,
            'DeployWebsite',
            sources=[s3deploy.Source.asset(web_app_root)],
            destination_key_prefix='web/',
            destination_bucket=bucket,
            distribution=cdn,
            retain_on_delete=False)

        cdk.CfnOutput(self,
                      'CloudFrontURL',
                      description='The CloudFront distribution URL',
                      value='https://' + cdn.domain_name)
Example #27
def add_static_site(stack: core.Stack):
    stack.static_site_bucket = Bucket(stack,
                                      'StaticSiteBucket',
                                      website_index_document="index.html",
                                      website_error_document="error.html",
                                      public_read_access=True,
                                      removal_policy=RemovalPolicy.RETAIN)

    stack.static_bucket_deploy = s3_deployment.BucketDeployment(
        stack,
        "StaticSiteDeploy",
        sources=[s3_deployment.Source.asset("./www/static-site-content")],
        destination_bucket=stack.static_site_bucket)
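A short usage sketch for this helper, assuming it is called from an existing stack's constructor (the stack class name is illustrative):

from aws_cdk import core

class StaticSiteStack(core.Stack):
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Attaches static_site_bucket and static_bucket_deploy to this stack.
        add_static_site(self)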
Example #28
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        static_bucket = aws_s3.Bucket(
            self,
            'MiscPublicFilesBucket',
            removal_policy=core.RemovalPolicy.RETAIN,
        )

        origin = aws_cloudfront.OriginAccessIdentity(
            self,
            'MiscPublicFilesOrigin',
            comment='CDN origin for miscellaneous public files',
        )

        cdn = aws_cloudfront.CloudFrontWebDistribution(
            self,
            'MiscPublicFilesCDN',
            comment='CDN for miscellaneous public files',
            origin_configs=[
                aws_cloudfront.SourceConfiguration(
                    s3_origin_source=aws_cloudfront.S3OriginConfig(
                        s3_bucket_source=static_bucket,
                        origin_access_identity=origin,
                    ),
                    behaviors=[
                        aws_cloudfront.Behavior(
                            is_default_behavior=True,
                            min_ttl=core.Duration.days(90),
                            max_ttl=core.Duration.days(360),
                            default_ttl=core.Duration.days(180),
                            compress=True,
                        )
                    ],
                )
            ],
            default_root_object='index.html',
            enable_ip_v6=True,
            http_version=aws_cloudfront.HttpVersion.HTTP2,
            price_class=aws_cloudfront.PriceClass.PRICE_CLASS_100,
            viewer_protocol_policy=aws_cloudfront.ViewerProtocolPolicy.
            REDIRECT_TO_HTTPS,  # NOQA
        )

        aws_s3_deployment.BucketDeployment(
            self,
            'MiscPublicFilesDeployment',
            sources=[aws_s3_deployment.Source.asset('public')],
            destination_bucket=static_bucket,
            distribution=cdn,
        )
Example #29
 def __init__(self, scope: cdk.Construct, construct_id: str,
              **kwargs) -> None:
     super().__init__(scope, construct_id, **kwargs)
     account = kwargs['env'].account
     region = kwargs['env'].region
     dirname = os.path.dirname(__file__)
     bucket = s3.Bucket(self,
                        "my-test-bucket-{}-{}".format(account, region))
     s3_deploy.BucketDeployment(
         self,
         "DeployTestFile",
         sources=[s3_deploy.Source.asset("{}/content".format(dirname))],
         destination_bucket=bucket)
     self.bucket_name = bucket.bucket_name
Example #30
    def create_website_bucket(self, deployment_path: typing.Optional[str]):
        r"""Creates the S3 buckets required to host the SPA website

        Args:
            deployment_path: The path to the contents of your deployable assets
        """
        self.website_assets_bucket = s3.Bucket(self, self.__website_identifier)
        if deployment_path is not None:
            s3_deployment.BucketDeployment(
                self,
                id=f"{self.__website_identifier}-deploy",
                sources=[
                    s3_deployment.Source.asset(deployment_path)  # pylint: disable=no-value-for-parameter
                ],
                destination_bucket=self.website_assets_bucket)
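A short usage sketch for this method, assuming it is called from the owning construct's constructor after self.__website_identifier is set (the ./build path is illustrative):

# Create the bucket and deploy the built SPA from ./build into it.
self.create_website_bucket(deployment_path="./build")

# Or create the bucket only, deploying nothing for now.
self.create_website_bucket(deployment_path=None)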