def __init__(self, scope: core.Construct, id: str, s3bucket, **kwargs) -> None:
    """Put a CloudFront distribution in front of an already-existing S3 bucket.

    Args:
        scope: Parent construct.
        id: Construct id.
        s3bucket: Name of the existing bucket that holds the site content.
    """
    super().__init__(scope, id, **kwargs)

    # Import the hosting bucket by name (it is created elsewhere).
    hosting_bucket = s3.Bucket.from_bucket_name(self, "s3bucket", s3bucket)

    # Optional WAF association; None when the context key is not set.
    web_acl_id = self.node.try_get_context("web_acl_id")

    distribution = cdn.CloudFrontWebDistribution(
        self,
        "webhosting-cdn",
        origin_configs=[
            cdn.SourceConfiguration(
                behaviors=[
                    cdn.Behavior(is_default_behavior=True)
                    # cdn.Behavior(is_default_behavior=False, path_pattern="/img/")
                ],
                # Serve objects from the /build prefix of the bucket.
                origin_path="/build",
                s3_origin_source=cdn.S3OriginConfig(
                    s3_bucket_source=hosting_bucket,
                    origin_access_identity=cdn.OriginAccessIdentity(
                        self, 'webhosting-origin'),
                ),
            )
        ],
        web_acl_id=web_acl_id)

    # Export the distribution id for consumption by other stacks.
    core.CfnOutput(
        self, 'cdnid',
        value=distribution.distribution_id,
        export_name='distribution-id')
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Create a private S3 bucket fronted by a CloudFront distribution.

    The bucket is reachable only through the Origin Access Identity; all
    viewer traffic is redirected to HTTPS.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Private content bucket; destroyed with the stack.
    bucket = s3.Bucket(
        self,
        'cloudfronttestbucket',
        bucket_name="testcloudfrontbuckettoday",
        public_read_access=False,
        removal_policy=core.RemovalPolicy.DESTROY,
    )

    # Identity CloudFront uses when fetching objects from the bucket.
    s3identity = cloudfront.OriginAccessIdentity(
        self, 'accessidentity', comment="access-identity-")

    s3Origin = origins.S3Origin(
        bucket=bucket, origin_access_identity=s3identity)

    current_behavior = cloudfront.BehaviorOptions(
        origin=s3Origin,
        viewer_protocol_policy=(
            cloudfront.ViewerProtocolPolicy.REDIRECT_TO_HTTPS),
    )

    skull_distribution = cloudfront.Distribution(
        self, 'skulldistibution', default_behavior=current_behavior)

    # Grant the OAI read access on the bucket contents.
    bucket.grant_read(identity=s3identity.grant_principal)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Frontend bucket for the Flask app, deployed from ./static-content.

    Bucket name is ``<account-id>-<env>-frontend`` (env from CDK context).
    """
    super().__init__(scope, id, **kwargs)

    env_name = self.node.try_get_context("env")
    account_id = core.Aws.ACCOUNT_ID

    # Bucket that stores the frontend app.
    # NOTE(review): PUBLIC_READ plus the any-principal GetObject policy
    # below makes the bucket world-readable, which makes the OAI grant
    # redundant — confirm public access is actually intended here.
    FlaskFrontendBucket = s3.Bucket(
        self, 'FlaskFrontendWebsite',
        encryption=s3.BucketEncryption.S3_MANAGED,
        bucket_name=account_id + '-' + env_name + '-frontend',
        access_control=s3.BucketAccessControl.PUBLIC_READ,
        removal_policy=core.RemovalPolicy.DESTROY,
    )

    # Allow anyone to read objects in the bucket.
    policy_statement = iam.PolicyStatement(
        actions=["s3:GetObject"],
        resources=[f"{FlaskFrontendBucket.bucket_arn}/*"],
    )
    policy_statement.add_any_principal()
    FlaskFrontendBucket.add_to_resource_policy(policy_statement)

    # The Origin Access Identity allows CloudFront to read the bucket.
    origin_access_identity = cfn.OriginAccessIdentity(
        self, "OriginAccessIdentity",
        comment="Allows Read-Access from CloudFront",
    )
    FlaskFrontendBucket.grant_read(origin_access_identity)

    # Upload the static content into the bucket at deploy time.
    s3_deploy.BucketDeployment(
        self, "DeployFlaskFrontendWebsite",
        sources=[s3_deploy.Source.asset("./static-content")],
        destination_bucket=FlaskFrontendBucket,
    )

    # Export the bucket name for other stacks.
    core.CfnOutput(
        self, 'S3FlaskFrontendExport',
        value=FlaskFrontendBucket.bucket_name,
        export_name='FlaskFrontendBucket',
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Frontend bucket (private, SPA-style website) served via CloudFront.

    Bucket name is ``<project><env>-bucket`` (both from CDK context).
    """
    super().__init__(scope, id, **kwargs)

    prj_name = self.node.try_get_context("project_name")
    env_name = self.node.try_get_context("env")

    # OAI used by the distribution to read the (fully-blocked) bucket.
    # BUG FIX: this identity was previously created and then ignored —
    # a second, unused OAI was created inline in the origin config.
    media_distribution_oai = cloudfront.OriginAccessIdentity(
        self, 'media-distribution-oai')
    media_distribution_oai.apply_removal_policy(core.RemovalPolicy.DESTROY)

    frontend_bucket = s3.Bucket(
        self, 'frontend-bucket',
        access_control=s3.BucketAccessControl.BUCKET_OWNER_FULL_CONTROL,
        encryption=s3.BucketEncryption.S3_MANAGED,
        bucket_name=prj_name + env_name + '-bucket',
        # SPA convention: errors fall back to index.html as well.
        website_index_document='index.html',
        website_error_document='index.html',
        block_public_access=s3.BlockPublicAccess(
            block_public_acls=True,
            block_public_policy=True,
            ignore_public_acls=True,
            restrict_public_buckets=True),
        removal_policy=core.RemovalPolicy.DESTROY)

    # Upload ./assets into the bucket at deploy time.
    s3_deployment.BucketDeployment(
        self, 'media-assets',
        sources=[s3_deployment.Source.asset('./assets')],
        destination_bucket=frontend_bucket)

    media_distribution = cloudfront.CloudFrontWebDistribution(
        self, 'media-distribution',
        origin_configs=[
            cloudfront.SourceConfiguration(
                behaviors=[cloudfront.Behavior(is_default_behavior=True)],
                s3_origin_source=cloudfront.S3OriginConfig(
                    s3_bucket_source=frontend_bucket,
                    origin_access_identity=media_distribution_oai))
        ],
        # Edge server locations:
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_cloudfront/PriceClass.html#aws_cdk.aws_cloudfront.PriceClass
        price_class=cloudfront.PriceClass.PRICE_CLASS_ALL)
    media_distribution.apply_removal_policy(core.RemovalPolicy.DESTROY)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Static site: versioned asset bucket + OAI + CloudFront CDN.

    Outputs the CloudFront domain name at the end.
    """
    super().__init__(scope, id, **kwargs)

    # Versioned bucket holding the static site assets.
    static_site_assets_bkt = _s3.Bucket(
        self,
        "assetsBucket",
        versioned=True,
        # public_read_access=True,
        # website_index_document="index.html",
        # website_error_document="404.html",
        removal_policy=core.RemovalPolicy.DESTROY)

    # Copy the local static assets into the bucket on deploy.
    add_assets_to_site = _s3_deployment.BucketDeployment(
        self,
        "deployStaticSiteAssets",
        sources=[
            _s3_deployment.Source.asset("advanced_use_cases/static_assets")
        ],
        destination_bucket=static_site_assets_bkt)

    # OAI so CloudFront (and only CloudFront) can read the bucket.
    static_site_oai = _cloudfront.OriginAccessIdentity(
        self,
        "staticSiteOai",
        comment=f"OAI for static site from stack:{core.Aws.STACK_NAME}")

    # Wire the OAI to the asset bucket as the distribution's origin.
    cf_source_configuration = _cloudfront.SourceConfiguration(
        s3_origin_source=_cloudfront.S3OriginConfig(
            s3_bucket_source=static_site_assets_bkt,
            origin_access_identity=static_site_oai),
        behaviors=[
            _cloudfront.Behavior(
                is_default_behavior=True,
                compress=True,
                allowed_methods=_cloudfront.CloudFrontAllowedMethods.ALL,
                cached_methods=(
                    _cloudfront.CloudFrontAllowedCachedMethods.GET_HEAD))
        ])

    static_site_distribution = _cloudfront.CloudFrontWebDistribution(
        self,
        "staticSiteCfDistribution",
        comment="CDN for static website",
        origin_configs=[cf_source_configuration],
        price_class=_cloudfront.PriceClass.PRICE_CLASS_100)

    # Surface the CloudFront URL to the operator.
    output_1 = core.CfnOutput(
        self,
        "CloudfrontUrl",
        value=f"{static_site_distribution.domain_name}",
        description="The domain name of the static site")
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Static web page: S3 bucket + OAI + CloudFront, URL as a CfnOutput."""
    super().__init__(scope, construct_id, **kwargs)

    # Versioned S3 bucket for the static files.
    static_bucket = aws_s3.Bucket(
        self,
        "StaticBucket",
        versioned=True,
        removal_policy=core.RemovalPolicy.DESTROY)

    # Deploy the local html files into the bucket.
    add_assets = aws_s3_deployment.BucketDeployment(
        self,
        "AssetsDeploy",
        sources=[aws_s3_deployment.Source.asset("deployments/assets")],
        destination_bucket=static_bucket)

    # Origin access identity so CloudFront can read the bucket.
    cloudfront_assets = aws_cloudfront.OriginAccessIdentity(
        self,
        "CloudfrontAssets",
        comment=f"CloudFront Assets for:{core.Aws.STACK_NAME}")

    # Distribution origin configuration.
    cloudfront_config = aws_cloudfront.SourceConfiguration(
        s3_origin_source=aws_cloudfront.S3OriginConfig(
            s3_bucket_source=static_bucket,
            origin_access_identity=cloudfront_assets),
        behaviors=[
            aws_cloudfront.Behavior(
                is_default_behavior=True,
                compress=True,
                allowed_methods=aws_cloudfront.CloudFrontAllowedMethods.ALL,
                cached_methods=(
                    aws_cloudfront.CloudFrontAllowedCachedMethods.GET_HEAD))
        ])

    cloudfront_distribution = aws_cloudfront.CloudFrontWebDistribution(
        self,
        "CloudfrontDistribution",
        comment="CDN for static web",
        origin_configs=[cloudfront_config],
        price_class=aws_cloudfront.PriceClass.PRICE_CLASS_100)

    # Publish the site URL.
    cloudfront_output = core.CfnOutput(
        self,
        "cloudfrontURL",
        value=f"{cloudfront_distribution.domain_name}",
        description="Static web page url")
def __init__(self, scope: core.Construct, id: str, s3bucket, acmcert,
             **kwargs) -> None:
    """CDN for an existing hosting bucket, with custom domain + SSM params.

    Args:
        scope: Parent construct.
        id: Construct id.
        s3bucket: Name of the existing S3 bucket with the built app.
        acmcert: ACM certificate construct for the alias domain.
    """
    super().__init__(scope, id, **kwargs)

    env_name = self.node.try_get_context("env")

    # Import the hosting bucket by name.
    bucket = s3.Bucket.from_bucket_name(self, 's3bucket', s3bucket)

    # Exposed as an attribute so other constructs can reference the CDN.
    self.cdn_id = cdn.CloudFrontWebDistribution(
        self, 'webhosting-cdn',
        origin_configs=[cdn.SourceConfiguration(
            behaviors=[
                cdn.Behavior(is_default_behavior=True)
            ],
            # Serve from the /build prefix of the bucket.
            origin_path="/build",
            s3_origin_source=cdn.S3OriginConfig(
                s3_bucket_source=bucket,
                origin_access_identity=cdn.OriginAccessIdentity(
                    self, 'webhosting-origin')
            )
        )],
        # SPA fallback: map common errors to the index page with HTTP 200.
        error_configurations=[
            cdn.CfnDistribution.CustomErrorResponseProperty(
                error_code=400,
                response_code=200,
                response_page_path="/"
            ),
            cdn.CfnDistribution.CustomErrorResponseProperty(
                error_code=403,
                response_code=200,
                response_page_path="/"
            ),
            cdn.CfnDistribution.CustomErrorResponseProperty(
                error_code=404,
                response_code=200,
                response_page_path="/"
            )
        ],
        alias_configuration=cdn.AliasConfiguration(
            acm_cert_ref=acmcert.certificate_arn,
            names=['app.cloudevangelist.ca']
        )
    )

    # Record distribution id and URL in SSM for other stacks/tools.
    ssm.StringParameter(
        self, 'cdn-dist-id',
        parameter_name='/' + env_name + '/app-distribution-id',
        string_value=self.cdn_id.distribution_id
    )
    ssm.StringParameter(
        self, 'cdn-url',
        parameter_name='/' + env_name + '/app-cdn-url',
        string_value='https://' + self.cdn_id.domain_name
    )
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Website bucket + CloudFront, with ./web deployed under the /web prefix.

    The deployment invalidates the distribution, so new content is served
    right after a deploy.
    """
    super().__init__(scope, construct_id, **kwargs)

    web_app_root = os.path.abspath('./web')

    bucket = _s3.Bucket(self, 'Bucket', website_index_document='index.html')

    # OAI through which CloudFront reads the bucket.
    origin = cloudfront.OriginAccessIdentity(
        self, 'BucketOrigin', comment='mythical-mysfits')
    bucket.grant_read(
        _iam.CanonicalUserPrincipal(
            origin.cloud_front_origin_access_identity_s3_canonical_user_id))

    cdn = cloudfront.CloudFrontWebDistribution(
        self,
        'CloudFront',
        viewer_protocol_policy=cloudfront.ViewerProtocolPolicy.ALLOW_ALL,
        price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
        origin_configs=[
            cloudfront.SourceConfiguration(
                behaviors=[
                    cloudfront.Behavior(
                        is_default_behavior=True,
                        # Cache for up to one year.
                        max_ttl=cdk.Duration.seconds(31536000),
                        allowed_methods=(
                            cloudfront.CloudFrontAllowedMethods
                            .GET_HEAD_OPTIONS))
                ],
                origin_path='/web',
                s3_origin_source=cloudfront.S3OriginConfig(
                    s3_bucket_source=bucket,
                    origin_access_identity=origin))
        ])

    # Upload the app and invalidate the distribution on deploy.
    s3deploy.BucketDeployment(
        self,
        'DeployWebsite',
        sources=[s3deploy.Source.asset(web_app_root)],
        destination_key_prefix='web/',
        destination_bucket=bucket,
        distribution=cdn,
        retain_on_delete=False)

    cdk.CfnOutput(
        self,
        'CloudFrontURL',
        description='The CloudFront distribution URL',
        value='https://' + cdn.domain_name)
def create_asset_oai_config(self):
    """Build an S3 origin config for the assets bucket behind a fresh OAI.

    Using an Origin Access Identity means the bucket itself never has to
    be made public.

    Returns:
        cloudfront.S3OriginConfig wired to ``self.bucket_assets``.
    """
    oai = cloudfront.OriginAccessIdentity(
        self,
        self.config.get("stack_name") + "_OAI",
    )
    return cloudfront.S3OriginConfig(
        s3_bucket_source=self.bucket_assets,
        origin_access_identity=oai,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """CDN for miscellaneous public files served out of a retained bucket."""
    super().__init__(scope, id, **kwargs)

    # Bucket survives stack deletion (RETAIN).
    static_bucket = aws_s3.Bucket(
        self,
        'MiscPublicFilesBucket',
        removal_policy=core.RemovalPolicy.RETAIN,
    )

    origin = aws_cloudfront.OriginAccessIdentity(
        self,
        'MiscPublicFilesOrigin',
        comment='CDN origin for miscellaneous public files',
    )

    cdn = aws_cloudfront.CloudFrontWebDistribution(
        self,
        'MiscPublicFilesCDN',
        comment='CDN for miscellaneous public files',
        origin_configs=[
            aws_cloudfront.SourceConfiguration(
                s3_origin_source=aws_cloudfront.S3OriginConfig(
                    s3_bucket_source=static_bucket,
                    origin_access_identity=origin,
                ),
                behaviors=[
                    aws_cloudfront.Behavior(
                        is_default_behavior=True,
                        # Long cache windows: content changes rarely.
                        min_ttl=core.Duration.days(90),
                        max_ttl=core.Duration.days(360),
                        default_ttl=core.Duration.days(180),
                        compress=True,
                    )
                ],
            )
        ],
        default_root_object='index.html',
        enable_ip_v6=True,
        http_version=aws_cloudfront.HttpVersion.HTTP2,
        price_class=aws_cloudfront.PriceClass.PRICE_CLASS_100,
        viewer_protocol_policy=(
            aws_cloudfront.ViewerProtocolPolicy.REDIRECT_TO_HTTPS),
    )

    # Upload ./public and invalidate the distribution on deploy.
    aws_s3_deployment.BucketDeployment(
        self,
        'MiscPublicFilesDeployment',
        sources=[aws_s3_deployment.Source.asset('public')],
        destination_bucket=static_bucket,
        distribution=cdn,
    )
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    """CloudFront distribution over an existing static-site bucket.

    The bucket name comes in as a CloudFormation parameter; since the
    bucket is imported, its resource policy is attached explicitly via
    CfnBucketPolicy so the OAI can read objects.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Bucket name supplied at deploy time.
    s3_bucket_name = cdk.CfnParameter(
        self, 'S3BucketForStaticContents',
        type='String',
        description='s3 bucket that the site contents are deployed to'
    )
    site_bucket = s3.Bucket.from_bucket_name(
        self, 'S3BucketForStaticSite', s3_bucket_name.value_as_string)

    cloudfrontOAI = cloudfront.OriginAccessIdentity(
        self, 'CloudFrontOAI',
        comment="Allows CloudFront to reach the bucket: {name}".format(
            name=s3_bucket_name.value_as_string)
    )
    cloudfrontOAI.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

    # Imported buckets don't get grants applied automatically, so attach
    # the read policy directly — see
    # https://stackoverflow.com/questions/60087302/how-to-add-resource-policy-to-existing-s3-bucket-with-cdk-in-javascript
    site_bucket_policy_statement = aws_iam.PolicyStatement(**{
        'actions': ['s3:GetObject'],
        'resources': [site_bucket.arn_for_objects('*')],
        'principals': [aws_iam.CanonicalUserPrincipal(
            cloudfrontOAI.cloud_front_origin_access_identity_s3_canonical_user_id)]
    })
    s3.CfnBucketPolicy(
        self, 'SiteBucketPolicy',
        bucket=site_bucket.bucket_name,
        policy_document=aws_iam.PolicyDocument(
            statements=[site_bucket_policy_statement])
    )

    distribution = cloudfront.Distribution(
        self, "myDist",
        default_behavior=cloudfront.BehaviorOptions(
            origin=cf_origins.S3Origin(
                bucket=site_bucket,
                origin_access_identity=cloudfrontOAI)
        ),
        error_responses=[
            # Requests to the distribution root can surface as
            # 403:Forbidden from S3; serve index.html (HTTP 200) instead.
            cloudfront.ErrorResponse(
                http_status=403,
                response_http_status=200,
                response_page_path='/index.html',
                ttl=cdk.Duration.seconds(10)),
            # Missing objects get the dedicated error page.
            cloudfront.ErrorResponse(
                http_status=404,
                response_http_status=404,
                response_page_path='/error.html',
                ttl=cdk.Duration.seconds(10))
        ]
    )

    # Exports for cross-stack consumption.
    cdk.CfnOutput(self, 'StackName', value=self.stack_name,
                  export_name='StackName')
    cdk.CfnOutput(self, 'SiteBucket', value=site_bucket.bucket_name,
                  export_name='SiteBucket')
    cdk.CfnOutput(self, 'DistributionId',
                  value=distribution.distribution_id,
                  export_name='DistributionId')
    cdk.CfnOutput(self, 'DistributionDomainName',
                  value=distribution.distribution_domain_name,
                  export_name='DistributionDomainName')
    cdk.CfnOutput(
        self, 'CloudFrontOriginAccessId',
        value=cloudfrontOAI.cloud_front_origin_access_identity_s3_canonical_user_id,
        export_name='CloudFrontOAI')
def __init__(self, scope: core.Construct, id: str, s3bucket, **kwargs):
    """CDN over an existing hosting bucket, exporting id/URL to SSM.

    Args:
        scope: Parent construct.
        id: Construct id.
        s3bucket: Name of the existing S3 bucket with the built app.
    """
    super().__init__(scope, id, **kwargs)

    env_name = self.node.try_get_context('env')

    # Import the hosting bucket by name.
    bucketName = s3.Bucket.from_bucket_name(self, 's3bucket', s3bucket)

    cdn_id = cdn.CloudFrontWebDistribution(
        self, 'webhosting-cdn',
        origin_configs=[
            cdn.SourceConfiguration(
                behaviors=[cdn.Behavior(is_default_behavior=True)],
                # Serve from the /build prefix of the bucket.
                origin_path='/build',
                s3_origin_source=cdn.S3OriginConfig(
                    s3_bucket_source=bucketName,
                    origin_access_identity=cdn.OriginAccessIdentity(
                        self, 'webhosting-origin')
                )
            )
        ],
        # SPA fallback: map common errors to the index page with HTTP 200.
        error_configurations=[
            cdn.CfnDistribution.CustomErrorResponseProperty(
                error_code=400,
                response_code=200,
                response_page_path='/'
            ),
            cdn.CfnDistribution.CustomErrorResponseProperty(
                error_code=403,
                response_code=200,
                response_page_path='/'
            ),
            cdn.CfnDistribution.CustomErrorResponseProperty(
                error_code=404,
                response_code=200,
                response_page_path='/'
            )
        ],
    )

    ## ssm params
    ssm.StringParameter(
        self, 'cdn-id',
        parameter_name=f'/{env_name}/cdn-id',
        string_value=cdn_id.distribution_id)
    ssm.StringParameter(
        self, 'cdn-url',
        parameter_name=f'/{env_name}/cdn-url',
        string_value=f'https://{cdn_id.distribution_domain_name}')
def __init__(
    self,
    scope: core.Construct,
    id: str,
    generate_exports_and_bundle: Callable[[], None],
    **kwargs,
) -> None:
    """Bundle the React app, then deploy it to S3 behind CloudFront.

    Args:
        scope: Parent construct.
        id: Construct id.
        generate_exports_and_bundle: Callback that writes the exports
            file and produces the client bundle before deployment.
    """
    super().__init__(scope, id, **kwargs)

    # Build the client bundle first so the asset directory exists.
    generate_exports_and_bundle()

    bucket = s3.Bucket(
        self,
        "ReactAppBucket",
        website_index_document="index.html",
        website_error_document="404.html",
    )
    s3_dep.BucketDeployment(
        self,
        "DeployNextJSReactApp",
        sources=[s3_dep.Source.asset(path.join("client", "out"))],
        destination_bucket=bucket,
    )

    oai = cloudfront.OriginAccessIdentity(self, "OAI")
    cfd = cloudfront.CloudFrontWebDistribution(
        self,
        "ReactAppDistribution",
        origin_configs=[{
            "s3OriginSource": {
                "s3BucketSource": bucket,
                "originAccessIdentity": oai,
            },
            "behaviors": [cloudfront.Behavior(is_default_behavior=True)],
        }],
    )

    # Only the CloudFront distribution may read from the bucket.
    bucket.grant_read(oai.grant_principal)

    core.CfnOutput(
        self, id="appurl",
        value=f"https://{cfd.distribution_domain_name}")
def create_cloudfront_distribution(
        self, cloudfront_alias: typing.Optional[dict] = None):
    r"""Create the CloudFront distribution for the SPA website.

    Every 404 is rewritten to ``/index.html`` with an HTTP 200, so any
    client-side route is served by the single-page app's index file.

    NOTE: If cloudfront_alias is defined, the default `security_policy`
    is aws_cloudfront.SecurityPolicyProtocol.TLS_V1_2_2018, which can be
    overridden by defining `security_policy` in the dictionary.

    Args:
        cloudfront_alias: Aliases your CloudFront distribution should
            use; a dictionary containing array(`names`) and
            str(`acm_cert_ref`). Defaults to None (no alias).
    """
    cloudfront_originaccesspolicy = cloudfront.OriginAccessIdentity(
        self,
        f"{self.__website_identifier}-originpolicy",
    )

    alias_configuration = None
    if cloudfront_alias is not None:
        # Copy so we don't mutate the caller's dictionary.
        cloudfront_alias = dict(cloudfront_alias)
        if 'security_policy' not in cloudfront_alias:
            cloudfront_alias['security_policy'] = (
                cloudfront.SecurityPolicyProtocol.TLS_V1_2_2018)
        # BUG FIX: the result used to be assigned back to
        # cloudfront_alias, so alias_configuration stayed None and the
        # aliases were silently dropped. (The old default value was
        # also the *type* typing.Optional[dict], which crashed the
        # membership test above when the argument was omitted.)
        alias_configuration = cloudfront.AliasConfiguration(
            **cloudfront_alias)

    self.cloudfront_distro = cloudfront.CloudFrontWebDistribution(
        self,
        id=f"{self.__website_identifier}-cloudfront",
        price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
        alias_configuration=alias_configuration,
        origin_configs=[
            cloudfront.SourceConfiguration(
                behaviors=[cloudfront.Behavior(is_default_behavior=True)],
                s3_origin_source=cloudfront.S3OriginConfig(
                    s3_bucket_source=self.website_assets_bucket,
                    origin_access_identity=cloudfront_originaccesspolicy))
        ],
        error_configurations=[
            cloudfront.CfnDistribution.CustomErrorResponseProperty(
                error_code=404,
                error_caching_min_ttl=0,
                response_code=200,
                response_page_path="/index.html")
        ])
def __init__(self, scope: core.Construct, id: str, aliases: List[str],
             certificate_arn: str, **kwargs) -> None:
    """Private bucket + CloudFront CDN on custom domains.

    Args:
        scope: Parent construct.
        id: Construct id.
        aliases: Domain names the distribution answers on.
        certificate_arn: ARN of the ACM certificate for those domains.
    """
    super().__init__(scope, id, **kwargs)

    bucket = s3.Bucket(self, 'Storage')

    # Only CloudFront (via the OAI) may read the bucket.
    origin_identity = cloudfront.OriginAccessIdentity(self, 'Identity')
    bucket.grant_read(origin_identity.grant_principal)

    certificate = acm.Certificate.from_certificate_arn(
        self, 'Certificate', certificate_arn=certificate_arn)

    distribution = cloudfront.CloudFrontWebDistribution(
        self,
        'CDN',
        price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
        alias_configuration=cloudfront.AliasConfiguration(
            names=aliases,
            acm_cert_ref=certificate.certificate_arn,
            security_policy=(
                cloudfront.SecurityPolicyProtocol.TLS_V1_2_2019),
        ),
        origin_configs=[
            cloudfront.SourceConfiguration(
                s3_origin_source=cloudfront.S3OriginConfig(
                    s3_bucket_source=bucket,
                    origin_access_identity=origin_identity,
                ),
                behaviors=[
                    cloudfront.Behavior(
                        # Cache between one day and one year.
                        default_ttl=core.Duration.days(1),
                        min_ttl=core.Duration.days(1),
                        max_ttl=core.Duration.days(365),
                        is_default_behavior=True,
                    )
                ])
        ])

    # Expose for other constructs in the app.
    self.bucket = bucket
    self.distribution = distribution
def __init__(self, scope: core.Construct, id: str,
             api: apigateway.RestApi, **kwargs) -> None:
    """Static site and REST API behind one CloudFront distribution.

    /stock/* is proxied (uncached) to the API stage; everything else is
    served from the S3 bucket. The site is published on
    finance.demo.training with a DNS-validated certificate.
    """
    super().__init__(scope, id, **kwargs)
    stack = core.Stack.of(self)

    # Content bucket, filled from ./src/html at deploy time.
    bucket = s3.Bucket(self, 'Storage')
    s3_deployment.BucketDeployment(
        self,
        'Deployment',
        sources=[
            s3_deployment.Source.asset('./src/html'),
        ],
        destination_bucket=bucket,
    )

    # Restrict bucket access to CloudFront via an OAI.
    origin_identity = cloudfront.OriginAccessIdentity(self, 'Identity')
    bucket.grant_read(origin_identity.grant_principal)

    s3_origin = cloudfront.SourceConfiguration(
        s3_origin_source=cloudfront.S3OriginConfig(
            s3_bucket_source=bucket,
            origin_access_identity=origin_identity,
        ),
        behaviors=[
            cloudfront.Behavior(
                default_ttl=core.Duration.days(1),
                min_ttl=core.Duration.days(1),
                max_ttl=core.Duration.days(31),
                is_default_behavior=True,
            )
        ])

    # API origin: no caching, query string forwarded and part of the
    # cache key ('start'/'end').
    api_origin = cloudfront.SourceConfiguration(
        origin_path='/{}'.format(api.deployment_stage.stage_name),
        custom_origin_source=cloudfront.CustomOriginConfig(
            domain_name='{}.execute-api.{}.{}'.format(
                api.rest_api_id, stack.region, stack.url_suffix),
        ),
        behaviors=[
            cloudfront.Behavior(
                default_ttl=core.Duration.seconds(0),
                min_ttl=core.Duration.seconds(0),
                max_ttl=core.Duration.seconds(0),
                path_pattern='/stock/*',
                forwarded_values={
                    'query_string': True,
                    'query_string_cache_keys': ['start', 'end']
                })
        ])

    domain_name = 'demo.training'
    subdomain = 'finance.{}'.format(domain_name)
    zone = route53.HostedZone.from_lookup(
        self,
        'Zone',
        domain_name=domain_name,
    )
    # CloudFront requires the certificate to live in us-east-1.
    certificate = acm.DnsValidatedCertificate(
        self,
        'Certificate',
        domain_name=subdomain,
        hosted_zone=zone,
        region='us-east-1',
    )

    distribution = cloudfront.CloudFrontWebDistribution(
        self,
        'CDN',
        price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
        origin_configs=[
            s3_origin,
            api_origin,
        ],
        alias_configuration=cloudfront.AliasConfiguration(
            acm_cert_ref=certificate.certificate_arn,
            names=[subdomain],
        ))

    # Alias record pointing the subdomain at the distribution.
    route53.ARecord(
        self,
        'DnsRecord',
        record_name=subdomain,
        target=route53.AddressRecordTarget.from_alias(
            alias_target=route53_targets.CloudFrontTarget(distribution)),
        zone=zone,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Aircraft-tracking stack.

    A Raspberry Pi (with a dedicated IAM user) pushes messages to SQS;
    a Lambda imports them into DynamoDB/Timestream; a second Lambda
    serves the data through API Gateway; a static website and the API
    are both published behind one CloudFront distribution. If the
    ``domain_name`` context key is set, the distribution also gets a
    certificate and an ``aircraft.<domain>`` alias record.
    """
    super().__init__(scope, id, **kwargs)

    # Shared timeout for SQS visibility and the Lambda functions.
    message_timeout = core.Duration.seconds(15)

    # Queue the Raspberry Pi writes to (long polling, 1h retention).
    queue = sqs.Queue(
        self,
        'Queue',
        visibility_timeout=message_timeout,
        receive_message_wait_time=core.Duration.seconds(20),
        retention_period=core.Duration.hours(1),
    )

    # DynamoDB table the web app reads from, keyed by ICAO address.
    icao_address = dynamodb.Attribute(
        name='IcaoAddress',
        type=dynamodb.AttributeType.STRING,
    )
    table = dynamodb.Table(
        self,
        'Table',
        partition_key=icao_address,
        billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
        removal_policy=core.RemovalPolicy.DESTROY,
    )

    # Timestream database/table for time-series records.
    database = timestream.CfnDatabase(
        self,
        'Database',
        database_name='aircraft-database',
    )
    table2 = timestream.CfnTable(
        self,
        'Table2',
        database_name=database.ref,
        table_name='aircraft-table',
        retention_properties={
            'MemoryStoreRetentionPeriodInHours': 1,
            'MagneticStoreRetentionPeriodInDays': 1,
        })

    # IAM user + access key for the Raspberry Pi (send-only).
    user = iam.User(self, 'RaspberryPi')
    queue.grant_send_messages(user)
    access_key = iam.CfnAccessKey(
        self,
        'AccessKey',
        user_name=user.user_name,
    )

    # Execution role shared by both Lambda functions.
    lambda_role = iam.Role(
        self,
        'LambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'),
        ],
    )
    lambda_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                'timestream:CancelQuery', 'timestream:DescribeEndpoints',
                'timestream:DescribeTable', 'timestream:ListMeasures',
                'timestream:Select', 'timestream:WriteRecords'
            ],
            resources=['*'],  # TODO: narrow down permissions
        ))
    table.grant_read_write_data(lambda_role)

    # SQS -> Lambda integration.
    event = lambda_event_sources.SqsEventSource(
        queue=queue,
        batch_size=10,
    )

    # Lambda that drains the queue and writes to the datastore.
    import_function = lambda_.Function(
        self,
        'ImportFunction',
        description='Reads SQS messages and writes to DynamoDB',
        runtime=lambda_.Runtime.PYTHON_3_8,
        code=lambda_.Code.from_asset('lambda_import/'),
        timeout=message_timeout,
        handler='index.handler',
        role=lambda_role,
        events=[event],
        environment={
            'TABLE_NAME': table2.ref,
        },
    )
    # TODO: add custom log group
    # TODO: add metric filters for number of succesfull updates and failed updates

    # Lambda that serves DynamoDB data to API Gateway.
    api_function = lambda_.Function(
        self,
        'ApiFunction',
        description='Reads from DynamoDB and returns to API GW',
        runtime=lambda_.Runtime.PYTHON_3_8,
        code=lambda_.Code.from_asset('lambda_api/'),
        timeout=message_timeout,
        handler='index.handler',
        role=lambda_role,
        environment={
            'TABLE_NAME': table.table_name,
        },
    )

    # REST API exposing GET /aircraft.
    api = apigateway.RestApi(
        self,
        'Api',
        endpoint_types=[apigateway.EndpointType.REGIONAL],
        cloud_watch_role=False,
    )
    aircraft_resource = api.root.add_resource('aircraft')
    aircraft_resource.add_method(
        http_method='GET',
        integration=apigateway.LambdaIntegration(
            api_function,
            proxy=True,
        ),
    )

    # Static website bucket, filled from ./html at deploy time.
    bucket = s3.Bucket(self, 'StaticWebsite')
    s3_deployment.BucketDeployment(
        self,
        'Deployment',
        sources=[
            s3_deployment.Source.asset('html/'),
        ],
        destination_bucket=bucket,
    )

    # Only CloudFront (via the OAI) may read the website bucket.
    origin_identity = cloudfront.OriginAccessIdentity(self, 'Identity')
    bucket.grant_read(origin_identity.grant_principal)

    # Distribution origins: S3 default + API behind /aircraft/*.
    s3_origin = cloudfront.SourceConfiguration(
        s3_origin_source=cloudfront.S3OriginConfig(
            s3_bucket_source=bucket,
            origin_access_identity=origin_identity,
        ),
        behaviors=[
            cloudfront.Behavior(
                default_ttl=core.Duration.days(0),
                min_ttl=core.Duration.days(0),
                max_ttl=core.Duration.days(31),
                is_default_behavior=True,
            )
        ])
    api_origin = cloudfront.SourceConfiguration(
        origin_path='/{}'.format(api.deployment_stage.stage_name),
        custom_origin_source=cloudfront.CustomOriginConfig(
            domain_name='{}.execute-api.{}.{}'.format(
                api.rest_api_id, self.region, self.url_suffix),
        ),
        behaviors=[
            cloudfront.Behavior(
                default_ttl=core.Duration.seconds(0),
                min_ttl=core.Duration.seconds(0),
                max_ttl=core.Duration.seconds(0),
                path_pattern='/aircraft/*',
            )
        ])

    # Optional custom domain via CDK context.
    domain_name = self.node.try_get_context('domain_name')
    if domain_name is None:
        alias_configuration = None
    else:
        subdomain = 'aircraft.{}'.format(domain_name)
        zone = route53.HostedZone.from_lookup(
            self,
            'Zone',
            domain_name=domain_name,
        )
        # CloudFront requires the certificate in us-east-1.
        certificate = acm.DnsValidatedCertificate(
            self,
            'Certificate',
            domain_name=subdomain,
            hosted_zone=zone,
            region='us-east-1',
        )
        alias_configuration = cloudfront.AliasConfiguration(
            acm_cert_ref=certificate.certificate_arn,
            names=[subdomain],
        )

    distribution = cloudfront.CloudFrontWebDistribution(
        self,
        'CDN',
        price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
        alias_configuration=alias_configuration,
        origin_configs=[
            s3_origin,
            api_origin,
        ],
    )

    # DNS record for the custom domain, when one was configured.
    if domain_name is not None:
        route53.ARecord(
            self,
            'DnsRecord',
            record_name=subdomain,
            target=route53.AddressRecordTarget.from_alias(
                alias_target=route53_targets.CloudFrontTarget(
                    distribution)),
            zone=zone,
        )

    # Values the Raspberry Pi needs to publish messages.
    core.CfnOutput(
        self,
        'QueueUrl',
        value=queue.queue_url,
    )
    core.CfnOutput(
        self,
        'AccessKeyId',
        value=access_key.ref,
    )
    core.CfnOutput(
        self,
        'SecretAccessKey',
        value=access_key.attr_secret_access_key,
    )
    core.CfnOutput(
        self,
        'Region',
        value=self.region,
    )
def __init__(self, scope: core.Construct, id: str,
             artifact_bucket: s3.Bucket, **kwargs) -> None:
    """MFA demo: Cognito user pool, Go Lambda API, and a static website.

    Args:
        scope: Parent construct.
        id: Construct id.
        artifact_bucket: Bucket holding the pre-built Lambda zip
            (Server/main.zip).
    """
    super().__init__(scope, id, **kwargs)

    # User pool with optional MFA (TOTP or SMS).
    pool = cognito.UserPool(
        scope=self,
        id="user-pool",
        mfa=cognito.Mfa.OPTIONAL,
        mfa_second_factor=cognito.MfaSecondFactor(otp=True, sms=True),
        password_policy=cognito.PasswordPolicy(
            min_length=12,
            require_lowercase=True,
            require_uppercase=False,
            require_digits=False,
            require_symbols=False,
        ))
    client = pool.add_client(
        id="customer-app-client",
        auth_flows=cognito.AuthFlow(user_password=True,
                                    refresh_token=True),
    )

    # Go backend, loaded from a pre-built artifact in S3.
    backend = _lambda.Function(
        scope=self,
        id="api-function",
        runtime=_lambda.Runtime.GO_1_X,
        handler="main",
        memory_size=500,
        timeout=core.Duration.seconds(10),
        environment={
            "USER_POOL_ID": pool.user_pool_id,
            "CLIENT_ID": client.user_pool_client_id,
        },
        code=_lambda.Code.from_bucket(
            bucket=artifact_bucket,
            key="Server/main.zip",
        ),
    )
    # Allow the backend to drive the Cognito auth/MFA flows.
    backend.add_to_role_policy(
        statement=iam.PolicyStatement(
            actions=[
                "cognito-idp:RespondToAuthChallenge",
                "cognito-idp:InitiateAuth",
                "cognito-idp:SetUserMFAPreference",
                "cognito-idp:AssociateSoftwareToken",
                "cognito-idp:VerifySoftwareToken"
            ],
            resources=[pool.user_pool_arn]))

    # Regional REST API proxying everything to the backend, CORS open.
    api = apigateway.LambdaRestApi(
        scope=self,
        id="mfa-api",
        handler=backend,
        endpoint_types=[apigateway.EndpointType.REGIONAL],
        default_cors_preflight_options=apigateway.CorsOptions(
            allow_origins=["*"]))
    self.api = api
    self.backend_fn = backend

    # Static website bucket behind CloudFront.
    static_website_bucket = s3.Bucket(
        scope=self,
        id="static-website-bucket",
    )
    self.static_website_bucket = static_website_bucket

    distribution = cloudfront.CloudFrontWebDistribution(
        scope=self,
        id="static-website-distribution",
        default_root_object="index.html",
        origin_configs=[
            cloudfront.SourceConfiguration(
                s3_origin_source=cloudfront.S3OriginConfig(
                    s3_bucket_source=static_website_bucket,
                    origin_access_identity=cloudfront.OriginAccessIdentity(
                        scope=self,
                        id="origin-access-identity",
                    )),
                behaviors=[cloudfront.Behavior(is_default_behavior=True)])
        ],
    )
def __init__(self, scope: core.Construct, id: str,
             props: StaticSiteProps) -> None:
    """Static site: S3 bucket + CloudFront (optionally TLS) + Route53 alias.

    Args:
        scope: Parent construct.
        id: Construct id.
        props: Site settings (fqdn, certificate_arn, error_configuration,
            hosted_zone_id, output_name).
    """
    super().__init__(scope, id)
    fqdn = props.fqdn
    certificate_arn = props.certificate_arn
    error_configuration = props.error_configuration

    # Translate the raw error-config dicts into CloudFront properties.
    # An empty (or None) configuration means "use CloudFront defaults";
    # the old `len(...) == 0` check crashed on None.
    if not error_configuration:
        error_codes = None
    else:
        error_codes = [
            cloudfront.CfnDistribution.CustomErrorResponseProperty(
                error_code=error_config["error_code"],
                error_caching_min_ttl=error_config[
                    "error_caching_min_ttl"],
                response_code=error_config["response_code"],
                response_page_path=error_config["response_page_path"],
            )
            for error_config in error_configuration
        ]

    # Content bucket (reads go through the OAI, not public policy).
    site_bucket = s3.Bucket(
        self,
        "SiteBucket",
        bucket_name=fqdn + "-static-site",
        website_index_document="index.html",
        website_error_document="index.html",
        block_public_access=s3.BlockPublicAccess(block_public_policy=True),
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    self.bucket_name = fqdn + "-static-site"
    self.bucket_resource = site_bucket

    # Uses new method for OAI (still breaking changes) - https://github.com/aws/aws-cdk/pull/4491
    origin_access_identity = cloudfront.OriginAccessIdentity(
        self, "OriginIdentity")
    # Grant CloudFront (via the OAI) read access to the bucket.
    site_bucket.grant_read(origin_access_identity)
    core.CfnOutput(self, "Bucket", value=site_bucket.bucket_name)

    source_configuration = cloudfront.SourceConfiguration(
        s3_origin_source=cloudfront.S3OriginConfig(
            s3_bucket_source=site_bucket,
            origin_access_identity=origin_access_identity,
        ),
        behaviors=[cloudfront.Behavior(is_default_behavior=True)],
    )

    # With an ACM certificate the site is served over HTTPS on the fqdn;
    # without one the distribution runs without an alias/SSL config.
    if certificate_arn:
        alias_configuration = cloudfront.AliasConfiguration(
            acm_cert_ref=certificate_arn,
            names=[fqdn],
            ssl_method=cloudfront.SSLMethod.SNI,
            security_policy=(
                cloudfront.SecurityPolicyProtocol.TLS_V1_1_2016),
        )
        distribution = cloudfront.CloudFrontWebDistribution(
            self,
            "SiteDistribution",
            alias_configuration=alias_configuration,
            error_configurations=error_codes,
            origin_configs=[source_configuration],
            viewer_protocol_policy=(
                cloudfront.ViewerProtocolPolicy.REDIRECT_TO_HTTPS),
        )
    else:
        distribution = cloudfront.CloudFrontWebDistribution(
            self,
            "SiteDistribution",
            origin_configs=[source_configuration],
            error_configurations=error_codes,
        )

    core.CfnOutput(
        self,
        "DistributionId",
        value=distribution.distribution_id,
        export_name=props.output_name,
    )

    # Route53 alias record for the CloudFront distribution.
    zone = route53.HostedZone.from_hosted_zone_attributes(
        self,
        id="HostedZoneID",
        hosted_zone_id=props.hosted_zone_id,
        zone_name=props.fqdn,
    )
    route53.ARecord(
        self,
        "SiteAliasRecord",
        record_name=fqdn,
        target=route53.RecordTarget.from_alias(
            alias_target=targets.CloudFrontTarget(distribution)),
        zone=zone,
    )
def create_origin_access_identity(self): return _cfront.OriginAccessIdentity(self, "oai", comment="Cloudfront access to S3")
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Resume-site stack.

    Creates a visitor hit-counter (DynamoDB + Lambda + API Gateway) and a
    CloudFront distribution (with ACM certificate and Route53 alias) in
    front of a pre-existing S3 website bucket imported by name.
    """
    super().__init__(scope, construct_id, **kwargs)

    # DynamoDB table backing the visitor hit counter.
    table = dynamo.Table(self, 'HitCounter',
                         partition_key={
                             'name': 'id',
                             'type': dynamo.AttributeType.STRING
                         })

    # Lambda that serves the counter; table name is injected via env var.
    counter = lmb.Function(
        self,
        'CounterHandler',
        runtime=lmb.Runtime.PYTHON_3_8,
        handler='counter.lambda_handler',
        # Code.asset() is deprecated; from_asset() is the supported name.
        code=lmb.Code.from_asset('resume_cdk/lambda'),
        environment={'TABLE_NAME': table.table_name},
    )
    lambdaapigw = apigw.LambdaRestApi(self, 'Endpoint', handler=counter)
    endpoint = core.CfnOutput(self, 'ApiEndpoint', value=lambdaapigw.url)
    table.grant_read_write_data(counter)

    # Existing website bucket, imported (not created) by name.
    website_bucket = s3.Bucket.from_bucket_name(self, 'BUCKET_NAME',
                                                bucket_name='BUCKET_NAME')
    deploy_website = s3_deploy.BucketDeployment(
        self,
        'DeployWebsiteCDK',
        sources=[s3_deploy.Source.asset("./website")],
        destination_bucket=website_bucket)

    hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
        self, 'HOST.CH', hosted_zone_id='HOST_ID', zone_name='HOST.CH')
    certificate = certmgr.Certificate.from_certificate_arn(
        self, 'CERT_HOST.CH', 'CERT_HOST.CH_ARN')

    # Origin Access Identity so only CloudFront can read the bucket.
    cdf_OAI = cdf.OriginAccessIdentity(self, 'OAI-user-s3', comment='OAI')
    grant_access = website_bucket.grant_read(cdf_OAI.grant_principal)
    s3_origin = cdf.S3OriginConfig(
        s3_bucket_source=website_bucket,
        origin_access_identity=cdf_OAI,
    )
    # BUG FIX: cdf.ViewerProtocolPolicy('REDIRECT_TO_HTTPS') raised
    # ValueError at synth time -- the enum's *value* is 'redirect-to-https',
    # not the member name. Reference the enum member directly.
    cdf_viewer_policy = cdf.ViewerProtocolPolicy.REDIRECT_TO_HTTPS
    cdf_behavior = cdf.Behavior(is_default_behavior=True)
    cdf_source_config = cdf.SourceConfiguration(
        behaviors=[cdf_behavior],
        s3_origin_source=s3_origin,
    )
    cdf_dist = cdf.CloudFrontWebDistribution(
        self,
        'ResumeDistribution',
        origin_configs=[cdf_source_config],
        viewer_certificate=cdf.ViewerCertificate.from_acm_certificate(
            certificate,
            aliases=['HOST.CH', 'ALIAS_HOST.CH'],
            security_policy=cdf.SecurityPolicyProtocol.TLS_V1,
            ssl_method=cdf.SSLMethod.SNI),
        viewer_protocol_policy=cdf_viewer_policy)

    # Alias record pointing the apex domain at the distribution.
    route_Aname = route53.ARecord(
        self,
        'ANAME',
        record_name='HOST.CH',
        target=route53.RecordTarget.from_alias(
            route53_targets.CloudFrontTarget(cdf_dist)),
        zone=hosted_zone,
        comment="insert a record")
def __init__( self, scope: core.Construct, id: str, env: core.Environment, **kwargs, ) -> None: super().__init__(scope, id, **kwargs) # S3 bucket to store website static files (HTML, CSS, JS...) static_bucket = aws_s3.Bucket( self, 'WebsiteStaticS3Bucket', bucket_name='slsblog-website-static', removal_policy=core.RemovalPolicy.DESTROY, ) cdn_logs_bucket = aws_s3.Bucket( self, 'CDNLogsS3Bucket', bucket_name='slsblog-cdn-logs', removal_policy=core.RemovalPolicy.DESTROY, ) # CloudFront origin identity to associate with the S3 bucket origin = aws_cloudfront.OriginAccessIdentity( self, 'SlsBlogS3OriginAccessIdentity', comment='Associated with serverless website static S3 bucket', ) self.cdn = aws_cloudfront.CloudFrontWebDistribution( self, 'SlsBlogCDN', comment='CDN for a full-stack serverless website', origin_configs=[ aws_cloudfront.SourceConfiguration( s3_origin_source=aws_cloudfront.S3OriginConfig( s3_bucket_source=static_bucket, origin_access_identity=origin, ), behaviors=[ aws_cloudfront.Behavior( is_default_behavior=True, min_ttl=core.Duration.hours(1), max_ttl=core.Duration.hours(24), default_ttl=core.Duration.hours(1), compress=True, ) ], ) ], default_root_object='index.html', enable_ip_v6=True, http_version=aws_cloudfront.HttpVersion.HTTP2, logging_config=aws_cloudfront.LoggingConfiguration( bucket=cdn_logs_bucket, include_cookies=True, ), price_class=aws_cloudfront.PriceClass.PRICE_CLASS_100, viewer_protocol_policy=aws_cloudfront.ViewerProtocolPolicy. REDIRECT_TO_HTTPS, # NOQA ) aws_s3_deployment.BucketDeployment( self, 'SlsBlogStaticS3Deployment', sources=[aws_s3_deployment.Source.asset('website_static')], destination_bucket=static_bucket, distribution=self.cdn, )
def __init__(self, scope: core.Construct, id: str, environment: str,
             domain: str, **kwargs) -> None:
    """
    StaticSiteStack creates the CloudFormation Stack that creates the
    resources necessary to host a static web site from an S3 Bucket with a
    CloudFront CDN and a custom domain name. Three separate stacks are
    created based on the environment variable ('dev', 'stg', 'prod').

    arguments:
    environment -- Deployment Environment, e.g. one of ('dev', 'stg', 'prod')
    domain -- custom domain name owned by user, e.g. my-domain.com
    """
    super().__init__(scope, id, **kwargs)

    # In the GitHub Actions Workflow, the Certificate is created using the
    # CertificateStack and its arn is set as an environment variable.
    self.certificate_arn = self.node.try_get_context("certificate_arn")

    bucket = s3.Bucket(self, f"{environment}bucket",
                       website_index_document="index.html",
                       removal_policy=core.RemovalPolicy.DESTROY,
                       block_public_access=s3.BlockPublicAccess.BLOCK_ALL)
    core.CfnOutput(self, "sitebucketname", value=bucket.bucket_name)

    oai = cf.OriginAccessIdentity(
        self,
        f"OriginIdentity-{environment}-{domain}",
    )

    alias_configuration = cf.AliasConfiguration(
        acm_cert_ref=self.certificate_arn,
        names=[f"{environment}.{domain}"],
        ssl_method=cf.SSLMethod.SNI,
        security_policy=cf.SecurityPolicyProtocol.TLS_V1_1_2016)

    # Config dictionary for CloudFront distributions; no caching takes
    # place in dev. The assumption is that dev content will be changed
    # frequently and those changes will be tested. To use alternative
    # sub-domains, change the keys of this dictionary to match the
    # sub-domains used in certificate_stack.py.
    cf_behavior_dict = {
        "dev":
        cf.Behavior(is_default_behavior=True,
                    min_ttl=core.Duration.seconds(0),
                    max_ttl=core.Duration.seconds(0),
                    default_ttl=core.Duration.seconds(0)),
        "stg": cf.Behavior(is_default_behavior=True),
        "prod": cf.Behavior(is_default_behavior=True)
    }

    source_config = cf.SourceConfiguration(
        s3_origin_source=cf.S3OriginConfig(s3_bucket_source=bucket,
                                           origin_access_identity=oai),
        behaviors=[cf_behavior_dict[environment]])

    cf_dist = cf.CloudFrontWebDistribution(
        self,
        f"{environment}-static-site-distribution",
        alias_configuration=alias_configuration,
        origin_configs=[source_config],
        viewer_protocol_policy=cf.ViewerProtocolPolicy.REDIRECT_TO_HTTPS)
    core.CfnOutput(self, "distid", value=cf_dist.distribution_id)

    # Route53 alias record for the CloudFront Distribution.
    hosted_zone = route53.HostedZone.from_lookup(
        self, "static-site-hosted-zone-id", domain_name=domain)
    # FIX: route53.AddressRecordTarget is a deprecated alias of
    # RecordTarget; use the supported name (consistent with the other
    # stacks in this file).
    route53.ARecord(self, 'static-site-alias-record',
                    record_name=f"{environment}.{domain}",
                    target=route53.RecordTarget.from_alias(
                        targets.CloudFrontTarget(cf_dist)),
                    zone=hosted_zone)
def __init__(self, scope: core.Construct, id: str, sub_domain: str,
             domain: str, **kwargs) -> None:
    """
    StaticSiteStack creates the CloudFormation Stack that creates the
    resources necessary to host a static web site from an S3 Bucket with a
    CloudFront CDN and a custom domain name.

    arguments:
    sub_domain -- sub domain name used for the dashboard url,
                  acg-covid-challenge
    domain -- custom domain name owned by user, e.g. my-domain.com
    """
    super().__init__(scope, id, **kwargs)

    # In the GitHub Actions Workflow, the Certificate is created using the
    # CertificateStack and its arn is set as an environment variable.
    self.certificate_arn = self.node.try_get_context("certificate_arn")

    bucket = s3.Bucket(self, f"{sub_domain}-bucket",
                       website_index_document="index.html",
                       removal_policy=core.RemovalPolicy.DESTROY,
                       block_public_access=s3.BlockPublicAccess.BLOCK_ALL
                       )
    core.CfnOutput(self, "sitebucketname", value=bucket.bucket_name)

    oai = cf.OriginAccessIdentity(
        self,
        f"OriginIdentity-{sub_domain}",
    )

    alias_configuration = cf.AliasConfiguration(
        acm_cert_ref=self.certificate_arn,
        names=[f"{sub_domain}.{domain}"],
        ssl_method=cf.SSLMethod.SNI,
        security_policy=cf.SecurityPolicyProtocol.TLS_V1_1_2016
    )

    source_config = cf.SourceConfiguration(
        s3_origin_source=cf.S3OriginConfig(
            s3_bucket_source=bucket,
            origin_access_identity=oai
        ),
        behaviors=[cf.Behavior(is_default_behavior=True)]
    )

    cf_dist = cf.CloudFrontWebDistribution(
        self,
        f"{sub_domain}-static-site-distribution",
        alias_configuration=alias_configuration,
        origin_configs=[source_config],
        viewer_protocol_policy=cf.ViewerProtocolPolicy.REDIRECT_TO_HTTPS
    )
    core.CfnOutput(self, "distid", value=cf_dist.distribution_id)

    # Route53 alias record for the CloudFront Distribution.
    hosted_zone = route53.HostedZone.from_lookup(
        self, f"{sub_domain}-hosted-zone-id", domain_name=domain)
    # FIX: route53.AddressRecordTarget is a deprecated alias of
    # RecordTarget; use the supported name (consistent with the other
    # stacks in this file).
    route53.ARecord(
        self,
        f'{sub_domain}-alias-record',
        record_name=f"{sub_domain}.{domain}",
        target=route53.RecordTarget.from_alias(
            targets.CloudFrontTarget(cf_dist)),
        zone=hosted_zone
    )
def __create_cloud_front_origin_access_identity( self) -> aws_cloudfront.OriginAccessIdentity: return aws_cloudfront.OriginAccessIdentity( self, 'CloudFrontOriginAccessIdentity', comment='cloudfront-only-acc-identity')
def __init__(self, scope: core.Construct, id: str, hosted_zone_id: str,
             hosted_zone_name: str, website_domain_name: str,
             certificate_in_us_east_1_arn: str, **kwargs) -> None:
    """Website stack with CI: S3 bucket + CloudFront + Route53 alias,
    plus a CodeCommit repo and a CodePipeline (CodeBuild) that builds and
    deploys the site to the bucket.

    The certificate ARN must reference a certificate in us-east-1, as
    required by CloudFront; this is validated up front.
    """
    super().__init__(scope, id, **kwargs)
    # Raise an exception if we get a certificate that doesn't live in
    # us-east-1 (check_us_east_1_cert is defined elsewhere in the project).
    check_us_east_1_cert(certificate_in_us_east_1_arn)
    # The S3 Bucket that will store our website
    website_bucket = s3.Bucket(self, "WebsiteBucket")
    # The Origin Access Identity is a way to allow CloudFront Access to
    # the Website Bucket
    origin_access_identity = cloudfront.OriginAccessIdentity(
        self, "OriginAccessIdentity",
        comment="Allows Read-Access from CloudFront")
    # We tell the website bucket to allow access from CloudFront
    website_bucket.grant_read(origin_access_identity)
    # Import the cert from the arn we get as a parameter
    tls_cert = certificatemanager.Certificate.from_certificate_arn(
        self, "Certificate", certificate_arn=certificate_in_us_east_1_arn)
    # We set up the CloudFront Distribution with the S3 Bucket as the
    # origin and our certificate
    cloudfront_distribution = cloudfront.CloudFrontWebDistribution(
        self,
        "WebsiteDistribution",
        origin_configs=[
            cloudfront.SourceConfiguration(
                s3_origin_source=cloudfront.S3OriginConfig(
                    s3_bucket_source=website_bucket,
                    origin_access_identity=origin_access_identity),
                behaviors=[
                    cloudfront.Behavior(is_default_behavior=True,
                                        default_ttl=core.Duration.hours(1))
                ],
            )
        ],
        error_configurations=[
            # Point CloudFront to our custom 404 error page when a 404
            # occurs
            cloudfront.CfnDistribution.CustomErrorResponseProperty(
                error_code=404,
                response_code=404,
                response_page_path="/404.html")
        ],
        viewer_certificate=cloudfront.ViewerCertificate.
        from_acm_certificate(certificate=tls_cert,
                             aliases=[website_domain_name]))
    # Set the DNS Alias for CloudFront
    hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
        self,
        "HostedZone",
        hosted_zone_id=hosted_zone_id,
        zone_name=hosted_zone_name)
    cloudfront_alias_record = route53.ARecord(
        self,
        "DNSAliasForCloudFront",
        zone=hosted_zone,
        target=route53.RecordTarget.from_alias(
            route53_targets.CloudFrontTarget(cloudfront_distribution)),
        record_name=website_domain_name,
    )
    # Repo for the website
    repository = codecommit.Repository(
        self,
        "Repository",
        repository_name=website_domain_name,
        description=f"Repository for the website {website_domain_name}")
    # CodeBuild project that runs buildspec.yml from the repo; the build
    # gets the site's base URL and target bucket as environment variables.
    website_build_project = codebuild.PipelineProject(
        self,
        "WebsiteBuild",
        build_spec=codebuild.BuildSpec.from_source_filename(
            "buildspec.yml"),
        environment=codebuild.BuildEnvironment(
            build_image=codebuild.LinuxBuildImage.STANDARD_4_0),
        environment_variables={
            "baseurl":
            codebuild.BuildEnvironmentVariable(
                value=f"https://{website_domain_name}/"),
            "bucket":
            codebuild.BuildEnvironmentVariable(
                value=website_bucket.bucket_name)
        })
    # The build itself syncs to the bucket, so it needs read/write.
    website_bucket.grant_read_write(website_build_project.grant_principal)
    source_output = codepipeline.Artifact()
    website_build_pipeline = codepipeline.Pipeline(
        self,
        "WebsiteBuildPipeline",
        stages=[
            # Check Out the Code From the Repo
            codepipeline.StageProps(
                stage_name="Source",
                actions=[
                    codepipeline_actions.CodeCommitSourceAction(
                        action_name="CheckoutCode",
                        repository=repository,
                        output=source_output)
                ]),
            # Build and deploy the Website to S3 (this uses the sync
            # command with the delete option, which the codebuild action
            # to deploy to S3 doesn't support)
            codepipeline.StageProps(
                stage_name="BuildAndDeploy",
                actions=[
                    codepipeline_actions.CodeBuildAction(
                        action_name="BuildAndDeployWebsite",
                        project=website_build_project,
                        input=source_output)
                ])
        ])
    # Display the Repo Clone URLs as the Stack Output
    core.CfnOutput(self,
                   "RepositoryCloneUrlSSH",
                   value=repository.repository_clone_url_ssh)
    core.CfnOutput(self,
                   "RepositoryCloneUrlHTTPS",
                   value=repository.repository_clone_url_http)
    # Display the website URL as the stack output
    core.CfnOutput(self,
                   "WebsiteUrl",
                   value=f"https://{website_domain_name}/")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Masks application stack: shared Lambda role, image-upload bucket,
    DynamoDB table, fetch/insert Lambdas, a REST API, and a CloudFront
    distribution serving the React frontend from S3.

    NOTE(review): the role below grants broad wildcard permissions
    (including iam:*) to the Lambdas -- consider scoping it down.
    """
    super().__init__(scope, id, **kwargs)
    # region IAM
    sa_role = aws_iam.Role(
        self,
        "Role",
        role_name="SaRole",
        assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"))
    sa_role.add_to_policy(
        aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                resources=["*"],
                                actions=[
                                    "cloudwatch:*", "s3:*", "logs:*",
                                    "dynamodb:*", "iam:*"
                                ]))
    # endregion IAM
    # region S3
    # Bucket that receives mask image uploads (triggers insert_lambda
    # below via the OBJECT_CREATED notification).
    mask_images_bucket = aws_s3.Bucket(self, 'MaskImagesBucket')
    # endregion S3
    # region DB
    masks_db = aws_dynamodb.Table(
        self,
        'MasksTable',
        table_name='Masks',
        partition_key=aws_dynamodb.Attribute(
            name='id', type=aws_dynamodb.AttributeType.STRING),
        sort_key=aws_dynamodb.Attribute(
            name='mask_name', type=aws_dynamodb.AttributeType.STRING),
        removal_policy=core.RemovalPolicy.DESTROY)
    # endregion DB
    # region Lambda
    # Both handlers live in the local 'lambda' directory and receive the
    # table name through the environment.
    fetch_lambda = _lambda.Function(
        self,
        'DynamoFetch',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.asset('lambda'),
        handler='dynamo_fetch.handler',
        environment=dict(DYNAMO_TABLE_NAME=masks_db.table_name),
        role=sa_role)
    insert_lambda = _lambda.Function(
        self,
        'DynamoInsert',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.asset('lambda'),
        handler='dynamo_insert.handler',
        environment=dict(DYNAMO_TABLE_NAME=masks_db.table_name),
        role=sa_role)
    # endregion
    # region API
    base_api = aws_apigateway.LambdaRestApi(
        self,
        'SaApi',
        rest_api_name='SaApi',
        handler=fetch_lambda,
        default_cors_preflight_options=aws_apigateway.CorsOptions(
            allow_origins=aws_apigateway.Cors.ALL_ORIGINS))
    # endregion API
    # region Frontend
    frontend_bucket = aws_s3.Bucket(self,
                                    "CreateReactAppBucket",
                                    website_index_document="index.html")
    frontend_src = aws_s3_deployment.BucketDeployment(
        self,
        "DeployCRA",
        sources=[
            aws_s3_deployment.Source.asset("../frontend/sa-app/build")
        ],
        destination_bucket=frontend_bucket)
    oia = aws_cloudfront.OriginAccessIdentity(self, 'OIA')
    frontend_bucket.grant_read(oia)
    # TTLs are all zero so frontend changes are visible immediately
    # (no CDN caching).
    cloudFront = aws_cloudfront.CloudFrontWebDistribution(
        self,
        "CDKCRAStaticDistribution",
        origin_configs=[
            aws_cloudfront.SourceConfiguration(
                s3_origin_source=aws_cloudfront.S3OriginConfig(
                    s3_bucket_source=frontend_bucket,
                    origin_access_identity=oia),
                behaviors=[
                    aws_cloudfront.Behavior(
                        is_default_behavior=True,
                        default_ttl=core.Duration.seconds(0),
                        max_ttl=core.Duration.seconds(0),
                        min_ttl=core.Duration.seconds(0))
                ])
        ])
    # endregion
    # region S3 triggers
    new_mask_image_notification = aws_s3_notifications.LambdaDestination(
        insert_lambda)
    mask_images_bucket.add_event_notification(
        aws_s3.EventType.OBJECT_CREATED, new_mask_image_notification)
def __init__(
    self,
    scope: core.Construct,
    id_: str,
    vpc_stack,
    elastic_stack,
    update_lambda_zip=False,
    **kwargs,
) -> None:
    """Kibana proxy stack.

    Builds (via docker) and deploys a Lambda that proxies Kibana, exposes
    it through API Gateway and CloudFront (with an S3 cache origin), and
    registers custom resources to empty the cache bucket and update the
    Lambda's environment variables.

    ``vpc_stack``/``elastic_stack`` are sibling stacks declared elsewhere
    in the project supplying the VPC and the Elasticsearch security group.
    """
    super().__init__(scope, id_, **kwargs)
    # if update lambda zip (including if zip doesn't exist)
    if (
        update_lambda_zip
        or not pathlib.Path(os.path.join(dirname, "kibana_lambda.zip")).exists()
    ):
        # rebuild the lambda if changed: build the image, copy the zip
        # out of a throwaway container, then remove the container.
        call(["docker", "build", "--tag", "kibana-lambda", "."], cwd=dirname)
        call(
            ["docker", "create", "-ti", "--name", "dummy", "kibana-lambda", "bash"],
            cwd=dirname,
        )
        call(["docker", "cp", "dummy:/tmp/kibana_lambda.zip", "."], cwd=dirname)
        call(["docker", "rm", "-f", "dummy"], cwd=dirname)
    # Private S3 bucket used by CloudFront as a cache origin (see the
    # "bucket_cached/*" behavior below).
    kibana_bucket = s3.Bucket(
        self,
        "kibana_bucket",
        public_read_access=False,
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    # tag the bucket
    core.Tag.add(kibana_bucket, "project", constants["PROJECT_TAG"])

    # the lambda behind the api
    kibana_lambda = lambda_.Function(
        self,
        "kibana_lambda",
        description="kibana api gateway lambda",
        code=lambda_.Code.from_asset(os.path.join(dirname, "kibana_lambda.zip")),
        handler="lambda_function.lambda_handler",
        timeout=core.Duration.seconds(300),
        runtime=lambda_.Runtime.PYTHON_3_8,
        vpc=vpc_stack.get_vpc,
        security_groups=[elastic_stack.elastic_security_group],
        log_retention=logs.RetentionDays.ONE_WEEK,
    )
    # tag the lambda
    core.Tag.add(kibana_lambda, "project", constants["PROJECT_TAG"])
    # create policies for the lambda
    kibana_lambda_policy = iam.PolicyStatement(
        effect=iam.Effect.ALLOW, actions=["s3:*",], resources=["*"],
    )
    # add the role permissions
    kibana_lambda.add_to_role_policy(statement=kibana_lambda_policy)

    # the api gateway
    kibana_api = apigw.LambdaRestApi(
        self, "kibana_api", handler=kibana_lambda, binary_media_types=["*/*"]
    )
    # tag the api gateway
    core.Tag.add(kibana_api, "project", constants["PROJECT_TAG"])

    kibana_identity = cloudfront.OriginAccessIdentity(self, "kibana_identity")

    # Split the API invoke URL ("https://<id>.execute-api.../prod/") into
    # the bare domain and the stage path for the CloudFront custom origin.
    kibana_api_domain = "/".join(kibana_api.url.split("/")[1:-2])[1:]
    kibana_api_path = f'/{"/".join(kibana_api.url.split("/")[-2:])}'

    # create the cloudfront distribution
    kibana_distribution = cloudfront.CloudFrontWebDistribution(
        self,
        "kibana_distribution",
        origin_configs=[
            # the lambda source for kibana
            cloudfront.SourceConfiguration(
                custom_origin_source=cloudfront.CustomOriginConfig(
                    domain_name=kibana_api_domain,
                    origin_protocol_policy=cloudfront.OriginProtocolPolicy.HTTPS_ONLY,
                ),
                origin_path="/prod",
                behaviors=[
                    cloudfront.Behavior(
                        is_default_behavior=True,
                        allowed_methods=cloudfront.CloudFrontAllowedMethods.ALL,
                        cached_methods=cloudfront.CloudFrontAllowedCachedMethods.GET_HEAD_OPTIONS,
                        compress=False,
                        # Forward everything Kibana needs: query strings,
                        # all cookies, and its version/name headers.
                        forwarded_values=CfnDistribution.ForwardedValuesProperty(
                            query_string=True,
                            cookies=CfnDistribution.CookiesProperty(forward="all"),
                            headers=[
                                "Content-Type",
                                "Accept",
                                "Accept-Encoding",
                                "kbn-name",
                                "kbn-version",
                            ],
                        ),
                    )
                ],
            ),
            # the s3 bucket source for kibana
            cloudfront.SourceConfiguration(
                s3_origin_source=cloudfront.S3OriginConfig(
                    s3_bucket_source=kibana_bucket,
                    origin_access_identity=kibana_identity,
                ),
                behaviors=[
                    cloudfront.Behavior(
                        is_default_behavior=False,
                        path_pattern="bucket_cached/*",
                        allowed_methods=cloudfront.CloudFrontAllowedMethods.GET_HEAD,
                        cached_methods=cloudfront.CloudFrontAllowedCachedMethods.GET_HEAD,
                        compress=True,
                    )
                ],
            ),
        ],
    )
    # tag the cloudfront distribution
    core.Tag.add(kibana_distribution, "project", constants["PROJECT_TAG"])
    # needs api and bucket to be available
    kibana_distribution.node.add_dependency(kibana_api)

    # kibana bucket empty policies
    kibana_bucket_empty_policy = [
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW, actions=["s3:ListBucket"], resources=["*"],
        ),
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["s3:DeleteObject",],
            resources=[f"{kibana_bucket.bucket_arn}/*"],
        ),
    ]
    # create the custom resource (CustomResource is a project-local
    # wrapper -- presumably backed by the helper script in HandlerPath;
    # verify against its definition elsewhere in the repo).
    kibana_bucket_empty = CustomResource(
        self,
        "kibana_bucket_empty",
        PhysicalId="kibanaBucketEmpty",
        Description="Empty kibana cache s3 bucket",
        Uuid="f7d4f730-4ee1-13e8-9c2d-fa7ae06bbebc",
        HandlerPath=os.path.join(dirname, "../helpers/s3_bucket_empty.py"),
        BucketName=kibana_bucket.bucket_name,
        ResourcePolicies=kibana_bucket_empty_policy,
    )
    # tag the lamdbda
    core.Tag.add(kibana_bucket_empty, "project", constants["PROJECT_TAG"])
    # needs a dependancy
    kibana_bucket_empty.node.add_dependency(kibana_bucket)

    # kibana lambda update policies
    kibana_lambda_update_policy = [
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "s3:ListBucket",
                "s3:ListAllMyBuckets",
                "lambda:ListFunctions",
                "lambda:UpdateFunctionConfiguration",
                "cloudfront:ListDistributions",
                "s3:GetBucketTagging",
                "es:ListDomainNames",
                "es:DescribeElasticsearchDomain",
            ],
            resources=["*"],
        )
    ]
    # create the kibana lambda update
    kibana_lambda_update = CustomResource(
        self,
        "kibana_lambda_update",
        Description="Update ENV vars for kibana api lambda",
        PhysicalId="kibanaLambdaUpdate",
        Uuid="f7d4f230-4ee1-07e8-9c2d-fa7ae06bbebc",
        HandlerPath=os.path.join(dirname, "../helpers/lambda_env_update.py"),
        ResourcePolicies=kibana_lambda_update_policy,
    )
    # tag the lamdbda
    core.Tag.add(kibana_lambda_update, "project", constants["PROJECT_TAG"])
    # needs a dependancy
    kibana_lambda_update.node.add_dependency(kibana_bucket)
    kibana_lambda_update.node.add_dependency(kibana_distribution)

    core.CfnOutput(
        self,
        "kibana_link",
        value=f"https://{kibana_distribution.domain_name}/_plugin/kibana",
        description="Kibana Web Url",
        export_name="kibana-link",
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # Define S3 bucket that will host site assets website_bucket = s3.Bucket( self, 'parthrparikh-com-assets-bucket', website_index_document='index.html', block_public_access=s3.BlockPublicAccess.BLOCK_ALL, ) # Deny non-SSL traffic website_bucket.add_to_resource_policy( iam.PolicyStatement( effect=iam.Effect.DENY, actions=["s3:*"], resources=[website_bucket.bucket_arn], conditions={'Bool': { 'aws:SecureTransport': False, }}, principals=[iam.AnyPrincipal()], )) s3_deploy.BucketDeployment( self, 'parthrparikh-com-deploy-website', sources=[s3_deploy.Source.asset('../website/')], destination_bucket=website_bucket, ) # Define certificate for parthrparikh.com cert = acm.Certificate(self, 'parthrparikh-com-cert', domain_name='parthrparikh.com', subject_alternative_names=[ 'www.parthrparikh.com', ]) # Define CloudFront distribution origin_access_identity = cf.OriginAccessIdentity( self, 'OriginAccessIdentity', comment='Personal website (parthrparikh.com) OAI to reach bucket', ) website_bucket.grant_read(origin_access_identity) distro = cf.CloudFrontWebDistribution( self, 'parthrparikh-com-distribution', origin_configs=[ cf.SourceConfiguration( s3_origin_source=cf.S3OriginConfig( s3_bucket_source=website_bucket, origin_access_identity=origin_access_identity), behaviors=[ cf.Behavior( is_default_behavior=True, default_ttl=core.Duration.minutes(10), max_ttl=core.Duration.hours(1), ) ], ), ], viewer_certificate=cf.ViewerCertificate.from_acm_certificate( certificate=cert, aliases=[ 'parthrparikh.com', 'www.parthrparikh.com', ]), viewer_protocol_policy=cf.ViewerProtocolPolicy.REDIRECT_TO_HTTPS, )
def __init__( self, scope: core.Construct, id: str, certificate_arn: str, hosted_zone_id, domain_name, **kwargs ) -> None: super().__init__(scope, id, **kwargs) hosted_zone = route53.HostedZone.from_hosted_zone_attributes( self, "HostedZone", hosted_zone_id=hosted_zone_id, zone_name=domain_name ) # since the bucket has already been created from previous deployments # we build the bucket object from attributes here rather than creating a # new bucket. # This change is required since we had to re-create this entire stack # after the bucket had been originally created. site_bucket = s3.Bucket.from_bucket_attributes( self, "SiteBucket", bucket_name=domain_name ) oai = cloudfront.OriginAccessIdentity(self, "OriginAccessIdentity") distribution = cloudfront.CloudFrontWebDistribution( self, "SiteDistribution", alias_configuration=cloudfront.AliasConfiguration( acm_cert_ref=certificate_arn, names=[domain_name], ), origin_configs=[ cloudfront.SourceConfiguration( s3_origin_source=cloudfront.S3OriginConfig( s3_bucket_source=site_bucket, origin_access_identity=oai ), behaviors=[cloudfront.Behavior(is_default_behavior=True)], ), ], ) # noinspection PyTypeChecker route53.ARecord( self, "SiteAliasRecord", record_name=domain_name, target=route53.AddressRecordTarget.from_alias( route53_targets.CloudFrontTarget(distribution) ), zone=hosted_zone, ) s3_deployment.BucketDeployment( self, "DeployWithInvalidation", sources=[s3_deployment.Source.asset("assets/elm/dst")], destination_bucket=site_bucket, distribution=distribution, distribution_paths=["/*"], )