def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Static frontend: S3 website bucket served through CloudFront.

    The deployment also invalidates the distribution so new assets are
    visible immediately.
    """
    super().__init__(scope, id, **kwargs)

    # The error page must be index.html as well so that NuxtJS routing
    # is triggered when a user opens the site via a direct permalink.
    # Reference: https://stackoverflow.com/a/47554827
    frontend_bucket = Bucket(
        self,
        "frontend",
        website_index_document="index.html",
        website_error_document="index.html",
        public_read_access=True,
    )

    # The CloudFront origin must be the S3 DNS name, not the bucket itself;
    # otherwise CloudFront cannot serve dynamic pages (e.g. /vote/{id}).
    # https://stackoverflow.com/a/59359038/7999204
    frontend_distribution = CloudFrontWebDistribution(
        self,
        "frontend-cdn",
        error_configurations=[
            CfnDistribution.CustomErrorResponseProperty(
                error_caching_min_ttl=0,
                error_code=403,
                response_code=200,
                response_page_path="/index.html",
            )
        ],
        origin_configs=[
            SourceConfiguration(
                custom_origin_source=CustomOriginConfig(
                    domain_name=frontend_bucket.bucket_domain_name,
                    origin_protocol_policy=OriginProtocolPolicy.HTTP_ONLY,
                ),
                behaviors=[Behavior(is_default_behavior=True)],
            )
        ],
    )

    # Upload the built frontend and invalidate every cached path.
    BucketDeployment(
        self,
        "DeployWithInvalidation",
        sources=[Source.asset("./frontend/dist")],
        destination_bucket=frontend_bucket,
        distribution=frontend_distribution,
        distribution_paths=["/*"],
    )

    core.CfnOutput(
        self, "cdn-domain", value=frontend_distribution.distribution_domain_name
    )
def create_deployment_bucket(stack, domain, bucket_name):
    """Create a public static-website bucket named after *domain* and deploy
    the contents of ``../../public/wwwroot`` into it.

    :param stack: owning CDK stack (used as construct scope)
    :param domain: physical bucket name (the site's domain)
    :param bucket_name: logical-id infix for the created constructs
    :return: the created :class:`Bucket`
    """
    bucket = Bucket(
        stack,
        f'RadiantLounge{bucket_name}Bucket',
        website_index_document='index.html',
        website_error_document="index.html",
        public_read_access=True,
        bucket_name=domain,
    )
    BucketDeployment(
        stack,
        f'RadiantLounge{bucket_name}DeployBucket',
        sources=[Source.asset('../../public/wwwroot')],
        destination_bucket=bucket,
    )
    return bucket
def __init__(self, scope: core.Construct, id: str, hosted_zone: IHostedZone, domain_name: str, **kwargs) -> None:
    """Static site at *domain_name*: DNS-validated certificate, S3 website
    bucket, CloudFront distribution with the custom domain, and a Route 53
    alias record, plus outputs for the CDN domain and bucket name.
    """
    super().__init__(scope, id, **kwargs)

    # Wildcard certificate that also covers the apex domain.
    certificate = DnsValidatedCertificate(
        self, 'Certificate',
        domain_name=f'*.{domain_name}',
        subject_alternative_names=[domain_name],
        hosted_zone=hosted_zone)

    bucket = Bucket(
        self, 'SiteBucket',
        bucket_name=domain_name,
        website_index_document='index.html',
        public_read_access=True,
        removal_policy=core.RemovalPolicy.DESTROY)

    # Origin is the S3 *website* endpoint, which only speaks HTTP.
    cloudfront_distribution = CloudFrontWebDistribution(
        self, 'CloudFrontDistribution',
        origin_configs=[
            SourceConfiguration(
                custom_origin_source=CustomOriginConfig(
                    domain_name=bucket.bucket_website_domain_name,
                    origin_protocol_policy=OriginProtocolPolicy.HTTP_ONLY,
                ),
                behaviors=[
                    Behavior(is_default_behavior=True,
                             default_ttl=core.Duration.hours(1))
                ],
            ),
        ],
        alias_configuration=AliasConfiguration(
            acm_cert_ref=certificate.certificate_arn,
            names=[domain_name],
        ))

    # Alias the domain to the CloudFront distribution.
    ARecord(
        self, 'DefaultRecord',
        target=RecordTarget(alias_target=CloudFrontTarget(
            distribution=cloudfront_distribution)),
        zone=hosted_zone,
        ttl=core.Duration.hours(1))

    # Upload site content; passing the distribution triggers invalidation.
    BucketDeployment(
        self, 'DeployWebsite',
        sources=[Source.asset('./site/public')],
        destination_bucket=bucket,
        distribution=cloudfront_distribution)

    core.CfnOutput(self, 'CloudFrontDomain',
                   value=cloudfront_distribution.distribution_domain_name)
    core.CfnOutput(self, 'BucketName', value=bucket.bucket_name)
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Versioned, public static-hosting bucket with the contents of the
    local ``./website-dist`` directory deployed into it."""
    super().__init__(scope, construct_id, **kwargs)

    website_bucket = s3.Bucket(
        self,
        "MyHostingBucketMF",
        versioned=True,
        public_read_access=True,
        website_index_document="index.html",
        removal_policy=cdk.RemovalPolicy.DESTROY,
    )

    BucketDeployment(
        self,
        "DeployStaticWebsite",
        sources=[Source.asset("./website-dist")],
        destination_bucket=website_bucket,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Public website bucket for the Angular build output, populated from
    the local dist directory."""
    super().__init__(scope, id, **kwargs)

    bucket = Bucket(
        self,
        "CdkAngularTemplateBucket",
        website_index_document="index.html",
        public_read_access=True,
        # Delete the bucket together with the stack.
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    BucketDeployment(
        self,
        "CdkAngularTemplateBucketDeployment",
        sources=[
            Source.asset("cdk-angular-template/dist/cdk-angular-template")
        ],
        destination_bucket=bucket,
    )
async def create_site_bucket(self, id: str) -> Bucket:
    """Create a public static-site bucket (CORS GET from any origin) and
    deploy the local ``site/`` directory into it.

    Fix: ``BucketDeployment`` takes a ``sources`` *list*, not a ``source``
    keyword — the previous ``source=deployment_source`` call would fail
    with an unexpected-keyword-argument error (every other deployment in
    this file already uses ``sources=[...]``).

    :param id: logical-id prefix for the created constructs
    :return: the created :class:`Bucket`
    """
    cors_rule = CorsRule(
        allowed_methods=[HttpMethods.GET],
        allowed_origins=['*'],
    )
    bucket = Bucket(
        self,
        '{}StaticBucket'.format(id),
        website_error_document='README.md',
        website_index_document='template.html',
        public_read_access=True,
        removal_policy=RemovalPolicy.DESTROY,
        cors=[cors_rule],
    )
    deployment_source = Source.asset('site/')
    BucketDeployment(
        self,
        '{}StaticDeployment'.format(id),
        destination_bucket=bucket,
        sources=[deployment_source],  # was: source=deployment_source (invalid kwarg)
        retain_on_delete=False,
    )
    return bucket
def __init__(self, scope: Construct, app_id: str, **kwargs) -> None:
    """Serve UI assets from an existing S3 bucket through a listener rule on
    an existing ALB, and publish a private Route 53 alias for the app host.
    """
    super().__init__(scope, app_id, **kwargs)

    # The bucket with the UI contents is reached via a listener rule on the ALB.
    api_domain_name = "static." + AWS_CONF["private_base_domain"]
    host_domain = f"{AWS_CONF['app_name']}.{AWS_CONF['private_base_domain']}"
    s3_path = AWS_CONF["app_name"]
    if AWS_CONF["deployment_stage"] == "tst":
        # Test deployments are namespaced per branch.
        host_domain = f"{AWS_CONF['branch_id']}." + host_domain
        s3_path += "-" + AWS_CONF["branch_path"]

    ui_bucket = Bucket.from_bucket_name(
        self,
        "UiBucket",
        bucket_name=AWS_CONF["optionals"]["ui_bucket"],
    )
    BucketDeployment(
        self,
        "UiBucketDepl",
        destination_bucket=ui_bucket,
        destination_key_prefix=s3_path,
        sources=[Source.asset(AWS_CONF["optionals"]["node_build_path"])],
    )

    # ALB rule for HTTP redirect to HTTPS on the existing load balancer.
    load_balancer_arn = Arn.format(
        components=ArnComponents(
            service="elasticloadbalancing",
            partition="aws",
            resource="loadbalancer/app",
            resource_name=AWS_CONF["optionals"]["alb"],
        ),
        stack=self,
    )
    alb = ApplicationLoadBalancer.from_lookup(
        self,
        "AlbApi",
        load_balancer_arn=load_balancer_arn,
    )
    listener_http = ApplicationListener.from_lookup(
        self,
        "AlbHttpListenerRule",
        load_balancer_arn=alb.load_balancer_arn,
        listener_port=80,
    )

    # Listener-rule priority is a mandatory input and has to be looked up;
    # while the cdk context is not populated yet, keep a fixed priority so
    # `cdk synth` still works.
    rule_priority = 1
    if AWS_CONF["env"]["account"] in listener_http.listener_arn:
        rule_priority = _next_elb_priority(host_domain, listener_http.listener_arn)

    # Attach the redirect rule to the existing HTTP listener.
    ApplicationListenerRule(
        self,
        f"ListenerRule{AWS_CONF['branch_id'].capitalize()}",
        listener=listener_http,
        priority=rule_priority,
        action=ListenerAction.redirect(
            host=api_domain_name,
            path=f"/ui/{s3_path}/index.html",
            permanent=True,
            port="443",
            protocol="HTTPS",
        ),
        conditions=[ListenerCondition.host_headers([host_domain])],
    )

    # Private-zone alias record pointing the app host at the ALB.
    ARecord(
        self,
        f"ARecord{AWS_CONF['branch_id'].capitalize()}",
        record_name=host_domain,
        target=RecordTarget(alias_target=LoadBalancerTarget(alb)),
        zone=PrivateHostedZone.from_lookup(
            self,
            "PrivZoneCorp",
            domain_name=AWS_CONF["private_base_domain"],
            private_zone=True,
            vpc_id=AWS_CONF["optionals"]["vpc"],
        ),
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Fargate petclinic: VPC, ECS cluster, ALB with one path-routed Fargate
    service per micro-service ('customers', 'vets', 'visits'), one DynamoDB
    table per service, a static website bucket, and a custom resource that
    rewrites the website config with the ALB URL and table names.

    Fix: listener-rule priorities were previously ``randint(1, 10) * len(s)``.
    The ranges for the three services overlap (e.g. both 'vets' and 'visits'
    can draw 36), and ALB listener rules require unique priorities, so the
    deployment could fail at random. Priorities are now assigned
    deterministically and uniquely (1, 2, 3) via ``enumerate``.
    """
    super().__init__(scope, id, **kwargs)

    # Network and cluster.
    vpc = Vpc(self, "MyVpc", max_azs=2)
    ecs_cluster = Cluster(self, 'FagateCluster', vpc=vpc)

    # Public ALB; services register path-based rules on this listener below.
    alb = ApplicationLoadBalancer(self, 'EcsLb', vpc=vpc, internet_facing=True)
    listener = alb.add_listener('EcsListener', port=80)
    listener.add_fixed_response('Default-Fix', status_code='404')
    # Force the raw CloudFormation default action to a fixed 404 response.
    listener.node.default_child.default_action = [{
        "type": "fixed-response",
        "fixedResponseConfig": {
            "statusCode": "404"
        }
    }]

    website_bucket = Bucket(
        self, 'PetclinicWebsite',
        website_index_document='index.html',
        public_read_access=True,
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    deployment = BucketDeployment(
        self, 'PetclinicDeployWebsite',
        sources=[Source.asset('./spring-petclinic-static')],
        destination_bucket=website_bucket,
        retain_on_delete=False,
        # destination_key_prefix='web/static'
    )

    # Permissions for the custom resource that modifies config.js in the
    # bucket and touches the DynamoDB tables.
    modify_policy = [
        PolicyStatement(
            actions=[
                "s3:PutObject", "s3:PutObjectAcl",
                "s3:PutObjectVersionAcl", "s3:GetObject"
            ],
            effect=Effect.ALLOW,
            resources=[website_bucket.bucket_arn + "/*"]),
        PolicyStatement(
            actions=["s3:ListBucket"],
            effect=Effect.ALLOW,
            resources=[website_bucket.bucket_arn]),
        PolicyStatement(
            actions=["dynamodb:*"],
            effect=Effect.ALLOW,
            resources=[
                "arn:aws:dynamodb:" + self.region + ":" + self.account + ":*"
            ]),
    ]

    with open("custom-resource-code/init.py", encoding="utf-8") as fp:
        code_body = fp.read()

    dynamodb_tables = []
    # Deterministic, collision-free listener priorities: 1, 2, 3.
    for priority, s in enumerate(['customers', 'vets', 'visits'], start=1):
        table = Table(
            self, s.capitalize() + 'Table',
            partition_key={
                'name': 'id',
                'type': AttributeType.STRING
            },
            removal_policy=core.RemovalPolicy.DESTROY,
            read_capacity=5,
            write_capacity=5,
        )
        dynamodb_tables.append(table.table_name)

        asset = DockerImageAsset(
            self, 'spring-petclinic-' + s,
            repository_name=self.stack_name + '-' + s,
            directory='./spring-petclinic-serverless/spring-petclinic-' + s + '-serverless',
            build_args={
                'JAR_FILE': 'spring-petclinic-' + s + '-serverless-2.0.7.jar'
            })

        ecs_task = FargateTaskDefinition(
            self, 'TaskDef-Fargate-' + s,
            memory_limit_mib=512,
            cpu=256)
        ecs_task.add_to_task_role_policy(
            PolicyStatement(actions=["dynamodb:*"],
                            effect=Effect.ALLOW,
                            resources=[table.table_arn]))
        ecs_task.add_to_task_role_policy(
            PolicyStatement(actions=['xray:*'],
                            effect=Effect.ALLOW,
                            resources=['*']))

        # Context path uses the singular form (e.g. /api/vet).
        env = {
            'DYNAMODB_TABLE_NAME': table.table_name,
            'SERVER_SERVLET_CONTEXT_PATH': '/api/' + s.rstrip('s')
        }
        ecs_container = ecs_task.add_container(
            'Container-' + s,
            image=ContainerImage.from_docker_image_asset(asset),
            logging=LogDriver.aws_logs(stream_prefix=s),
            environment=env)
        ecs_container.add_port_mappings(PortMapping(container_port=8080))

        # Sidecar container for X-Ray.
        ecs_sidecar_container = ecs_task.add_container(
            'Sidecar-Xray-' + s,
            image=ContainerImage.from_registry('amazon/aws-xray-daemon'))
        ecs_sidecar_container.add_port_mappings(
            PortMapping(container_port=2000, protocol=Protocol.UDP))

        ecs_service = FargateService(
            self, 'FargateService-' + s,
            cluster=ecs_cluster,
            service_name='spring-petclinic-' + s,
            desired_count=2,
            task_definition=ecs_task)

        path_pattern = '/api/' + s.rstrip('s') + '/*'
        check = HealthCheck(
            path='/api/' + s.rstrip('s') + '/manage',
            healthy_threshold_count=2,
            unhealthy_threshold_count=3,
        )
        target = listener.add_targets(
            'ECS-' + s,
            path_pattern=path_pattern,
            priority=priority,
            port=80,
            targets=[ecs_service],
            health_check=check)

    # Custom resource that rewrites config.js with the ALB URL and the
    # created table names (runs once, after all services are defined).
    resource = CustomResource(
        self, "S3ModifyCustomResource",
        provider=CustomResourceProvider.lambda_(
            SingletonFunction(
                self, "CustomResourceSingleton",
                uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                code=InlineCode(code_body),
                handler="index.handler",
                timeout=core.Duration.seconds(300),
                runtime=Runtime.PYTHON_3_7,
                initial_policy=modify_policy)),
        properties={
            "Bucket": website_bucket.bucket_name,
            "InvokeUrl": 'http://' + alb.load_balancer_dns_name + '/',
            "DynamoDBTables": dynamodb_tables
        })

    core.CfnOutput(
        self, "FagateALBUrl",
        export_name="FagateALBUrl",
        value=alb.load_balancer_dns_name)
    core.CfnOutput(
        self, "FagatePetclinicWebsiteUrl",
        export_name="FagatePetclinicWebsiteUrl",
        value=website_bucket.bucket_website_url)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """AppSync GraphQL petclinic API backed by an Aurora Serverless cluster
    (via the RDS Data API), plus a static website bucket and a custom
    resource that initializes the database and the website config.

    Resolver request/response mapping templates are read from the local
    ``./definition/template`` tree; the schema from ``./definition``.
    """
    super().__init__(scope, id, **kwargs)

    db_name = 'petclinic'
    db_cluster = 'petclinic-serverless-graphql'

    # GraphQL API and its API key.
    petclinic_graphql_api = CfnGraphQLApi(
        self, 'PetClinicApi',
        name='PetClinicApi',
        authentication_type='API_KEY')
    petclinic_graphql_key = CfnApiKey(
        self, 'ItemsApiKey',
        api_id=petclinic_graphql_api.attr_api_id)

    # Schema loaded from the local definition file.
    with open('./definition/petclinic.graphql', 'rt') as f:
        schema_def = f.read()
    petclinic_schema = CfnGraphQLSchema(
        self, 'PetclinicSchema',
        api_id=petclinic_graphql_api.attr_api_id,
        definition=schema_def)

    # Cluster credentials are generated in Secrets Manager.
    serverless_rds_secret = Secret(
        self, 'PetclinicRDSSecret',
        generate_secret_string=SecretStringGenerator(
            generate_string_key='password',
            secret_string_template='{"username":"******"}',
            exclude_characters='"@/',
            password_length=16))

    serverless_rds_cluster = CfnDBCluster(
        self, 'PetclinicRDSServerless',
        engine='aurora',
        database_name=db_name,
        db_cluster_identifier=db_cluster,
        engine_mode='serverless',
        master_username=serverless_rds_secret.secret_value_from_json('username').to_string(),
        master_user_password=serverless_rds_secret.secret_value_from_json('password').to_string(),
        scaling_configuration=CfnDBCluster.ScalingConfigurationProperty(
            min_capacity=1,
            max_capacity=2,
            auto_pause=False))
    serverless_rds_cluster.apply_removal_policy(core.RemovalPolicy.DESTROY)
    serverless_rds_arn = ('arn:aws:rds:' + self.region + ':' + self.account
                          + ':cluster:' + db_cluster)

    website_bucket = Bucket(
        self, 'PetclinicWebsite',
        website_index_document='index.html',
        public_read_access=True,
        removal_policy=core.RemovalPolicy.DESTROY)
    deployment = BucketDeployment(
        self, 'PetclinicDeployWebsite',
        sources=[Source.asset('../frontend/public')],
        destination_bucket=website_bucket,
        retain_on_delete=False,
        # destination_key_prefix='web/static'
    )

    # Shared policy statements: [0] secret read and [1] Data API access are
    # reused for the AppSync service role below; the rest are only for the
    # init custom resource.
    iam_policy = [
        PolicyStatement(
            actions=["secretsmanager:GetSecretValue"],
            effect=Effect.ALLOW,
            resources=[serverless_rds_secret.secret_arn]),
        PolicyStatement(
            actions=["rds-data:ExecuteStatement",
                     "rds-data:DeleteItems",
                     "rds-data:ExecuteSql",
                     "rds-data:GetItems",
                     "rds-data:InsertItems",
                     "rds-data:UpdateItems"],
            effect=Effect.ALLOW,
            resources=[serverless_rds_arn, serverless_rds_arn + ':*']),
        PolicyStatement(
            actions=["rds:*"],
            effect=Effect.ALLOW,
            resources=[serverless_rds_arn, serverless_rds_arn + ':*']),
        PolicyStatement(
            actions=["s3:PutObject", "s3:PutObjectAcl",
                     "s3:PutObjectVersionAcl", "s3:GetObject"],
            effect=Effect.ALLOW,
            resources=[website_bucket.bucket_arn + "/*"]),
        PolicyStatement(
            actions=["s3:ListBucket"],
            effect=Effect.ALLOW,
            resources=[website_bucket.bucket_arn]),
    ]

    # Custom resource that seeds the database and writes the API endpoint
    # and key into the website config.
    init_resource = CustomResource(
        self, "PetlinicInitCustomResource",
        provider=CustomResourceProvider.lambda_(
            SingletonFunction(
                self, "CustomResourceSingleton",
                uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                code=Code.from_asset('./custom-resource-code'),
                handler="index.handler",
                timeout=core.Duration.seconds(600),
                runtime=Runtime.PYTHON_3_7,
                initial_policy=iam_policy)),
        properties={
            "DBClusterIdentifier": db_cluster,
            "DBClusterArn": serverless_rds_arn,
            "DBSecretArn": serverless_rds_secret.secret_arn,
            "DBName": db_name,
            "Bucket": website_bucket.bucket_name,
            "GraphqlApi": petclinic_graphql_api.attr_graph_ql_url,
            "GraphqlKey": petclinic_graphql_key.attr_api_key
        })

    # Role AppSync assumes to read the secret and call the Data API.
    petclinic_rds_role = Role(
        self, 'PetclinicRDSRole',
        assumed_by=ServicePrincipal('appsync.amazonaws.com'))
    petclinic_rds_role.add_to_policy(iam_policy[0])
    petclinic_rds_role.add_to_policy(iam_policy[1])

    data_source = CfnDataSource(
        self, 'PetclinicRDSDatesource',
        api_id=petclinic_graphql_api.attr_api_id,
        type='RELATIONAL_DATABASE',
        name='PetclinicRDSDatesource',
        relational_database_config=CfnDataSource.RelationalDatabaseConfigProperty(
            relational_database_source_type='RDS_HTTP_ENDPOINT',
            rds_http_endpoint_config=CfnDataSource.RdsHttpEndpointConfigProperty(
                aws_region=self.region,
                aws_secret_store_arn=serverless_rds_secret.secret_arn,
                database_name='petclinic',
                db_cluster_identifier=serverless_rds_arn)),
        service_role_arn=petclinic_rds_role.role_arn)
    data_source.add_depends_on(petclinic_schema)
    data_source.add_depends_on(serverless_rds_cluster)

    # Unit resolvers for queries: one per request/response template pair.
    query_req_path = './definition/template/query/request/'
    query_res_path = './definition/template/query/response/'
    for req_file in os.listdir(query_req_path):
        query_name = req_file.split('.')[0]
        with open(query_req_path + req_file, 'rt') as f:
            query_req = f.read()
        with open(query_res_path + query_name + '.vm', 'rt') as f:
            query_res = f.read()
        pettypes_resolver = CfnResolver(
            self, query_name,
            api_id=petclinic_graphql_api.attr_api_id,
            type_name='Query',
            field_name=query_name,
            data_source_name=data_source.name,
            request_mapping_template=query_req,
            response_mapping_template=query_res)
        pettypes_resolver.add_depends_on(data_source)

    # Pipeline functions, keyed by template file name.
    func_dict = {}
    func_req_path = './definition/template/function/request/'
    func_res_path = './definition/template/function/response/'
    for req_file in os.listdir(func_req_path):
        func_name = req_file.split('.')[0]
        with open(func_req_path + req_file) as f:
            func_req = f.read()
        with open(func_res_path + func_name + '.vm') as f:
            func_res = f.read()
        func_dict[func_name] = CfnFunctionConfiguration(
            self, func_name,
            api_id=petclinic_graphql_api.attr_api_id,
            data_source_name=data_source.name,
            name=func_name,
            function_version='2018-05-29',
            request_mapping_template=func_req,
            response_mapping_template=func_res)
        func_dict[func_name].add_depends_on(data_source)

    # Pipeline resolvers composing the functions above.
    query_owner = CfnResolver(
        self, 'QueryOnwer',
        api_id=petclinic_graphql_api.attr_api_id,
        kind='PIPELINE',
        type_name='Query',
        field_name='owner',
        request_mapping_template="{}",
        response_mapping_template="$util.toJson($ctx.result)",
        pipeline_config=CfnResolver.PipelineConfigProperty(
            functions=[func_dict['Query_Owner_getOwnerById'].attr_function_id,
                       func_dict['Query_Owner_getPetsByOwner'].attr_function_id,
                       func_dict['Query_Owner_getVistsByPet'].attr_function_id]))
    query_owner.add_depends_on(func_dict['Query_Owner_getOwnerById'])
    query_owner.add_depends_on(func_dict['Query_Owner_getPetsByOwner'])
    query_owner.add_depends_on(func_dict['Query_Owner_getVistsByPet'])

    query_all_owners = CfnResolver(
        self, 'QueryAllOnwers',
        api_id=petclinic_graphql_api.attr_api_id,
        kind='PIPELINE',
        type_name='Query',
        field_name='owners',
        request_mapping_template="{}",
        response_mapping_template="$util.toJson($ctx.result)",
        pipeline_config=CfnResolver.PipelineConfigProperty(
            functions=[func_dict['Query_Owners_getAllOwners'].attr_function_id,
                       func_dict['Query_Owners_getPetsByOwner'].attr_function_id]))
    query_all_owners.add_depends_on(func_dict['Query_Owners_getAllOwners'])
    query_all_owners.add_depends_on(func_dict['Query_Owners_getPetsByOwner'])

    query_pet = CfnResolver(
        self, 'QueryPet',
        api_id=petclinic_graphql_api.attr_api_id,
        kind='PIPELINE',
        type_name='Query',
        field_name='pet',
        request_mapping_template="{}",
        response_mapping_template="$util.toJson($ctx.result)",
        pipeline_config=CfnResolver.PipelineConfigProperty(
            functions=[func_dict['Query_Pet_getPetById'].attr_function_id,
                       func_dict['Query_Pet_getVisitByPet'].attr_function_id]))
    query_pet.add_depends_on(func_dict['Query_Pet_getPetById'])
    query_pet.add_depends_on(func_dict['Query_Pet_getVisitByPet'])

    query_vets = CfnResolver(
        self, 'QueryVets',
        api_id=petclinic_graphql_api.attr_api_id,
        kind='PIPELINE',
        type_name='Query',
        field_name='vets',
        request_mapping_template="{}",
        response_mapping_template="$util.toJson($ctx.result)",
        pipeline_config=CfnResolver.PipelineConfigProperty(
            functions=[func_dict['Query_Vets_getVets'].attr_function_id,
                       func_dict['Query_Vets_getSpecByVets'].attr_function_id]))
    query_vets.add_depends_on(func_dict['Query_Vets_getVets'])
    query_vets.add_depends_on(func_dict['Query_Vets_getSpecByVets'])

    # Unit resolvers for mutations.
    mutation_req_path = './definition/template/mutation/request/'
    mutation_res_path = './definition/template/mutation/response/'
    for req_file in os.listdir(mutation_req_path):
        mutation_name = req_file.split('.')[0]
        with open(mutation_req_path + req_file) as f:
            func_req = f.read()
        with open(mutation_res_path + mutation_name + '.vm') as f:
            func_res = f.read()
        mutation = CfnResolver(
            self, mutation_name,
            api_id=petclinic_graphql_api.attr_api_id,
            type_name='Mutation',
            field_name=mutation_name,
            data_source_name=data_source.name,
            request_mapping_template=func_req,
            response_mapping_template=func_res)
        mutation.add_depends_on(data_source)

    core.CfnOutput(
        self, "GraphqlPetclinicWebsiteUrl",
        export_name="GraphqlPetclinicWebsiteUrl",
        value=website_bucket.bucket_website_url)
def __init__(
    self,
    scope: Construct,
    id: str,
    buckets: List[IBucket] = None,
    instance_type: str = "ml.t2.medium",
    instance_volume_size: int = 10,
    notebook_path: Union[Path, None] = None,
    notebook_destination_bucket: IBucket = None,
    notebook_destination_prefix: str = None,
):
    """SageMaker notebook instance with an OnStart lifecycle script and an
    optional deployment of local notebooks to S3.

    Fixes:
    - The instance role's ``ForecastBucketAccessPolicy`` was built from the
      raw ``buckets`` argument, which defaults to ``None``, even though the
      normalized ``self.buckets`` (never ``None``) had just been computed;
      it now uses ``self.buckets``.
    - The ``lifecycle_config`` name was shadowed (file handle, then the CFN
      construct); the file handle is renamed.

    :param buckets: forecast data buckets the notebook needs access to
    :param notebook_path: local directory of notebooks to deploy (optional)
    """
    super().__init__(scope, id)
    self.buckets = buckets if buckets else []
    self.deployment = None
    self.instance = None
    self.policies = NotebookInlinePolicies(self)

    # Permissions for the notebook instance. Use the normalized
    # self.buckets so the policy builder never receives None.
    notebook_role = iam.Role(
        self,
        "InstanceRole",
        assumed_by=iam.ServicePrincipal("sagemaker.amazonaws.com"),
        inline_policies={
            "SagemakerNotebookCloudWatchLogs": self.policies.cloudwatch_logs_write(),
            "ForecastBucketAccessPolicy": self.policies.s3_access(self.buckets),
            "SagemakerNotebookListTags": self.policies.sagemaker_tags_read(),
            "NotebookBucketAccessPolicy": self.policies.s3_solutions_access(),
        },
    )

    # Lifecycle configuration: the contents of lifecycle_config.py become
    # the OnStart script (base64-encoded by CloudFormation).
    lifecycle_config_path = os.path.join(
        os.path.dirname(__file__), "lifecycle_config.py"
    )
    with open(lifecycle_config_path) as lifecycle_config_file:
        lifecycle_config_code = lifecycle_config_file.read()

    lifecycle_config = CfnNotebookInstanceLifecycleConfig(self, "LifecycleConfig")
    lifecycle_config.add_property_override(
        "OnStart", [{"Content": {"Fn::Base64": lifecycle_config_code}}]
    )

    # Notebook instance; tags are read by the lifecycle script.
    # NOTE(review): notebook_destination_bucket defaults to None but is
    # dereferenced unconditionally here — callers appear required to pass
    # it; confirm before tightening the signature.
    self.instance = CfnNotebookInstance(
        self,
        "NotebookInstance",
        notebook_instance_name=f"{Aws.STACK_NAME}-aws-forecast-visualization",
        instance_type=instance_type,
        role_arn=notebook_role.role_arn,
        volume_size_in_gb=instance_volume_size,
        lifecycle_config_name=lifecycle_config.attr_notebook_instance_lifecycle_config_name,
        tags=[
            CfnTag(
                key="FORECAST_BUCKET",
                value=Fn.base64(notebook_destination_bucket.bucket_name),
            ),
            CfnTag(
                key="NOTEBOOK_BUCKET",
                value=self.get_notebook_source(notebook_destination_bucket),
            ),
            CfnTag(
                key="NOTEBOOK_PREFIX",
                value=self.get_notebook_prefix(),
            ),
        ],
    )
    add_cfn_nag_suppressions(
        self.instance,
        [
            CfnNagSuppression(
                "W1201",
                "Require access to all resources; Not all Amazon Forecast resources support resource based policy",
            )
        ],
    )
    self.instance.override_logical_id("NotebookInstance")

    # Deploy local notebooks only when a complete destination is given.
    if (notebook_path
            and notebook_destination_prefix
            and notebook_destination_bucket):
        assets = [Source.asset(path=str(notebook_path))]
        self.deployment = BucketDeployment(
            self,
            "Notebooks",
            destination_bucket=notebook_destination_bucket,
            destination_key_prefix=notebook_destination_prefix,
            sources=assets,
        )