def __init__(
    self,
    scope: core.Construct,
    id: str,
    function_name: str,
    handler: str,
    config_bucket: aws_s3.Bucket,
    state_table: aws_dynamodb.Table,
    dependency_layer: aws_lambda.LayerVersion,
    api: aws_apigateway.RestApi,
    endpoint: str,
) -> None:
    super().__init__(scope, id)

    environment = {
        'bridge_env': 'PROD',
        'bridge_config': f's3://{config_bucket.bucket_name}/bridge.json',
        'state_dynamodb_table': state_table.table_name,
    }

    self.function = aws_lambda.Function(
        self,
        function_name,
        function_name=function_name,
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        layers=[dependency_layer],
        code=code_asset,  # `code_asset` is defined elsewhere in the original module
        handler=handler,
        timeout=core.Duration.seconds(30),
        retry_attempts=0,
        environment=environment,
    )

    # expose the function as a POST endpoint on the shared REST API
    function_resource = api.root.add_resource(endpoint)
    function_resource.add_method(
        'POST', aws_apigateway.LambdaIntegration(handler=self.function))

    config_bucket.grant_read(self.function)
    state_table.grant_write_data(self.function)

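# Usage sketch (hypothetical, not from the original source): wiring the
# construct above into a stack. `BridgeLambda` is an assumed name for the
# class this __init__ belongs to; the bucket, table, layer, and API are
# assumed to be created elsewhere in the stack.
BridgeLambda(
    self,
    'StatusLambda',
    function_name='bridge-status',
    handler='status.handler',
    config_bucket=config_bucket,
    state_table=state_table,
    dependency_layer=dependency_layer,
    api=api,
    endpoint='status',  # exposed as POST /status
)
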
def __init__(self, scope, id, cluster: ecs.Cluster,
             tracks_table: dynamodb.Table, processing_queue: sqs.Queue,
             upload_bucket: s3.Bucket, **kwargs):
    super().__init__(scope, id, **kwargs)

    api_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'api'))

    self.api = ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        'http-api-service',
        cluster=cluster,
        task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_asset(directory=api_dir),
            container_port=8080,
            environment={
                'PROCESSING_QUEUE_URL': processing_queue.queue_url,
                'TRACKS_TABLE_NAME': tracks_table.table_name,
                'UPLOAD_BUCKET_NAME': upload_bucket.bucket_name,
            }),
        desired_count=2,
        cpu=256,
        memory_limit_mib=512)

    processing_queue.grant_send_messages(
        self.api.service.task_definition.task_role)
    tracks_table.grant_read_write_data(
        self.api.service.task_definition.task_role)
    upload_bucket.grant_put(self.api.service.task_definition.task_role)

def secure_bucket(self, name, suppressions=None, **kwargs):
    bucket = Bucket(self,
                    name,
                    removal_policy=RemovalPolicy.RETAIN,
                    encryption=BucketEncryption.S3_MANAGED,
                    block_public_access=BlockPublicAccess.BLOCK_ALL,
                    **kwargs)

    # deny any object access that does not use HTTPS
    bucket.add_to_resource_policy(
        iam.PolicyStatement(
            sid="HttpsOnly",
            resources=[bucket.arn_for_objects("*")],
            actions=["*"],
            effect=iam.Effect.DENY,
            principals=[iam.AnyPrincipal()],
            conditions={"Bool": {"aws:SecureTransport": False}},
        ))

    bucket_cfn = bucket.node.default_child  # type: CfnResource
    bucket_cfn.override_logical_id(name)
    if suppressions:
        add_cfn_nag_suppressions(bucket_cfn, suppressions)

    return bucket

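# Usage sketch (assumption): creating a hardened bucket and suppressing a
# cfn_nag finding. The suppression shape shown here is illustrative only;
# the real format depends on the add_cfn_nag_suppressions helper, which is
# not shown in the source. Extra kwargs are forwarded to the Bucket
# constructor.
logs_bucket = self.secure_bucket(
    "LogsBucket",
    suppressions=[{"id": "W35", "reason": "This is itself a logging bucket"}],
    versioned=True,
)
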
def __init__(
    self,
    scope: Construct,
    stack_id: str,
    *,
    deploy_env: str,
    storage_bucket: aws_s3.Bucket,
    **kwargs: Any,
) -> None:
    super().__init__(scope, stack_id, **kwargs)

    # AccountPrincipal expects the account ID as a string
    account_principal = aws_iam.AccountPrincipal(account_id="276514628126")
    role = aws_iam.Role(
        self,
        "koordinates-read-role",
        role_name=f"koordinates-s3-access-read-{deploy_env}",
        assumed_by=account_principal,  # type: ignore[arg-type]
        external_id={"prod": "koordinates-jAddR"}.get(deploy_env, "koordinates-4BnJQ"),
        max_session_duration=MAX_SESSION_DURATION,
    )
    storage_bucket.grant_read(role)  # type: ignore[arg-type]

    Tags.of(self).add("ApplicationLayer", "lds")  # type: ignore[arg-type]

def __init__(self, scope: Construct, id: str, *, deployment: Deployment,
             **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    Tags.of(self).add("Application", self.application_name)
    Tags.of(self).add("Deployment", deployment.value)

    bucket_site = Bucket(
        self,
        "Site",
        block_public_access=BlockPublicAccess.BLOCK_ALL,
    )
    bucket_access_logs = Bucket(
        self,
        "AccessLogs",
        encryption=BucketEncryption.S3_MANAGED,
        block_public_access=BlockPublicAccess.BLOCK_ALL,
    )

    for subdomain_name in self.subdomain_names:
        func_version = lambda_edge.create_function(
            self,
            f"Redirect-{subdomain_name}-{deployment.value}",
            runtime=Runtime.NODEJS_10_X,
            handler="index.handler",
            code=Code.from_asset(f"./lambdas/redirect-{subdomain_name}"),
        )

        if subdomain_name == "grfsearch":
            S3CloudFrontV2(
                self,
                f"S3CloudFront-{subdomain_name}",
                subdomain_name=subdomain_name,
                bucket_site=bucket_site,
                bucket_access_logs=bucket_access_logs,
                edge_lambdas=[
                    EdgeLambda(
                        event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                        function_version=func_version,
                    ),
                ],
                forward_query_string=True,
                forward_query_string_cache_keys=["do", "q"],
            )
        else:
            S3CloudFront(
                self,
                f"S3CloudFront-{subdomain_name}",
                subdomain_name=subdomain_name,
                bucket_site=bucket_site,
                bucket_access_logs=bucket_access_logs,
                lambda_function_associations=[
                    LambdaFunctionAssociation(
                        event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                        lambda_function=func_version,
                    ),
                ],
            )

def authorize_output_bucket(self,
                            bucket: s3.Bucket,
                            objects_key_pattern: Optional[str] = None):
    if self._rehydrated and not self._mutable_instance_role:
        raise ReadOnlyEMRProfileError()

    bucket.grant_read_write(self._roles.instance_role,
                            objects_key_pattern).assert_success()
    return self

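# Usage sketch (assumption): because authorize_output_bucket returns self,
# authorizations can be chained fluently. `emr_profile`, `output_bucket`,
# and `logs_bucket` are hypothetical names.
(emr_profile
    .authorize_output_bucket(output_bucket)
    .authorize_output_bucket(logs_bucket, objects_key_pattern="logs/*"))
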
def add_endpoint(self, bucket: s3.Bucket, fn: Function):
    # create the queue, with a dead-letter queue for messages that fail
    # processing five times
    queue = sqs.Queue(self,
                      f'{fn.id_prefix}Queue',
                      dead_letter_queue=sqs.DeadLetterQueue(
                          max_receive_count=5,
                          queue=sqs.Queue(
                              self,
                              f'{fn.id_prefix}DLQ',
                              queue_name=f'{fn.queue_name}-dlq')),
                      queue_name=fn.queue_name)

    # create the receiver function
    # add the queue url as an environment variable
    receiver_function = lambda_.Function(
        self,
        f'{fn.id_prefix}ReceiverFunction',
        code=fn.function_code,
        environment={'QUEUE_URL': queue.queue_url},
        function_name=f'{fn.function_name_prefix}-receiver',
        handler=fn.receiver_function_handler,
        layers=[fn.function_dependencies_layer],
        # memory_size=256,
        runtime=lambda_.Runtime.PYTHON_3_8)

    # allow the receiver function to enqueue messages
    queue.grant_send_messages(receiver_function)

    # route requests to the receiver lambda
    self.api.add_routes(integration=apigw.LambdaProxyIntegration(
        handler=receiver_function),
                        methods=[fn.api_method],
                        path=fn.api_path)

    # create the handler function
    # add the bucket name as an environment variable
    handler_function = lambda_.Function(
        self,
        f'{fn.id_prefix}HandlerFunction',
        code=fn.function_code,
        environment={'BUCKET_NAME': bucket.bucket_name},
        function_name=f'{fn.function_name_prefix}-handler',
        handler=fn.handler_function_handler,
        layers=[fn.function_dependencies_layer],
        # memory_size=256,
        runtime=lambda_.Runtime.PYTHON_3_8)

    # add the queue as a trigger for the handler function
    handler_function.add_event_source(SqsEventSource(queue))

    # allow the handler function to access the bucket
    bucket.grant_read_write(handler_function)

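# A minimal sketch (assumption) of the descriptor that add_endpoint reads
# from. The original `Function` type is not shown in the source; this
# dataclass simply names the attributes the method accesses, with
# illustrative example values in the comments.
from dataclasses import dataclass

@dataclass
class Function:
    id_prefix: str                                 # e.g. 'Transcode'
    queue_name: str                                # e.g. 'transcode-jobs'
    function_name_prefix: str                      # e.g. 'transcode'
    function_code: lambda_.Code                    # code asset shared by both functions
    function_dependencies_layer: lambda_.LayerVersion
    receiver_function_handler: str                 # e.g. 'receiver.handler'
    handler_function_handler: str                  # e.g. 'handler.handler'
    api_method: apigw.HttpMethod                   # e.g. apigw.HttpMethod.POST
    api_path: str                                  # e.g. '/transcode'
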
def _create_buckets(self):
    self.anidb_titles_bucket = Bucket(
        self,
        "anidb_titles_bucket",
        block_public_access=BlockPublicAccess(
            block_public_acls=True,
            block_public_policy=True,
        ),
        removal_policy=core.RemovalPolicy.DESTROY,
        lifecycle_rules=[
            LifecycleRule(expiration=Duration.days(3)),
        ],
    )

def __init__(self, scope: core.Construct, id: str, put_bucket: aws_s3.Bucket,
             **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # the Lambda function
    lambda_ = aws_lambda.Function(
        self,
        "SameResourceLambda",
        code=aws_lambda.Code.asset("lambdas/same_resource"),
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        handler="lambda_function.lambda_handler",
        environment={"PUT_BUCKET_NAME": put_bucket.bucket_name},
    )

    # the Lambda's permissions: allow it to write to the bucket
    put_bucket.grant_write(lambda_)

def __init__(self, scope: cdk.Construct, id: str, **kwargs):
    super().__init__(scope, id, **kwargs)

    bucketName = CfnParameter(self, "BucketName")

    self.template_options.metadata = {
        'AWS::CloudFormation::Interface': {
            'ParameterGroups': [{
                'Label': {
                    'default': 'Bucket Configuration'
                },
                'Parameters': [bucketName.logical_id]
            }],
            'ParameterLabels': {
                bucketName.logical_id: {
                    'default': 'Which name should the bucket have'
                }
            }
        }
    }

    bucket = Bucket(self, 'test-bucket', bucket_name=bucketName.value_as_string)

    CfnOutput(self,
              'S3Id',
              value=bucket.bucket_arn,
              export_name=Fn.sub('${AWS::StackName}-S3Id'))

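# Deploy-time usage (assumption): the BucketName parameter is supplied when
# the stack is deployed, e.g.
#
#   cdk deploy --parameters BucketName=my-example-bucket
#
# The ParameterGroups/ParameterLabels metadata only affects how the
# parameter is presented in the CloudFormation console; it does not change
# the synthesized resources.
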
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # configure S3 origin...
    bucket = Bucket(self,
                    "bucket",
                    block_public_access=BlockPublicAccess.BLOCK_ALL,
                    bucket_name=environ.get("BUCKET_NAME", None))
    identity = OriginAccessIdentity(self,
                                    "cloudFrontIAMUser",
                                    comment="cloud front identity")
    bucket.grant_read(identity)
    apply_removal_policy(bucket)

    default_behavior = Behavior(
        is_default_behavior=True,
        lambda_function_associations=[
            LambdaFunctionAssociation(
                lambda_function=self.init_lambda(),
                event_type=LambdaEdgeEventType.VIEWER_REQUEST)
        ])
    source_config = SourceConfiguration(
        s3_origin_source=S3OriginConfig(s3_bucket_source=bucket,
                                        origin_access_identity=identity),
        behaviors=[default_behavior],
    )

    # attach a certificate only when a custom domain is configured
    cert = None
    domain_name = environ.get("DOMAIN_NAME", None)
    if domain_name is not None:
        cert = ViewerCertificate.from_acm_certificate(
            self.init_certificate(domain_name),
            aliases=[domain_name],
            security_policy=SecurityPolicyProtocol.TLS_V1_2_2018)

    distribution = CloudFrontWebDistribution(self,
                                             "CloudFront",
                                             origin_configs=[source_config],
                                             viewer_certificate=cert)

def provision_buckets(self, name: str, s3: S3):
    self.buckets = {}
    for bucket, attrs in s3.buckets.items():
        use_sse_kms_key = False
        if attrs.sse_kms_key_id:
            use_sse_kms_key = True
            sse_kms_key = Key.from_key_arn(self, f"{bucket}-kms-key",
                                           attrs.sse_kms_key_id)

        self.buckets[bucket] = Bucket(
            self.scope,
            bucket,
            bucket_name=f"{name}-{bucket}",
            auto_delete_objects=attrs.auto_delete_objects
            and attrs.removal_policy_destroy,
            removal_policy=cdk.RemovalPolicy.DESTROY
            if attrs.removal_policy_destroy else cdk.RemovalPolicy.RETAIN,
            enforce_ssl=True,
            bucket_key_enabled=use_sse_kms_key,
            encryption_key=(sse_kms_key if use_sse_kms_key else None),
            encryption=(BucketEncryption.KMS
                        if use_sse_kms_key else BucketEncryption.S3_MANAGED),
        )

        # deny uploads that request the wrong server-side encryption algorithm
        self.buckets[bucket].add_to_resource_policy(
            iam.PolicyStatement(
                sid="DenyIncorrectEncryptionHeader",
                effect=iam.Effect.DENY,
                principals=[iam.ArnPrincipal("*")],
                actions=["s3:PutObject"],
                resources=[f"{self.buckets[bucket].bucket_arn}/*"],
                conditions={
                    "StringNotEquals": {
                        "s3:x-amz-server-side-encryption":
                        "aws:kms" if use_sse_kms_key else "AES256"
                    }
                },
            ))
        # deny uploads that do not request server-side encryption at all
        self.buckets[bucket].add_to_resource_policy(
            iam.PolicyStatement(
                sid="DenyUnEncryptedObjectUploads",
                effect=iam.Effect.DENY,
                principals=[iam.ArnPrincipal("*")],
                actions=["s3:PutObject"],
                resources=[f"{self.buckets[bucket].bucket_arn}/*"],
                conditions={
                    "Null": {
                        "s3:x-amz-server-side-encryption": "true"
                    }
                },
            ))

        self.s3_api_statement.add_resources(
            f"{self.buckets[bucket].bucket_arn}*")
        cdk.CfnOutput(self.scope,
                      f"{bucket}-output",
                      value=self.buckets[bucket].bucket_name)

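# A minimal sketch (assumption) of the configuration model consumed by
# provision_buckets. The real S3 and bucket-attribute classes are not shown
# in the source; these dataclasses just name the fields the method reads.
from dataclasses import dataclass, field
from typing import Dict, Optional

@dataclass
class S3BucketAttrs:
    sse_kms_key_id: Optional[str] = None  # KMS key ARN; None => SSE-S3 (AES256)
    auto_delete_objects: bool = False
    removal_policy_destroy: bool = False

@dataclass
class S3:
    buckets: Dict[str, S3BucketAttrs] = field(default_factory=dict)
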
def _create_s3_bucket(self, bucket_name: str) -> Bucket:
    bucket = Bucket(self,
                    'DemoBucket',
                    bucket_name=bucket_name,
                    removal_policy=RemovalPolicy.DESTROY)
    CfnOutput(self, 'S3_BUCKET_NAME', value=bucket.bucket_name)
    return bucket

def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    bucket = Bucket(
        scope=self,
        id='WorkshopBucketWithCDK',
        bucket_name='cloudvisor-workshop-bucket-with-cdk'
    )
    function = Function(
        scope=self,
        id='WorkshopFunctionWithCDK',
        function_name='WorkshopFunctionWithCDK',
        runtime=Runtime.PYTHON_3_6,
        handler='index.handler',
        code=Code.from_inline(
            'def handler(*args, **kwargs): print(args); return 200')
    )

    # invoke the function whenever an object is created in the bucket
    bucket.add_object_created_notification(LambdaDestination(function))

def __init__(self, scope: Construct, stack_id: str, *, env_name: str,
             storage_bucket: aws_s3.Bucket) -> None:
    super().__init__(scope, stack_id)

    # AccountPrincipal expects the account ID as a string
    account_principal = aws_iam.AccountPrincipal(account_id="276514628126")
    external_id = {
        PRODUCTION_ENVIRONMENT_NAME: "koordinates-jAddR"
    }.get(env_name, "koordinates-4BnJQ")
    role = aws_iam.Role(
        self,
        "koordinates-read-role",
        role_name=f"koordinates-s3-access-read-{env_name}",
        assumed_by=account_principal,  # type: ignore[arg-type]
        external_id=external_id,
        max_session_duration=MAX_SESSION_DURATION,
    )
    storage_bucket.grant_read(role)  # type: ignore[arg-type]

    Tags.of(self).add("ApplicationLayer", "lds")  # type: ignore[arg-type]

def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # The error page should be index.html as well,
    # so that NuxtJS routing is triggered
    # when the user opens a direct permalink.
    # Reference: https://stackoverflow.com/a/47554827
    frontend_bucket = Bucket(
        self,
        "frontend",
        website_index_document="index.html",
        website_error_document="index.html",
        public_read_access=True,
    )

    # The CloudFront origin should be the S3 DNS name, not the S3 bucket itself.
    # Otherwise, CloudFront cannot serve dynamic pages (e.g. the /vote/{id} page).
    # https://stackoverflow.com/a/59359038/7999204
    frontend_distribution = CloudFrontWebDistribution(
        self,
        "frontend-cdn",
        error_configurations=[
            CfnDistribution.CustomErrorResponseProperty(
                error_caching_min_ttl=0,
                error_code=403,
                response_code=200,
                response_page_path="/index.html",
            )
        ],
        origin_configs=[
            SourceConfiguration(
                custom_origin_source=CustomOriginConfig(
                    domain_name=frontend_bucket.bucket_domain_name,
                    origin_protocol_policy=OriginProtocolPolicy.HTTP_ONLY,
                ),
                behaviors=[Behavior(is_default_behavior=True)],
            )
        ],
    )

    BucketDeployment(
        self,
        "DeployWithInvalidation",
        sources=[Source.asset("./frontend/dist")],
        destination_bucket=frontend_bucket,
        distribution=frontend_distribution,
        distribution_paths=["/*"],
    )

    core.CfnOutput(
        self, "cdn-domain",
        value=frontend_distribution.distribution_domain_name
    )

def provide_access_to_artifacts(scope: core.Construct, *,
                                pipeline_def: Pipeline,
                                artifact_bucket: aws_s3.Bucket) -> None:
    # collect explicitly listed role ARNs plus every cross-account action role
    role_arns = set()
    for role_arn in pipeline_def.get("artifact_access", {}).get("role_arns", []):
        role_arns.add(role_arn)
    for stage_def in pipeline_def["stages"]:
        for action_def in stage_def["actions"]:
            if "role_arn" in action_def:
                account = core.Arn.parse(action_def["role_arn"]).account
                if account != core.Stack.of(scope).account:
                    role_arns.add(action_def["role_arn"])

    for role_arn in role_arns:
        artifact_bucket.add_to_resource_policy(
            aws_iam.PolicyStatement(
                actions=["s3:Get*"],
                resources=[artifact_bucket.arn_for_objects("*")],
                effect=aws_iam.Effect.ALLOW,
                principals=[aws_iam.ArnPrincipal(role_arn)],
            ))

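# A sketch (assumption) of the pipeline_def structure this function expects:
# an optional top-level "artifact_access" block plus per-action role ARNs.
# Account IDs and role names are illustrative.
pipeline_def = {
    "artifact_access": {
        "role_arns": ["arn:aws:iam::111111111111:role/ReadArtifacts"],
    },
    "stages": [
        {
            "actions": [
                # cross-account action roles get read access to the artifact bucket
                {"role_arn": "arn:aws:iam::222222222222:role/DeployRole"},
            ],
        },
    ],
}
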
def add_static_site(stack: core.Stack):
    stack.static_site_bucket = Bucket(stack,
                                      'StaticSiteBucket',
                                      website_index_document="index.html",
                                      website_error_document="error.html",
                                      public_read_access=True,
                                      removal_policy=RemovalPolicy.RETAIN)
    stack.static_bucket_deploy = s3_deployment.BucketDeployment(
        stack,
        "StaticSiteDeploy",
        sources=[s3_deployment.Source.asset("./www/static-site-content")],
        destination_bucket=stack.static_site_bucket)

def create_deployment_bucket(stack, domain, bucket_name):
    bucket = Bucket(stack,
                    f'RadiantLounge{bucket_name}Bucket',
                    website_index_document='index.html',
                    public_read_access=True,
                    bucket_name=domain,
                    website_error_document="index.html")
    BucketDeployment(stack,
                     f'RadiantLounge{bucket_name}DeployBucket',
                     sources=[Source.asset('../../public/wwwroot')],
                     destination_bucket=bucket)
    return bucket

def __init__(self, scope: core.Construct, id: str, hosted_zone: IHostedZone,
             domain_name: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    certificate = DnsValidatedCertificate(
        self,
        'Certificate',
        domain_name=f'*.{domain_name}',
        subject_alternative_names=[domain_name],
        hosted_zone=hosted_zone)

    bucket = Bucket(self,
                    'SiteBucket',
                    bucket_name=domain_name,
                    website_index_document='index.html',
                    public_read_access=True,
                    removal_policy=core.RemovalPolicy.DESTROY)

    cloudfront_distribution = CloudFrontWebDistribution(
        self,
        'CloudFrontDistribution',
        origin_configs=[
            SourceConfiguration(
                custom_origin_source=CustomOriginConfig(
                    domain_name=bucket.bucket_website_domain_name,
                    origin_protocol_policy=OriginProtocolPolicy.HTTP_ONLY,
                ),
                behaviors=[
                    Behavior(is_default_behavior=True,
                             default_ttl=core.Duration.hours(1))
                ],
            ),
        ],
        alias_configuration=AliasConfiguration(
            acm_cert_ref=certificate.certificate_arn,
            names=[domain_name],
        ))

    ARecord(self,
            'DefaultRecord',
            target=RecordTarget(alias_target=CloudFrontTarget(
                distribution=cloudfront_distribution)),
            zone=hosted_zone,
            ttl=core.Duration.hours(1))

    BucketDeployment(self,
                     'DeployWebsite',
                     sources=[Source.asset('./site/public')],
                     destination_bucket=bucket,
                     distribution=cloudfront_distribution)

    core.CfnOutput(self,
                   'CloudFrontDomain',
                   value=cloudfront_distribution.distribution_domain_name)
    core.CfnOutput(self, 'BucketName', value=bucket.bucket_name)

def create_redirect_bucket(stack, domain, bucket_name):
    bucket = Bucket(stack,
                    f'RadiantLoungeWWW{bucket_name}Bucket',
                    website_index_document='index.html',
                    public_read_access=True,
                    bucket_name=f"www.{domain}")
    BucketDeployment(stack,
                     f'RadiantLoungeWWW{bucket_name}DeployBucket',
                     sources=[],
                     destination_bucket=bucket,
                     website_redirect_location=domain)
    return bucket

def __init__(self, scope: App, id: str, envs: EnvSettings,
             components: ComponentsStack):
    super().__init__(scope, id)

    self.backend_domain_name = StringParameter.from_string_parameter_name(
        self,
        "DomainNameParameter",
        string_parameter_name="/schema-cms-app/DOMAIN_NAME").string_value
    self.backend_url = f"https://{self.backend_domain_name}/api/v1/"
    self.job_processing_queues = components.data_processing_queues

    self.app_bucket = Bucket.from_bucket_arn(
        self,
        id="App",
        bucket_arn=Fn.import_value(
            ApiStack.get_app_bucket_arn_output_export_name(envs)))
    self.resize_lambda_image_bucket = Bucket.from_bucket_arn(
        self,
        id="Images",
        bucket_arn=Fn.import_value(
            ImageResizeStack.get_image_resize_bucket_arn_output_export_name(envs)),
    )

    self.lambda_auth_token = Secret.from_secret_arn(
        self,
        id="lambda-auth-token",
        secret_arn=Fn.import_value(
            ApiStack.get_lambda_auth_token_arn_output_export_name(envs)),
    )

    self.functions = [
        self._create_lambda_fn(envs, memory_size, queue)
        for memory_size, queue in zip(envs.lambdas_sizes,
                                      self.job_processing_queues)
    ]

def __init__(self, scope, id, cluster: ecs.Cluster,
             tracks_table: dynamodb.Table, input_bucket: s3.Bucket,
             output_bucket: s3.Bucket, **kwargs):
    super().__init__(scope, id, **kwargs)

    worker_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'worker'))

    self.service = ecs_patterns.QueueProcessingFargateService(
        self,
        'separator-service',
        cluster=cluster,
        cpu=2048,
        memory_limit_mib=8192,
        image=ecs.ContainerImage.from_asset(directory=worker_dir),
        environment={
            'TRACKS_TABLE_NAME': tracks_table.table_name,
            'OUTPUT_BUCKET_NAME': output_bucket.bucket_name,
        })

    input_bucket.grant_read(self.service.task_definition.task_role)
    output_bucket.grant_write(self.service.task_definition.task_role)
    tracks_table.grant_read_write_data(
        self.service.task_definition.task_role)

def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) bucket = Bucket( self, "CdkAngularTemplateBucket", website_index_document="index.html", public_read_access=True, removal_policy=core.RemovalPolicy.DESTROY, # Stack削除と同時にバケットを削除する ) BucketDeployment( self, "CdkAngularTemplateBucketDeployment", sources=[ Source.asset("cdk-angular-template/dist/cdk-angular-template") ], destination_bucket=bucket)
def create_bucket(self, _access_logs_bucket: Bucket, stage):
    return Bucket(self,
                  "S3bucket",
                  bucket_name="staticsite202104" + stage,
                  encryption=BucketEncryption.S3_MANAGED,
                  removal_policy=RemovalPolicy.DESTROY,
                  auto_delete_objects=True,
                  versioned=True,
                  website_index_document="index.html",
                  website_error_document="index.html",
                  server_access_logs_bucket=_access_logs_bucket,
                  server_access_logs_prefix="gatsbystaticsite",
                  block_public_access=_s3.BlockPublicAccess(
                      block_public_policy=True,
                      block_public_acls=True,
                      ignore_public_acls=True,
                      restrict_public_buckets=True))

def __init__(self, scope):
    super().__init__(scope, "bug")

    bucket = Bucket.from_bucket_name(
        self, "artifacts", core.Fn.import_value("CodeArtifactsBucket"))
    pipeline_role = Role.from_role_arn(
        self, "pipeline", core.Fn.import_value("CodePipelineRole"))

    pipeline = Pipeline(
        self,
        "Pipeline",
        artifact_bucket=bucket,
        role=pipeline_role,
        stages=[
            StageProps(
                stage_name="Source",
                actions=[
                    GitHubSourceAction(
                        action_name="Source",
                        run_order=1,
                        oauth_token=core.SecretValue("something"),
                        output=Artifact(artifact_name="SourceArtifact"),
                        owner="me",
                        repo="repo",
                        branch="master",
                    )
                ],
            )
        ],
    )

    pipeline.add_stage(
        stage_name="Fails",
        actions=[
            LambdaInvokeAction(
                action_name="LambdaInvokeAction",
                run_order=1,
                lambda_=Function.from_function_arn(
                    self, "function", core.Fn.import_value("SomeFunction")),
            )
        ],
    )

def __create_cloud_front_www(
        self, origin_bucket_name: str, domain_name: str,
        alternative_domain_names: Optional[List[str]],
        ssl_certificate: aws_certificatemanager.Certificate,
        cache_policy: aws_cloudfront.CachePolicy,
        origin_access_identity: aws_cloudfront.OriginAccessIdentity,
        edge_lambda_viewer_request: aws_lambda.Version
) -> aws_cloudfront.Distribution:
    # de-duplicate the primary domain and any alternative domain names
    domain_names = alternative_domain_names if alternative_domain_names else []
    domain_names.append(domain_name)
    domain_names = set(domain_names)

    return aws_cloudfront.Distribution(
        self,
        'CloudFrontWWW',
        enabled=True,
        certificate=ssl_certificate,
        comment='CloudFront Distribution for your WWW static website',
        domain_names=list(domain_names),
        http_version=HttpVersion.HTTP2,
        price_class=PriceClass.PRICE_CLASS_100,
        default_behavior=BehaviorOptions(
            allowed_methods=AllowedMethods.ALLOW_GET_HEAD,
            cached_methods=CachedMethods.CACHE_GET_HEAD,
            cache_policy=cache_policy,
            viewer_protocol_policy=ViewerProtocolPolicy.REDIRECT_TO_HTTPS,
            origin=aws_cloudfront_origins.S3Origin(
                bucket=Bucket.from_bucket_name(
                    self, "OriginProd", bucket_name=origin_bucket_name),
                origin_access_identity=origin_access_identity,
                origin_path='/src/prod'),
            edge_lambdas=[
                EdgeLambda(event_type=LambdaEdgeEventType.VIEWER_REQUEST,
                           include_body=False,
                           function_version=edge_lambda_viewer_request)
            ]),
        error_responses=[
            ErrorResponse(ttl=Duration.seconds(300),
                          response_page_path='/404.html',
                          http_status=403,
                          response_http_status=404)
        ])

def __create_cloud_front_www_edit_path_for_origin_lambda(
        self, webflow_aws_setup_bucket: str,
        lambda_execution_role: aws_iam.Role) -> aws_lambda.Function:
    return aws_lambda.Function(
        self,
        'CloudFrontEditPathForOrigin',
        description='Appends the .html extension to universal paths, '
                    'preserving files with other extensions (e.g. .css)',
        handler='index.handler',
        runtime=aws_lambda.Runtime.NODEJS_12_X,
        timeout=Duration.seconds(5),
        memory_size=128,
        role=lambda_execution_role,
        code=Code.bucket(
            bucket=Bucket.from_bucket_name(
                self, "SourceBucketWWWEditPathForOriginLambda",
                bucket_name=webflow_aws_setup_bucket),
            key='lambda_function/cloudfront_www_edit_path_for_origin/package.zip'
        ))

def create_bucket(self, lambda_url):
    parsed_url = parse.urlparse(lambda_url)
    protocol_mapping = {
        "HTTP": RedirectProtocol.HTTP,
        "HTTPS": RedirectProtocol.HTTPS,
    }

    # on 404, redirect the request to the resize endpoint behind lambda_url
    return Bucket(
        self,
        "Images",
        public_read_access=True,
        website_index_document="index.html",
        website_routing_rules=[
            RoutingRule(
                condition=RoutingRuleCondition(
                    http_error_code_returned_equals="404"),
                protocol=protocol_mapping[parsed_url.scheme.upper()],
                host_name=parsed_url.netloc,
                replace_key=ReplaceKey.prefix_with("prod/resize?key="),
                http_redirect_code="307",
            )
        ],
    )

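# Usage sketch (assumption): pointing the 404 routing rule at an
# image-resizing endpoint, so a request for a missing object is redirected
# to prod/resize?key=<original key>. The endpoint URL is hypothetical.
images_bucket = self.create_bucket(
    "https://abc123.execute-api.us-east-1.amazonaws.com")
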
def add_react_build(stack: core.Stack, code_pipeline, source_output,
                    bucket_arn: str):
    # Could refactor the bucket to be part of the stage
    # https://github.com/aws-samples/aws-cdk-examples/blob/master/typescript/static-site/static-site.ts
    # Need to move to a stack / into startuptoolbag
    # The codebuild project could be moved back out into the pipeline (a bit awkward?)
    react_site_bucket = Bucket.from_bucket_arn(stack,
                                               id='SiteBucket',
                                               bucket_arn=bucket_arn)
    stack.build_output_artifact = codepipeline.Artifact()
    build_output_artifact = codepipeline.Artifact()

    codebuild_project = codebuild.PipelineProject(
        stack,
        "t-u-b-CDKCodebuild",
        project_name="t-u-b-CodebuildProject",
        build_spec=codebuild.BuildSpec.from_source_filename(
            filename='buildspec.yml'),
        environment=codebuild.BuildEnvironment(privileged=True),
        description='Pipeline for the-ultimate-boilerplate',
        timeout=core.Duration.minutes(60),
    )

    build_action = codepipeline_actions.CodeBuildAction(
        action_name="ReactBuild",
        project=codebuild_project,
        input=source_output,
        outputs=[build_output_artifact])

    s3_deploy = codepipeline_actions.S3DeployAction(
        action_name="ReactS3Push",
        input=build_output_artifact,
        bucket=react_site_bucket)

    # It would be more elegant to use a single stage, but the input to the
    # deploy action must be created in a prior stage.
    code_pipeline.add_stage(stage_name="ReactBuild", actions=[build_action])
    code_pipeline.add_stage(stage_name="ReactDeploy", actions=[s3_deploy])