def __init__(self, scope: core.Construct, construct_id: str, processing_bucket, processing_bucket_upload_prefix) -> None:
    """API stack that hands out pre-signed S3 upload URLs.

    :param processing_bucket: S3 bucket the signed URLs upload into.
    :param processing_bucket_upload_prefix: key prefix passed to the lambda.
    """
    super().__init__(scope, construct_id)

    # lambda to act as upload API handler
    lambda_name = 'image-pipeline-s3-url-generator'
    s3_url_generator_lambda = aws_lambda.Function(
        self,
        lambda_name,
        function_name=lambda_name,
        # NOTE(review): NODEJS_12_X is end-of-life — consider upgrading the
        # handler and runtime together.
        runtime=aws_lambda.Runtime.NODEJS_12_X,
        # Code.from_asset replaces the deprecated Code.asset alias.
        code=aws_lambda.Code.from_asset('lambda_functions/get_signed_s3_url'),
        handler='app.handler',
        environment={
            'UploadBucket': processing_bucket.bucket_name,
            'UploadPrefix': processing_bucket_upload_prefix,
        },
        timeout=core.Duration.minutes(3))
    # write access allows the lambda to generate signed urls
    processing_bucket.grant_write(s3_url_generator_lambda)

    # rest api endpoint to pass requests to lambda
    base_api = aws_apigateway.RestApi(self, 'ImageUpload',
                                      rest_api_name='ImageUpload')
    # we'll send uploads to the `images` resource, CORS must be allowed
    image_entity = base_api.root.add_resource(
        'images',
        default_cors_preflight_options=aws_apigateway.CorsOptions(
            allow_origins=aws_apigateway.Cors.ALL_ORIGINS))
    # hooks the endpoint up to the lambda above (non-proxy, so the CORS
    # header must be mapped explicitly on the integration response)
    image_entity_lambda_integration = aws_apigateway.LambdaIntegration(
        s3_url_generator_lambda,
        proxy=False,
        integration_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': "'*'",
            }
        }])
    # GET will be used to get presigned url
    image_entity.add_method(
        'GET',
        image_entity_lambda_integration,
        method_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': True,
            }
        }])
    # expose the api so sibling constructs can reference it
    self.api = base_api
def __init__(
        self,
        scope: core.Construct,
        id: str,  # pylint: disable=redefined-builtin
        lambda_notifications: aws_lambda.IFunction,
        social_log_group: aws_logs.ILogGroup,
        pagespeed_table: aws_dynamodb.ITable,
        **kwargs) -> None:
    """API stack: one Lambda behind a custom-domain LambdaRestApi.

    Grants the handler invoke access to the notifications lambda, read
    access to the social report log group, and read access to the
    pagespeed table.
    """
    super().__init__(scope, id, **kwargs)

    api_lambda = get_lambda(
        self,
        id,
        code=f'lib/stacks/{id}/{id}',
        handler='main.handler',
        environment={
            # env.get keeps this consistent with the CorsOptions fallback
            # below: previously a missing CORS_ALLOW_ORIGIN raised KeyError
            # here, which made that fallback unreachable dead code.
            'CORS_ALLOW_ORIGIN': env.get('CORS_ALLOW_ORIGIN', '*'),
            'PUSHOVER_TOKEN': env['PUSHOVER_TOKEN'],
            'PUSHOVER_USERKEY': env['PUSHOVER_USERKEY'],
            'LAMBDA_FUNCTIONS_LOG_LEVEL': 'INFO',
            'LAMBDA_NOTIFICATIONS': lambda_notifications.function_name,
            'PAGESPEED_TABLE': pagespeed_table.table_name,
            'REPORT_LOG_GROUP_NAME': social_log_group.log_group_name,
        },
    )
    lambda_notifications.grant_invoke(api_lambda)
    # read-only access to report logs and pagespeed data
    social_log_group.grant(api_lambda, "logs:GetLogEvents",
                           "logs:DescribeLogStreams")
    pagespeed_table.grant_read_data(api_lambda)

    # certificate + custom domain for the gateway
    cert = aws_certificatemanager.Certificate(
        self,
        f'{id}-certificate',
        domain_name=env['API_DOMAIN'],
    )
    domain = aws_apigateway.DomainNameOptions(
        certificate=cert,
        domain_name=env['API_DOMAIN'],
    )
    # restrict CORS to the configured origin when present, else allow all
    cors = aws_apigateway.CorsOptions(
        allow_methods=['POST'],
        allow_origins=[env['CORS_ALLOW_ORIGIN']]
        if "CORS_ALLOW_ORIGIN" in env else aws_apigateway.Cors.ALL_ORIGINS)
    aws_apigateway.LambdaRestApi(
        self,
        f'{id}-gateway',
        handler=api_lambda,
        domain_name=domain,
        default_cors_preflight_options=cors,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Stack with a single Python Lambda fronted by a CORS-enabled REST API."""
    super().__init__(scope, id, **kwargs)

    lambda_function = lb.Function(
        self,
        'streamlinkerfunction',
        runtime=lb.Runtime.PYTHON_3_8,
        # Code.from_asset replaces the deprecated Code.asset alias.
        code=lb.Code.from_asset('lambda'),
        handler='streamlinker.handler',
        timeout=core.Duration.seconds(30))
    # proxy API in front of the lambda; the construct registers itself with
    # the stack, so binding it to an unused local was unnecessary
    apigw.LambdaRestApi(
        self,
        'streamlinker',
        handler=lambda_function,
        rest_api_name='streamlinkerapi',
        default_cors_preflight_options=apigw.CorsOptions(
            allow_origins=apigw.Cors.ALL_ORIGINS))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Contact-form stack: one Lambda behind a custom-domain REST API."""
    super().__init__(scope, id, **kwargs)

    my_lambda = aws_lambda.Function(
        self,
        environ['CDK_APP_NAME'],
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        # Code.from_asset replaces the deprecated Code.asset alias.
        code=aws_lambda.Code.from_asset('lambda'),
        handler='contact_us.handler',
        environment={
            'CORS_ALLOW_ORIGIN': environ.get('CORS_ALLOW_ORIGIN', '*'),
            'PUSHOVER_API_ENDPOINT': environ['PUSHOVER_API_ENDPOINT'],
            'PUSHOVER_TOKEN': environ['PUSHOVER_TOKEN'],
            'PUSHOVER_USERKEY': environ['PUSHOVER_USERKEY'],
        },
        log_retention=aws_logs.RetentionDays.ONE_WEEK,
    )

    # certificate + custom domain for the gateway
    cert = aws_certificatemanager.Certificate(
        self,
        '{}-certificate'.format(environ['CDK_APP_NAME']),
        domain_name=environ['CDK_BASE_DOMAIN'],
    )
    domain = aws_apigateway.DomainNameOptions(
        certificate=cert,
        domain_name=environ['CDK_BASE_DOMAIN'],
    )
    # restrict CORS to the configured origin when present, else allow all
    cors = aws_apigateway.CorsOptions(
        allow_methods=["POST"],
        allow_origins=[environ['CORS_ALLOW_ORIGIN']]
        if environ.get('CORS_ALLOW_ORIGIN') else aws_apigateway.Cors.ALL_ORIGINS,
    )
    aws_apigateway.LambdaRestApi(
        self,
        '{}-gateway'.format(environ['CDK_APP_NAME']),
        handler=my_lambda,
        domain_name=domain,
        default_cors_preflight_options=cors,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Demo stack: Lambda behind a REST API with CORS enabled on /example."""
    super().__init__(scope, id, **kwargs)

    base_lambda = _lambda.Function(
        self,
        'ApiCorsLambda',
        handler='lambda-handler.handler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        # Code.from_asset replaces the deprecated Code.asset alias.
        code=_lambda.Code.from_asset('lambda'))
    base_api = _apigw.RestApi(self, 'ApiGatewayWithCors',
                              rest_api_name='ApiGatewayWithCors')
    # /example resource with CORS preflight enabled
    example_entity = base_api.root.add_resource(
        'example',
        default_cors_preflight_options=_apigw.CorsOptions(
            allow_methods=['GET', 'OPTIONS'],
            allow_origins=_apigw.Cors.ALL_ORIGINS))
    # non-proxy integration, so the CORS header must be mapped explicitly
    example_entity_lambda_integration = _apigw.LambdaIntegration(
        base_lambda,
        proxy=False,
        integration_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': "'*'",
            }
        }])
    example_entity.add_method(
        'GET',
        example_entity_lambda_integration,
        method_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Origin': True,
            }
        }])
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Full-stack demo: DynamoDB-backed API plus a CloudFront-served frontend.

    S3 object-created events on the mask-image bucket trigger the insert
    lambda; the fetch lambda backs the REST API.
    """
    super().__init__(scope, id, **kwargs)

    # region IAM
    # SECURITY NOTE: this role carries wildcard permissions (iam:*, s3:*,
    # dynamodb:*, ...) over all resources; scope it down before production.
    sa_role = aws_iam.Role(
        self,
        "Role",
        role_name="SaRole",
        assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"))
    sa_role.add_to_policy(
        aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            resources=["*"],
            actions=[
                "cloudwatch:*", "s3:*", "logs:*", "dynamodb:*", "iam:*"
            ]))
    # endregion IAM

    # region S3
    mask_images_bucket = aws_s3.Bucket(self, 'MaskImagesBucket')
    # endregion S3

    # region DB
    masks_db = aws_dynamodb.Table(
        self,
        'MasksTable',
        table_name='Masks',
        partition_key=aws_dynamodb.Attribute(
            name='id', type=aws_dynamodb.AttributeType.STRING),
        sort_key=aws_dynamodb.Attribute(
            name='mask_name', type=aws_dynamodb.AttributeType.STRING),
        removal_policy=core.RemovalPolicy.DESTROY)
    # endregion DB

    # region Lambda
    fetch_lambda = _lambda.Function(
        self,
        'DynamoFetch',
        runtime=_lambda.Runtime.PYTHON_3_8,
        # Code.from_asset replaces the deprecated Code.asset alias.
        code=_lambda.Code.from_asset('lambda'),
        handler='dynamo_fetch.handler',
        environment=dict(DYNAMO_TABLE_NAME=masks_db.table_name),
        role=sa_role)
    insert_lambda = _lambda.Function(
        self,
        'DynamoInsert',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset('lambda'),
        handler='dynamo_insert.handler',
        environment=dict(DYNAMO_TABLE_NAME=masks_db.table_name),
        role=sa_role)
    # endregion

    # region API
    # the construct registers itself with the stack; the binding was unused
    aws_apigateway.LambdaRestApi(
        self,
        'SaApi',
        rest_api_name='SaApi',
        handler=fetch_lambda,
        default_cors_preflight_options=aws_apigateway.CorsOptions(
            allow_origins=aws_apigateway.Cors.ALL_ORIGINS))
    # endregion API

    # region Frontend
    frontend_bucket = aws_s3.Bucket(self,
                                    "CreateReactAppBucket",
                                    website_index_document="index.html")
    # copy the pre-built React app into the website bucket
    aws_s3_deployment.BucketDeployment(
        self,
        "DeployCRA",
        sources=[
            aws_s3_deployment.Source.asset("../frontend/sa-app/build")
        ],
        destination_bucket=frontend_bucket)
    oia = aws_cloudfront.OriginAccessIdentity(self, 'OIA')
    frontend_bucket.grant_read(oia)
    # zero TTLs disable caching so deployments show up immediately
    aws_cloudfront.CloudFrontWebDistribution(
        self,
        "CDKCRAStaticDistribution",
        origin_configs=[
            aws_cloudfront.SourceConfiguration(
                s3_origin_source=aws_cloudfront.S3OriginConfig(
                    s3_bucket_source=frontend_bucket,
                    origin_access_identity=oia),
                behaviors=[
                    aws_cloudfront.Behavior(
                        is_default_behavior=True,
                        default_ttl=core.Duration.seconds(0),
                        max_ttl=core.Duration.seconds(0),
                        min_ttl=core.Duration.seconds(0))
                ])
        ])
    # endregion

    # region S3 triggers
    # insert metadata whenever a new mask image lands in the bucket
    new_mask_image_notification = aws_s3_notifications.LambdaDestination(
        insert_lambda)
    mask_images_bucket.add_event_notification(
        aws_s3.EventType.OBJECT_CREATED, new_mask_image_notification)
    # endregion
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Photo-sharing stack.

    Wires together: S3 buckets for originals, thumbnails, and the static
    website; a DynamoDB label table; a Rekognition labeling Lambda fed by an
    SQS queue of S3 upload events; a synchronous service Lambda behind API
    Gateway; and Cognito user/identity pools that scope each user to their
    own ``private/<identity>/`` S3 prefix.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Image Bucket: stores the uploaded originals; destroyed with the stack.
    image_bucket = s3.Bucket(self,
                             IMG_BUCKET_NAME,
                             removal_policy=cdk.RemovalPolicy.DESTROY)
    cdk.CfnOutput(self, "imageBucket", value=image_bucket.bucket_name)
    # Browser-side GET/PUT needs CORS on the bucket itself.
    image_bucket.add_cors_rule(
        allowed_methods=[s3.HttpMethods.GET, s3.HttpMethods.PUT],
        allowed_origins=["*"],
        allowed_headers=["*"],
        max_age=3000,
    )

    # Thumbnail Bucket
    resized_image_bucket = s3.Bucket(
        self,
        RESIZED_IMG_BUCKET_NAME,
        removal_policy=cdk.RemovalPolicy.DESTROY)
    cdk.CfnOutput(self, "resizedBucket", value=resized_image_bucket.bucket_name)
    resized_image_bucket.add_cors_rule(
        allowed_methods=[s3.HttpMethods.GET, s3.HttpMethods.PUT],
        allowed_origins=["*"],
        allowed_headers=["*"],
        max_age=3000,
    )

    # S3 Static bucket for website code
    web_bucket = s3.Bucket(
        self,
        WEBSITE_BUCKET_NAME,
        website_index_document="index.html",
        website_error_document="index.html",
        removal_policy=cdk.RemovalPolicy.DESTROY,
        # uncomment this and delete the policy statement below to allow public access to our
        # static website
        # public_read_access=true
    )
    # Reads allowed only from one source IP.
    # NOTE(review): hard-coded address — confirm it is still the intended
    # office/VPN egress IP.
    web_policy_statement = iam.PolicyStatement(
        actions=["s3:GetObject"],
        resources=[web_bucket.arn_for_objects("*")],
        principals=[iam.AnyPrincipal()],
        conditions={"IpAddress": {
            "aws:SourceIp": ["139.138.203.36"]
        }},
    )
    web_bucket.add_to_resource_policy(web_policy_statement)
    cdk.CfnOutput(self, "bucketURL", value=web_bucket.bucket_website_domain_name)

    # Deploy site contents to S3 Bucket
    s3_dep.BucketDeployment(
        self,
        "DeployWebsite",
        sources=[s3_dep.Source.asset("./public")],
        destination_bucket=web_bucket,
    )

    # DynamoDB to store image labels, keyed by image name.
    partition_key = dynamodb.Attribute(name="image",
                                       type=dynamodb.AttributeType.STRING)
    table = dynamodb.Table(
        self,
        "ImageLabels",
        partition_key=partition_key,
        removal_policy=cdk.RemovalPolicy.DESTROY,
    )
    cdk.CfnOutput(self, "ddbTable", value=table.table_name)

    # Lambda layer for Pillow library
    layer = lb.LayerVersion(
        self,
        "pil",
        code=lb.Code.from_asset("reklayer"),
        compatible_runtimes=[lb.Runtime.PYTHON_3_7],
        license="Apache-2.0",
        description=
        "A layer to enable the PIL library in our Rekognition Lambda",
    )

    # Rekognition Lambda: reads originals, writes thumbnails and labels.
    rek_fn = lb.Function(
        self,
        "rekognitionFunction",
        code=lb.Code.from_asset("rekognitionFunction"),
        runtime=lb.Runtime.PYTHON_3_7,
        handler="index.handler",
        timeout=cdk.Duration.seconds(30),
        memory_size=1024,
        layers=[layer],
        environment={
            "TABLE": table.table_name,
            "BUCKET": image_bucket.bucket_name,
            "THUMBBUCKET": resized_image_bucket.bucket_name,
        },
    )
    image_bucket.grant_read(rek_fn)
    resized_image_bucket.grant_write(rek_fn)
    table.grant_write_data(rek_fn)
    # DetectLabels supports no resource-level permissions, hence "*".
    rek_fn.add_to_role_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            actions=["rekognition:DetectLabels"],
                            resources=["*"]))

    # Lambda for Synchronous front end
    serviceFn = lb.Function(
        self,
        "serviceFunction",
        code=lb.Code.from_asset("servicelambda"),
        runtime=lb.Runtime.PYTHON_3_7,
        handler="index.handler",
        environment={
            "TABLE": table.table_name,
            "BUCKET": image_bucket.bucket_name,
            "RESIZEDBUCKET": resized_image_bucket.bucket_name,
        },
    )
    image_bucket.grant_write(serviceFn)
    resized_image_bucket.grant_write(serviceFn)
    table.grant_read_write_data(serviceFn)

    # Cognito User Pool Auth
    auto_verified_attrs = cognito.AutoVerifiedAttrs(email=True)
    sign_in_aliases = cognito.SignInAliases(email=True, username=True)
    user_pool = cognito.UserPool(
        self,
        "UserPool",
        self_sign_up_enabled=True,
        auto_verify=auto_verified_attrs,
        sign_in_aliases=sign_in_aliases,
    )
    user_pool_client = cognito.UserPoolClient(self,
                                              "UserPoolClient",
                                              user_pool=user_pool,
                                              generate_secret=False)
    identity_pool = cognito.CfnIdentityPool(
        self,
        "ImageRekognitionIdentityPool",
        allow_unauthenticated_identities=False,
        cognito_identity_providers=[{
            "clientId": user_pool_client.user_pool_client_id,
            "providerName": user_pool.user_pool_provider_name,
        }],
    )

    # API Gateway
    cors_options = apigw.CorsOptions(allow_origins=apigw.Cors.ALL_ORIGINS,
                                     allow_methods=apigw.Cors.ALL_METHODS)
    api = apigw.LambdaRestApi(
        self,
        "imageAPI",
        default_cors_preflight_options=cors_options,
        handler=serviceFn,
        proxy=False,
    )
    # L1 authorizer construct; the high-level enum is not accepted here,
    # so the raw string value is used instead.
    auth = apigw.CfnAuthorizer(
        self,
        "ApiGatewayAuthorizer",
        name="customer-authorizer",
        identity_source="method.request.header.Authorization",
        provider_arns=[user_pool.user_pool_arn],
        rest_api_id=api.rest_api_id,
        # type=apigw.AuthorizationType.COGNITO,
        type="COGNITO_USER_POOLS",
    )
    # Role assumed by authenticated identities from the identity pool.
    # NOTE(review): the Cognito identity federated principal is usually
    # "cognito-identity.amazonaws.com" — confirm "cognito-identity.amazon.com"
    # is intentional.
    assumed_by = iam.FederatedPrincipal(
        "cognito-identity.amazon.com",
        conditions={
            "StringEquals": {
                "cognito-identity.amazonaws.com:aud": identity_pool.ref
            },
            "ForAnyValue:StringLike": {
                "cognito-identity.amazonaws.com:amr": "authenticated"
            },
        },
        assume_role_action="sts:AssumeRoleWithWebIdentity",
    )
    authenticated_role = iam.Role(
        self,
        "ImageRekognitionAuthenticatedRole",
        assumed_by=assumed_by,
    )
    # IAM policy granting users permission to get and put their pictures
    # (keys are scoped per-identity via the policy variable substitution).
    policy_statement = iam.PolicyStatement(
        actions=["s3:GetObject", "s3:PutObject"],
        effect=iam.Effect.ALLOW,
        resources=[
            image_bucket.bucket_arn +
            "/private/${cognito-identity.amazonaws.com:sub}/*",
            image_bucket.bucket_arn +
            "/private/${cognito-identity.amazonaws.com:sub}/",
            resized_image_bucket.bucket_arn +
            "/private/${cognito-identity.amazonaws.com:sub}/*",
            resized_image_bucket.bucket_arn +
            "/private/${cognito-identity.amazonaws.com:sub}/",
        ],
    )
    # IAM policy granting users permission to list their pictures
    list_policy_statement = iam.PolicyStatement(
        actions=["s3:ListBucket"],
        effect=iam.Effect.ALLOW,
        resources=[
            image_bucket.bucket_arn, resized_image_bucket.bucket_arn
        ],
        conditions={
            "StringLike": {
                "s3:prefix":
                ["private/${cognito-identity.amazonaws.com:sub}/*"]
            }
        },
    )
    authenticated_role.add_to_policy(policy_statement)
    authenticated_role.add_to_policy(list_policy_statement)

    # Attach role to our Identity Pool
    cognito.CfnIdentityPoolRoleAttachment(
        self,
        "IdentityPoolRoleAttachment",
        identity_pool_id=identity_pool.ref,
        roles={"authenticated": authenticated_role.role_arn},
    )

    # Get some outputs from cognito
    cdk.CfnOutput(self, "UserPoolId", value=user_pool.user_pool_id)
    cdk.CfnOutput(self, "AppClientId",
                  value=user_pool_client.user_pool_client_id)
    cdk.CfnOutput(self, "IdentityPoolId", value=identity_pool.ref)

    # New Amazon API Gateway with AWS Lambda Integration
    success_response = apigw.IntegrationResponse(
        status_code="200",
        response_parameters={
            "method.response.header.Access-Control-Allow-Origin": "'*'"
        },
    )
    # Any non-empty lambda error output maps to a 500 with CORS headers.
    error_response = apigw.IntegrationResponse(
        selection_pattern="(\n|.)+",
        status_code="500",
        response_parameters={
            "method.response.header.Access-Control-Allow-Origin": "'*'"
        },
    )
    # VTL mapping: forward the action/key query params to the lambda event.
    request_template = json.dumps({
        "action": "$util.escapeJavaScript($input.params('action'))",
        "key": "$util.escapeJavaScript($input.params('key'))",
    })
    lambda_integration = apigw.LambdaIntegration(
        serviceFn,
        proxy=False,
        request_parameters={
            "integration.request.querystring.action":
            "method.request.querystring.action",
            "integration.request.querystring.key":
            "method.request.querystring.key",
        },
        request_templates={"application/json": request_template},
        passthrough_behavior=apigw.PassthroughBehavior.WHEN_NO_TEMPLATES,
        integration_responses=[success_response, error_response],
    )
    imageAPI = api.root.add_resource("images")
    success_resp = apigw.MethodResponse(
        status_code="200",
        response_parameters={
            "method.response.header.Access-Control-Allow-Origin": True
        },
    )
    error_resp = apigw.MethodResponse(
        status_code="500",
        response_parameters={
            "method.response.header.Access-Control-Allow-Origin": True
        },
    )
    # GET /images
    get_method = imageAPI.add_method(
        "GET",
        lambda_integration,
        authorization_type=apigw.AuthorizationType.COGNITO,
        request_parameters={
            "method.request.querystring.action": True,
            "method.request.querystring.key": True,
        },
        method_responses=[success_resp, error_resp],
    )
    # DELETE /images
    delete_method = imageAPI.add_method(
        "DELETE",
        lambda_integration,
        authorization_type=apigw.AuthorizationType.COGNITO,
        request_parameters={
            "method.request.querystring.action": True,
            "method.request.querystring.key": True,
        },
        method_responses=[success_resp, error_resp],
    )

    # Override the authorizer id because it doesn't work when defining it
    # as a param in add_method (escape hatch onto the underlying CfnMethod).
    get_method_resource = get_method.node.find_child("Resource")
    get_method_resource.add_property_override("AuthorizerId", auth.ref)
    delete_method_resource = delete_method.node.find_child("Resource")
    delete_method_resource.add_property_override("AuthorizerId", auth.ref)

    # Building SQS queue and DeadLetter Queue; messages failing twice are
    # moved to the dead-letter queue.
    dl_queue = sqs.Queue(
        self,
        "ImageDLQueue",
        queue_name="ImageDLQueue",
    )
    dl_queue_opts = sqs.DeadLetterQueue(max_receive_count=2, queue=dl_queue)
    queue = sqs.Queue(
        self,
        "ImageQueue",
        queue_name="ImageQueue",
        visibility_timeout=cdk.Duration.seconds(30),
        receive_message_wait_time=cdk.Duration.seconds(20),
        dead_letter_queue=dl_queue_opts,
    )

    # S3 Bucket Create Notification to SQS
    # Whenever an image is uploaded add it to the queue
    image_bucket.add_object_created_notification(
        s3n.SqsDestination(queue), s3.NotificationKeyFilter(prefix="private/"))
def __init__(self, scope: core.App, name: str, **kwargs) -> None:
    """Bashoutter stack: a DynamoDB pokemon table behind a CRUD REST API.

    Exposes GET/POST on /pokemon and PATCH/DELETE on
    /pokemon/{pokemon_number}, and publishes the table name and endpoint
    URL as SSM parameters.
    """
    super().__init__(scope, name, **kwargs)

    # On-demand DynamoDB table keyed by pokemon number, destroyed with the
    # stack.
    pokemon_table = ddb.Table(
        self,
        "Bashoutter-Table",
        partition_key=ddb.Attribute(
            name="pokemon_number", type=ddb.AttributeType.STRING),
        billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
        removal_policy=core.RemovalPolicy.DESTROY)

    # settings shared by every handler below
    shared_props = {
        "runtime": _lambda.Runtime.PYTHON_3_7,
        "environment": {
            "TABLE_NAME": pokemon_table.table_name
        },
    }

    # handlers, all packaged from the "api" asset directory
    show_fn = _lambda.Function(
        self,
        "ShowPokemon",
        code=_lambda.Code.from_asset("api"),
        handler="api.show_pokemon",
        memory_size=512,
        timeout=core.Duration.seconds(10),
        **shared_props)
    get_fn = _lambda.Function(
        self,
        "GetPokemon",
        code=_lambda.Code.from_asset("api"),
        handler="api.get_pokemon",
        **shared_props)
    levelup_fn = _lambda.Function(
        self,
        "LevelUp",
        code=_lambda.Code.from_asset("api"),
        handler="api.level_up",
        **shared_props)
    goodbye_fn = _lambda.Function(
        self,
        "ByePokemon",
        code=_lambda.Code.from_asset("api"),
        handler="api.bye_pokemon",
        **shared_props)

    # table permissions: the listing handler only reads, the rest mutate
    pokemon_table.grant_read_data(show_fn)
    for writer_fn in (get_fn, levelup_fn, goodbye_fn):
        pokemon_table.grant_read_write_data(writer_fn)

    # REST API with permissive CORS
    api = apigw.RestApi(
        self,
        "BashoutterApi",
        default_cors_preflight_options=apigw.CorsOptions(
            allow_origins=apigw.Cors.ALL_ORIGINS,
            allow_methods=apigw.Cors.ALL_METHODS,
        ))
    pokemon = api.root.add_resource("pokemon")
    pokemon.add_method("GET", apigw.LambdaIntegration(show_fn))
    pokemon.add_method("POST", apigw.LambdaIntegration(get_fn))
    pokemon_number = pokemon.add_resource("{pokemon_number}")
    pokemon_number.add_method("PATCH", apigw.LambdaIntegration(levelup_fn))
    pokemon_number.add_method("DELETE", apigw.LambdaIntegration(goodbye_fn))

    # publish runtime configuration for out-of-band consumers
    ssm.StringParameter(
        self,
        "TABLE_NAME",
        parameter_name="TABLE_NAME",
        string_value=pokemon_table.table_name)
    ssm.StringParameter(
        self,
        "ENDPOINT_URL",
        parameter_name="ENDPOINT_URL",
        string_value=api.url)
def __init__(self, scope: core.Construct, id: str, artifact_bucket: s3.Bucket, **kwargs) -> None:
    """MFA demo stack: Cognito user pool, Go API Lambda, and a static site.

    Exposes ``self.api``, ``self.backend_fn`` and
    ``self.static_website_bucket`` for sibling constructs.
    """
    super().__init__(scope, id, **kwargs)

    # user pool with optional MFA (TOTP or SMS) and a length-only password
    # policy
    user_pool = cognito.UserPool(
        scope=self,
        id="user-pool",
        mfa=cognito.Mfa.OPTIONAL,
        mfa_second_factor=cognito.MfaSecondFactor(otp=True, sms=True),
        password_policy=cognito.PasswordPolicy(
            min_length=12,
            require_lowercase=True,
            require_uppercase=False,
            require_digits=False,
            require_symbols=False,
        ))
    app_client = user_pool.add_client(
        id="customer-app-client",
        auth_flows=cognito.AuthFlow(user_password=True, refresh_token=True),
    )

    # Go handler served from a pre-built zip in the artifact bucket
    api_handler = _lambda.Function(
        scope=self,
        id="api-function",
        runtime=_lambda.Runtime.GO_1_X,
        handler="main",
        memory_size=500,
        timeout=core.Duration.seconds(10),
        environment={
            "USER_POOL_ID": user_pool.user_pool_id,
            "CLIENT_ID": app_client.user_pool_client_id,
        },
        code=_lambda.Code.from_bucket(
            bucket=artifact_bucket,
            key="Server/main.zip",
        ),
    )
    # the handler drives the Cognito auth / MFA enrollment flows directly
    api_handler.add_to_role_policy(
        statement=iam.PolicyStatement(
            actions=[
                "cognito-idp:RespondToAuthChallenge",
                "cognito-idp:InitiateAuth",
                "cognito-idp:SetUserMFAPreference",
                "cognito-idp:AssociateSoftwareToken",
                "cognito-idp:VerifySoftwareToken"
            ],
            resources=[pool.user_pool_arn] if False else [user_pool.user_pool_arn]))

    rest_api = apigateway.LambdaRestApi(
        scope=self,
        id="mfa-api",
        handler=api_handler,
        endpoint_types=[apigateway.EndpointType.REGIONAL],
        default_cors_preflight_options=apigateway.CorsOptions(
            allow_origins=["*"]))
    self.api = rest_api
    self.backend_fn = api_handler

    # private bucket fronted by CloudFront through an origin access identity
    site_bucket = s3.Bucket(
        scope=self,
        id="static-website-bucket",
    )
    self.static_website_bucket = site_bucket
    cloudfront.CloudFrontWebDistribution(
        scope=self,
        id="static-website-distribution",
        default_root_object="index.html",
        origin_configs=[
            cloudfront.SourceConfiguration(
                s3_origin_source=cloudfront.S3OriginConfig(
                    s3_bucket_source=site_bucket,
                    origin_access_identity=cloudfront.OriginAccessIdentity(
                        scope=self,
                        id="origin-access-identity",
                    )),
                behaviors=[cloudfront.Behavior(is_default_behavior=True)])
        ],
    )
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Serverless TODO backend.

    Creates a Cognito user pool, a single-table DynamoDB store with two
    GSIs, and a REST API (optionally on a custom domain taken from CDK
    context) whose task CRUD/search routes are Cognito-authorized Lambda
    integrations; /auth/login is unauthenticated.
    """
    super().__init__(scope, construct_id, **kwargs)
    # -----------------------------------
    # Cognito User Pool
    # -----------------------------------
    userpool = cognito.UserPool(
        self,
        "ServerlessTodoUserPool",
        user_pool_name="ServerlessTodoUserPool",
        sign_in_aliases=cognito.SignInAliases(username=True, email=True),
        password_policy=cognito.PasswordPolicy(
            min_length=6,
            require_digits=True,
            require_lowercase=True,
            require_symbols=True,
            require_uppercase=True,
            temp_password_validity=core.Duration.days(7)),
        auto_verify=cognito.AutoVerifiedAttrs(email=True),
        standard_attributes=cognito.StandardAttributes(
            email=cognito.StandardAttribute(mutable=True, required=True),
            family_name=cognito.StandardAttribute(mutable=True,
                                                  required=True),
            given_name=cognito.StandardAttribute(mutable=True,
                                                 required=True)))
    # ADMIN_USER_PASSWORD flow is what the login lambda below uses.
    user_pool_client = userpool.add_client(
        "UserPoolClient",
        auth_flows=cognito.AuthFlow(admin_user_password=True))

    # -----------------------------------
    # dynamodb
    # -----------------------------------
    # Single-table design: (id, meta) primary key plus two GSIs covering
    # meta-first and owner-first access patterns.
    dynamodbTable = dynamodb.Table(
        self,
        "TaskTable",
        partition_key=dynamodb.Attribute(
            name="id", type=dynamodb.AttributeType.STRING),
        sort_key=dynamodb.Attribute(name="meta",
                                    type=dynamodb.AttributeType.STRING),
        billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
        point_in_time_recovery=True,
        server_side_encryption=True)
    dynamodbTable.add_global_secondary_index(
        partition_key=dynamodb.Attribute(
            name="meta", type=dynamodb.AttributeType.STRING),
        sort_key=dynamodb.Attribute(name="id",
                                    type=dynamodb.AttributeType.STRING),
        index_name="meta-id-index")
    dynamodbTable.add_global_secondary_index(
        partition_key=dynamodb.Attribute(
            name="owner", type=dynamodb.AttributeType.STRING),
        sort_key=dynamodb.Attribute(name="meta",
                                    type=dynamodb.AttributeType.STRING),
        index_name="owner-meta-index")

    # -----------------------------------
    # apigateway
    # -----------------------------------
    # Custom-domain settings come from CDK context; all three must be
    # present to take the custom-domain branch below.
    acm_arn = self.node.try_get_context('acm_arn')
    domain_name = self.node.try_get_context("domain_name")
    hosted_zone = self.node.try_get_context("hosted_zone")
    # NOTE(review): PolicyDocument(statements=...) expects a list of
    # statements, and PolicyStatement.add_resources returns None — this
    # expression looks broken; confirm the synthesized API resource policy
    # is what was intended.
    api_policy = iam.PolicyDocument(
        statements=iam.PolicyStatement(actions=["lambda:InvokeFunction"], )
        .add_resources("arn:aws:lambda:{}:{}:function:*".format(
            self.region, self.account)))
    if acm_arn and domain_name and hosted_zone:
        # Regional API bound to the imported ACM certificate and custom
        # domain.
        api = apigw.RestApi(
            self,
            'API',
            domain_name=apigw.DomainNameOptions(
                certificate=acm.Certificate.from_certificate_arn(
                    self, 'ApiCertificate', acm_arn),
                domain_name=domain_name,
                endpoint_type=apigw.EndpointType.REGIONAL),
            deploy_options=apigw.StageOptions(metrics_enabled=True),
            policy=api_policy,
            rest_api_name="Serverless TODO API",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            default_cors_preflight_options=apigw.CorsOptions(
                allow_origins=apigw.Cors.
                ALL_ORIGINS,  # TODO: Temporary for development
                allow_headers=[
                    "Content-Type", "X-Amz-Date", "Authorization",
                    "X-Api-Key", "X-Amz-Security-Token", "X-Tracing-Id",
                    "x-jeffy-correlation-id", "x-amzn-trace-id"
                ],
                allow_methods=apigw.Cors.ALL_METHODS,
                allow_credentials=True))
        # Alias record pointing the custom domain at the API's regional
        # domain name.
        route53.CfnRecordSet(
            self,
            "apiDomainRecord",
            name=domain_name,
            type="A",
            alias_target={
                "dnsName": api.domain_name.domain_name_alias_domain_name,
                "hostedZoneId":
                api.domain_name.domain_name_alias_hosted_zone_id
            },
            hosted_zone_id=hosted_zone,
        )
    else:
        # Same API without the custom domain.
        api = apigw.RestApi(
            self,
            'API',
            deploy_options=apigw.StageOptions(metrics_enabled=True),
            policy=api_policy,
            rest_api_name="Serverless TODO API",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            default_cors_preflight_options=apigw.CorsOptions(
                allow_origins=apigw.Cors.
                ALL_ORIGINS,  # TODO: Temporary for development
                allow_headers=[
                    "Content-Type", "X-Amz-Date", "Authorization",
                    "X-Api-Key", "X-Amz-Security-Token", "X-Tracing-Id",
                    "x-jeffy-correlation-id", "x-amzn-trace-id"
                ],
                allow_methods=apigw.Cors.ALL_METHODS,
                allow_credentials=True))

    # All task routes share this Cognito authorizer.
    cognito_authorizer = apigw.CognitoUserPoolsAuthorizer(
        self,
        "CognitoAuthorizer",
        cognito_user_pools=[userpool],
        authorizer_name='todo_cognito_authorizer',
        identity_source='method.request.header.Authorization',
        results_cache_ttl=core.Duration.minutes(60))
    # Role API Gateway assumes to invoke the task lambdas.
    api_role = iam.Role(self,
                        "ApiRole",
                        assumed_by=iam.ServicePrincipal(
                            service="apigateway.amazonaws.com"))
    api_statement = iam.PolicyStatement(
        actions=["lambda:InvokeFunction"],
        resources=[
            "arn:aws:lambda:{}:{}:function:*".format(
                self.region, self.account)
        ])
    api_role.add_to_policy(api_statement)

    # -----------------------------------
    # lambda common configure
    # -----------------------------------
    env = {
        "TABLE_NAME": dynamodbTable.table_name,
        "USER_POOL_ID": userpool.user_pool_id,
        "USER_POOL_NAME": userpool.user_pool_provider_name,
        "CLIENT_ID": user_pool_client.user_pool_client_id
    }

    # -----------------------------------
    # get handler
    # -----------------------------------
    # Each handler below bundles its dependencies inside a docker build of
    # the Python 3.8 runtime image.
    get_resource_base_name = "getTaskFunction"
    get_task_func = lambda_.Function(
        self,
        get_resource_base_name,
        code=lambda_.Code.from_asset(
            'function/src/task',
            bundling=core.BundlingOptions(
                image=lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                command=[
                    'bash', '-c',
                    'pip install -r requirements.txt -t /asset-output && cp -a . /asset-output'
                ],
            )),
        handler="get.lambda_handler",
        runtime=lambda_.Runtime.PYTHON_3_8,
        environment=env,
        tracing=lambda_.Tracing.ACTIVE,
        timeout=core.Duration.seconds(29),
        memory_size=512)
    # NOTE(review): dynamodb:* is broader than a read handler needs —
    # consider narrowing per handler.
    get_task_func.add_to_role_policy(statement=iam.PolicyStatement(
        actions=['dynamodb:*'],
        resources=[
            dynamodbTable.table_arn, dynamodbTable.table_arn + '/*'
        ]))
    logs.LogGroup(self,
                  get_resource_base_name + 'LogGroup',
                  log_group_name='/aws/lambda/' +
                  get_task_func.function_name,
                  retention=logs.RetentionDays.TWO_WEEKS)
    task_path = api.root.add_resource("task")
    task_id_path = task_path.add_resource("{task_id}")
    get_task_integration = apigw.LambdaIntegration(
        get_task_func, credentials_role=api_role)
    # GET /task/{task_id}
    task_id_path.add_method(
        "GET",
        integration=get_task_integration,
        authorization_type=apigw.AuthorizationType.COGNITO,
        authorizer=cognito_authorizer,
    )

    # -----------------------------------
    # create handler
    # -----------------------------------
    create_resource_base_name = "createTaskFunction"
    create_task_func = lambda_.Function(
        self,
        create_resource_base_name,
        code=lambda_.Code.from_asset(
            'function/src/task',
            bundling=core.BundlingOptions(
                image=lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                command=[
                    'bash', '-c',
                    'pip install -r requirements.txt -t /asset-output && cp -a . /asset-output'
                ],
            )),
        handler="create.lambda_handler",
        runtime=lambda_.Runtime.PYTHON_3_8,
        environment=env,
        tracing=lambda_.Tracing.ACTIVE,
        timeout=core.Duration.seconds(29),
        memory_size=512)
    create_task_func.add_to_role_policy(statement=iam.PolicyStatement(
        actions=['dynamodb:*'],
        resources=[
            dynamodbTable.table_arn, dynamodbTable.table_arn + '/*'
        ]))
    logs.LogGroup(self,
                  create_resource_base_name + 'LogGroup',
                  log_group_name='/aws/lambda/' +
                  create_task_func.function_name,
                  retention=logs.RetentionDays.TWO_WEEKS)
    create_task_integration = apigw.LambdaIntegration(
        create_task_func, credentials_role=api_role)
    # POST /task
    task_path.add_method(
        "POST",
        integration=create_task_integration,
        authorization_type=apigw.AuthorizationType.COGNITO,
        authorizer=cognito_authorizer,
    )

    # -----------------------------------
    # update handler
    # -----------------------------------
    update_resource_base_name = "updateTaskFunction"
    update_task_func = lambda_.Function(
        self,
        update_resource_base_name,
        code=lambda_.Code.from_asset(
            'function/src/task',
            bundling=core.BundlingOptions(
                image=lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                command=[
                    'bash', '-c',
                    'pip install -r requirements.txt -t /asset-output && cp -a . /asset-output'
                ],
            )),
        handler="update.lambda_handler",
        runtime=lambda_.Runtime.PYTHON_3_8,
        environment=env,
        tracing=lambda_.Tracing.ACTIVE,
        timeout=core.Duration.seconds(29),
        memory_size=512)
    update_task_func.add_to_role_policy(statement=iam.PolicyStatement(
        actions=['dynamodb:*'],
        resources=[
            dynamodbTable.table_arn, dynamodbTable.table_arn + '/*'
        ]))
    logs.LogGroup(self,
                  update_resource_base_name + 'LogGroup',
                  log_group_name='/aws/lambda/' +
                  update_task_func.function_name,
                  retention=logs.RetentionDays.TWO_WEEKS)
    update_task_integration = apigw.LambdaIntegration(
        update_task_func, credentials_role=api_role)
    # POST /task/{task_id} — updates are POSTed to the task id resource
    task_id_path.add_method(
        "POST",
        integration=update_task_integration,
        authorization_type=apigw.AuthorizationType.COGNITO,
        authorizer=cognito_authorizer,
    )

    # -----------------------------------
    # delete handler
    # -----------------------------------
    delete_resource_base_name = "deleteTaskFunction"
    delete_task_func = lambda_.Function(
        self,
        delete_resource_base_name,
        code=lambda_.Code.from_asset(
            'function/src/task',
            bundling=core.BundlingOptions(
                image=lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                command=[
                    'bash', '-c',
                    'pip install -r requirements.txt -t /asset-output && cp -a . /asset-output'
                ],
            )),
        handler="delete.lambda_handler",
        runtime=lambda_.Runtime.PYTHON_3_8,
        environment=env,
        tracing=lambda_.Tracing.ACTIVE,
        timeout=core.Duration.seconds(29),
        memory_size=512)
    delete_task_func.add_to_role_policy(statement=iam.PolicyStatement(
        actions=['dynamodb:*'],
        resources=[
            dynamodbTable.table_arn, dynamodbTable.table_arn + '/*'
        ]))
    logs.LogGroup(self,
                  delete_resource_base_name + 'LogGroup',
                  log_group_name='/aws/lambda/' +
                  delete_task_func.function_name,
                  retention=logs.RetentionDays.TWO_WEEKS)
    delete_task_integration = apigw.LambdaIntegration(
        delete_task_func, credentials_role=api_role)
    # DELETE /task/{task_id}
    task_id_path.add_method(
        "DELETE",
        integration=delete_task_integration,
        authorization_type=apigw.AuthorizationType.COGNITO,
        authorizer=cognito_authorizer,
    )

    # -----------------------------------
    # search handler
    # -----------------------------------
    search_resource_base_name = "searchTaskFunction"
    search_task_func = lambda_.Function(
        self,
        search_resource_base_name,
        code=lambda_.Code.from_asset(
            'function/src/task',
            bundling=core.BundlingOptions(
                image=lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                command=[
                    'bash', '-c',
                    'pip install -r requirements.txt -t /asset-output && cp -a . /asset-output'
                ],
            )),
        handler="search.lambda_handler",
        runtime=lambda_.Runtime.PYTHON_3_8,
        environment=env,
        tracing=lambda_.Tracing.ACTIVE,
        timeout=core.Duration.seconds(29),
        memory_size=512)
    search_task_func.add_to_role_policy(statement=iam.PolicyStatement(
        actions=['dynamodb:*'],
        resources=[
            dynamodbTable.table_arn, dynamodbTable.table_arn + '/*'
        ]))
    logs.LogGroup(self,
                  search_resource_base_name + 'LogGroup',
                  log_group_name='/aws/lambda/' +
                  search_task_func.function_name,
                  retention=logs.RetentionDays.TWO_WEEKS)
    search_task_integration = apigw.LambdaIntegration(
        search_task_func, credentials_role=api_role)
    tasks_path = api.root.add_resource("tasks")
    # GET /tasks
    tasks_path.add_method(
        "GET",
        integration=search_task_integration,
        authorization_type=apigw.AuthorizationType.COGNITO,
        authorizer=cognito_authorizer,
    )

    # -----------------------------------
    # login handler
    # -----------------------------------
    login_resource_base_name = "loginFunction"
    login_task_func = lambda_.Function(
        self,
        login_resource_base_name,
        code=lambda_.Code.from_asset(
            'function/src/user',
            bundling=core.BundlingOptions(
                image=lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                command=[
                    'bash', '-c',
                    'pip install -r requirements.txt -t /asset-output && cp -a . /asset-output'
                ],
            )),
        handler="login.lambda_handler",
        runtime=lambda_.Runtime.PYTHON_3_8,
        environment=env,
        tracing=lambda_.Tracing.ACTIVE,
        timeout=core.Duration.seconds(29),
        memory_size=512)
    # Login authenticates against the pool via the admin auth flow.
    login_task_func.add_to_role_policy(statement=iam.PolicyStatement(
        actions=['cognito-idp:AdminInitiateAuth'],
        resources=[userpool.user_pool_arn]))
    logs.LogGroup(self,
                  login_resource_base_name + 'LogGroup',
                  log_group_name='/aws/lambda/' +
                  login_task_func.function_name,
                  retention=logs.RetentionDays.TWO_WEEKS)
    # POST /auth/login — deliberately unauthenticated (no authorizer).
    login_task_integration = apigw.LambdaIntegration(login_task_func)
    auth_path = api.root.add_resource("auth")
    auth_login_path = auth_path.add_resource("login")
    auth_login_path.add_method("POST", integration=login_task_integration)
def __init__(
    self,
    scope: core.Construct,
    id: str,
    webhook_function,
    on_demand_function,
    schedule_update_function,
    status_query_function,
    slack_function,
    ingest_allowed_ips,
):
    """Wire up the Zoom-ingester REST API.

    Builds a LambdaRestApi with an IP-restricting resource policy, one
    resource per handler lambda (via this class's ``create_resource``),
    and CloudFormation outputs exposing every endpoint URL and resource id.

    :param scope: parent construct, used by CDK.
    :param id: construct id, used by CDK.
    :param webhook_function: lambda behind POST /new_recording; also the
        api's default handler.
    :param on_demand_function: lambda behind POST /ingest.
    :param schedule_update_function: lambda behind POST /schedule_update.
    :param status_query_function: lambda behind GET /status.
    :param slack_function: lambda behind POST /slack.
    :param ingest_allowed_ips: source IPs permitted to call /ingest and
        /status (everything else stays open).
    """
    super().__init__(scope, id)
    stack_name = core.Stack.of(self).stack_name
    # Resource policy: ALLOW everyone on every path by default, then DENY
    # POST /ingest and GET /status to callers outside ingest_allowed_ips.
    policy = iam.PolicyDocument(
        statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["execute-api:Invoke"],
                principals=[iam.AnyPrincipal()],
                # note that the policy is a prop of the api which cannot
                # reference itself, see the Cloudformation documentation
                # for api gateway policy attribute
                resources=[core.Fn.join("", ["execute-api:/", "*"])],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.DENY,
                actions=["execute-api:Invoke"],
                principals=[iam.AnyPrincipal()],
                resources=[
                    core.Fn.join("", ["execute-api:/", "*/POST/ingest"]),
                    core.Fn.join("", ["execute-api:/", "*/GET/status"]),
                ],
                conditions={
                    "NotIpAddress": {"aws:SourceIp": ingest_allowed_ips}
                },
            ),
        ]
    )
    self.rest_api_name = f"{stack_name}-{names.REST_API}"
    # Access-log group: kept six months, deleted with the stack.
    log_group = logs.LogGroup(
        self,
        "apilogs",
        log_group_name=f"/aws/apigateway/{self.rest_api_name}/access_logs",
        removal_policy=core.RemovalPolicy.DESTROY,
        retention=logs.RetentionDays.SIX_MONTHS,
    )
    self.api = apigw.LambdaRestApi(
        self,
        "api",
        handler=webhook_function,  # default handler
        rest_api_name=self.rest_api_name,
        proxy=False,
        deploy=True,
        policy=policy,
        deploy_options=apigw.StageOptions(
            access_log_destination=apigw.LogGroupLogDestination(log_group),
            access_log_format=apigw.AccessLogFormat.clf(),
            data_trace_enabled=True,
            metrics_enabled=True,
            logging_level=apigw.MethodLoggingLevel.INFO,
            stage_name=names.API_STAGE,
        ),
    )
    self.api.add_api_key("ZoomIngesterApiKey")
    # One API resource per lambda handler.
    self.new_recording_resource = self.create_resource(
        "new_recording",
        webhook_function,
        "POST",
    )
    # /ingest is called cross-origin, so it carries CORS preflight options.
    self.ingest_resource = self.create_resource(
        "ingest",
        on_demand_function,
        "POST",
        cors_options=apigw.CorsOptions(
            allow_origins=apigw.Cors.ALL_ORIGINS,
            allow_methods=["POST", "OPTIONS"],
            allow_headers=apigw.Cors.DEFAULT_HEADERS
            + ["Accept-Language", "X-Requested-With"],
        ),
    )
    self.schedule_update_resource = self.create_resource(
        "schedule_update",
        schedule_update_function,
        "POST",
    )
    self.status_query_resource = self.create_resource(
        "status",
        status_query_function,
        "GET",
    )
    self.slack_resource = self.create_resource(
        "slack",
        slack_function,
        "POST",
    )

    def endpoint_url(resource_name):
        # Public invoke URL of a resource on the deployed stage.
        return (
            f"https://{self.api.rest_api_id}.execute-api."
            f"{core.Stack.of(self).region}.amazonaws.com/"
            f"{names.API_STAGE}/{resource_name}"
        )

    # The on-demand lambda calls back into the webhook endpoint.
    on_demand_function.add_environment(
        "WEBHOOK_ENDPOINT_URL",
        endpoint_url("new_recording"),
    )
    # Exported outputs: one URL and one resource id per endpoint, plus the
    # rest api id, for consumption by other stacks / tooling.
    core.CfnOutput(
        self,
        "WebhookEndpoint",
        export_name=f"{stack_name}-{names.WEBHOOK_ENDPOINT}-url",
        value=endpoint_url("new_recording"),
    )
    core.CfnOutput(
        self,
        "OnDemandEndpoint",
        export_name=f"{stack_name}-{names.ON_DEMAND_ENDPOINT}-url",
        value=endpoint_url("ingest"),
    )
    core.CfnOutput(
        self,
        "ScheduleUpdateEndpoint",
        export_name=f"{stack_name}-{names.SCHEDULE_UPDATE_ENDPOINT}-url",
        value=endpoint_url("schedule_update"),
    )
    core.CfnOutput(
        self,
        "StatusQueryEndpoint",
        export_name=f"{stack_name}-{names.STATUS_ENDPOINT}-url",
        value=endpoint_url("status"),
    )
    core.CfnOutput(
        self,
        "SlackEndpoint",
        export_name=f"{stack_name}-{names.SLACK_ENDPOINT}-url",
        value=endpoint_url("slack"),
    )
    core.CfnOutput(
        self,
        "WebhookResourceId",
        export_name=f"{stack_name}-{names.WEBHOOK_ENDPOINT}-resource-id",
        value=self.new_recording_resource.resource_id,
    )
    core.CfnOutput(
        self,
        "OnDemandResourceId",
        export_name=f"{stack_name}-{names.ON_DEMAND_ENDPOINT}-resource-id",
        value=self.ingest_resource.resource_id,
    )
    core.CfnOutput(
        self,
        "ScheduleUpdateResourceId",
        export_name=f"{stack_name}-{names.SCHEDULE_UPDATE_ENDPOINT}-resource-id",
        value=self.schedule_update_resource.resource_id,
    )
    core.CfnOutput(
        self,
        "StatusQueryResourceId",
        export_name=f"{stack_name}-{names.STATUS_ENDPOINT}-resource-id",
        value=self.status_query_resource.resource_id,
    )
    core.CfnOutput(
        self,
        "SlackResourceId",
        export_name=f"{stack_name}-{names.SLACK_ENDPOINT}-resource-id",
        value=self.slack_resource.resource_id,
    )
    core.CfnOutput(
        self,
        "RestApiId",
        export_name=f"{stack_name}-{names.REST_API}-id",
        value=self.api.rest_api_id,
    )
def __init__(self, scope: core.App, name: str, **kwargs) -> None:
    """Bashoutter stack: a DynamoDB-backed haiku CRUD API plus a public
    S3 website bucket serving the pre-built GUI.

    :param scope: parent CDK app.
    :param name: stack name, used by CDK.
    """
    super().__init__(scope, name, **kwargs)  # <1>

    # dynamoDB table to store haiku; on-demand billing, removed with the stack
    table = ddb.Table(self, "Bashoutter-Table",
                      partition_key=ddb.Attribute(
                          name="item_id",
                          type=ddb.AttributeType.STRING),
                      billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
                      removal_policy=core.RemovalPolicy.DESTROY)  # <2>

    # Public website bucket hosting the GUI built into ./gui/dist
    bucket = s3.Bucket(self, "Bashoutter-Bucket",
                       website_index_document="index.html",
                       public_read_access=True,
                       removal_policy=core.RemovalPolicy.DESTROY)
    s3_deploy.BucketDeployment(
        self, "BucketDeployment",
        destination_bucket=bucket,
        sources=[s3_deploy.Source.asset("./gui/dist")],
        retain_on_delete=False,
    )

    # Settings shared by every API lambda below
    common_params = {
        "runtime": _lambda.Runtime.PYTHON_3_7,
        "environment": {
            "TABLE_NAME": table.table_name
        }
    }  # <3>

    # define Lambda functions (one per HTTP verb; GET gets extra memory/timeout)
    get_haiku_lambda = _lambda.Function(
        self, "GetHaiku",
        code=_lambda.Code.from_asset("api"),
        handler="api.get_haiku",
        memory_size=512,
        timeout=core.Duration.seconds(10),
        **common_params,
    )
    post_haiku_lambda = _lambda.Function(
        self, "PostHaiku",
        code=_lambda.Code.from_asset("api"),
        handler="api.post_haiku",
        **common_params,
    )
    patch_haiku_lambda = _lambda.Function(
        self, "PatchHaiku",
        code=_lambda.Code.from_asset("api"),
        handler="api.patch_haiku",
        **common_params,
    )
    delete_haiku_lambda = _lambda.Function(
        self, "DeleteHaiku",
        code=_lambda.Code.from_asset("api"),
        handler="api.delete_haiku",
        **common_params,
    )  # <4>

    # grant permissions: read-only for GET, read/write for the mutating verbs
    table.grant_read_data(get_haiku_lambda)
    table.grant_read_write_data(post_haiku_lambda)
    table.grant_read_write_data(patch_haiku_lambda)
    table.grant_read_write_data(delete_haiku_lambda)  # <5>

    # define API Gateway with permissive CORS for the browser GUI
    api = apigw.RestApi(self, "BashoutterApi",
                        default_cors_preflight_options=apigw.CorsOptions(
                            allow_origins=apigw.Cors.ALL_ORIGINS,
                            allow_methods=apigw.Cors.ALL_METHODS,
                        ))
    # /haiku handles collection-level GET and POST
    haiku = api.root.add_resource("haiku")
    haiku.add_method("GET", apigw.LambdaIntegration(get_haiku_lambda))
    haiku.add_method("POST", apigw.LambdaIntegration(post_haiku_lambda))
    # /haiku/{item_id} handles item-level PATCH and DELETE
    haiku_item_id = haiku.add_resource("{item_id}")
    haiku_item_id.add_method("PATCH",
                             apigw.LambdaIntegration(patch_haiku_lambda))
    haiku_item_id.add_method("DELETE",
                             apigw.LambdaIntegration(delete_haiku_lambda))

    # store parameters in SSM so other tools can discover the table and API
    ssm.StringParameter(self, "TABLE_NAME",
                        parameter_name="TABLE_NAME",
                        string_value=table.table_name)
    ssm.StringParameter(self, "ENDPOINT_URL",
                        parameter_name="ENDPOINT_URL",
                        string_value=api.url)

    # Output parameters
    core.CfnOutput(self, 'BucketUrl',
                   value=bucket.bucket_website_domain_name)
def __init__(self, scope: core.Construct, id: str, webhook_function,
             on_demand_function, ingest_allowed_ips):
    """Zoom-ingester REST API (two-endpoint variant).

    Exposes POST /new_recording (webhook) and POST /ingest (on-demand),
    restricting /ingest to ``ingest_allowed_ips`` via the api's resource
    policy, and exports endpoint URLs / resource ids as stack outputs.

    :param scope: parent construct, used by CDK.
    :param id: construct id, used by CDK.
    :param webhook_function: lambda behind POST /new_recording; also the
        api's default handler.
    :param on_demand_function: lambda behind POST /ingest.
    :param ingest_allowed_ips: source IPs permitted to call /ingest.
    """
    super().__init__(scope, id)
    stack_name = core.Stack.of(self).stack_name
    # ALLOW everything by default, then DENY POST /ingest from outside
    # the allowed IP list.
    policy = iam.PolicyDocument(statements=[
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["execute-api:Invoke"],
            principals=[iam.AnyPrincipal()],
            # note that the policy is a prop of the api which cannot reference itself
            # see the Cloudformation documentation for api gateway policy attribute
            resources=[core.Fn.join('', ['execute-api:/', '*'])]),
        iam.PolicyStatement(
            effect=iam.Effect.DENY,
            actions=["execute-api:Invoke"],
            principals=[iam.AnyPrincipal()],
            resources=[
                core.Fn.join('', ['execute-api:/', '*/POST/ingest'])
            ],
            conditions={
                "NotIpAddress": {
                    "aws:SourceIp": ingest_allowed_ips
                }
            })
    ])
    self.rest_api_name = f"{stack_name}-{names.REST_API}"
    # Access-log group: kept six months, deleted with the stack.
    log_group = logs.LogGroup(
        self,
        "apilogs",
        log_group_name=f"/aws/apigateway/{self.rest_api_name}/access_logs",
        removal_policy=core.RemovalPolicy.DESTROY,
        retention=logs.RetentionDays.SIX_MONTHS)
    self.api = apigw.LambdaRestApi(
        self,
        "api",
        handler=webhook_function,  # default handler
        rest_api_name=self.rest_api_name,
        proxy=False,
        deploy=True,
        policy=policy,
        deploy_options=apigw.StageOptions(
            access_log_destination=apigw.LogGroupLogDestination(log_group),
            access_log_format=apigw.AccessLogFormat.clf(),
            data_trace_enabled=True,
            metrics_enabled=True,
            logging_level=apigw.MethodLoggingLevel.INFO,
            stage_name=names.API_STAGE))
    self.api.add_api_key("ZoomIngesterApiKey")
    # POST /new_recording — handled by the api's default handler.
    self.new_recording_resource = self.api.root.add_resource(
        "new_recording")
    self.new_recording_method = self.new_recording_resource.add_method(
        "POST",
        method_responses=[
            apigw.MethodResponse(status_code="200",
                                 response_models={
                                     "application/json": apigw.Model.EMPTY_MODEL
                                 })
        ])
    # POST /ingest — called cross-origin, so it carries CORS preflight options.
    self.ingest_resource = self.api.root.add_resource(
        "ingest",
        default_cors_preflight_options=apigw.CorsOptions(
            allow_origins=apigw.Cors.ALL_ORIGINS,
            allow_methods=["POST", "OPTIONS"],
            allow_headers=apigw.Cors.DEFAULT_HEADERS
            + ["Accept-Language", "X-Requested-With"]))
    on_demand_integration = apigw.LambdaIntegration(on_demand_function)
    self.ingest_method = self.ingest_resource.add_method(
        "POST",
        on_demand_integration,
        method_responses=[
            apigw.MethodResponse(status_code="200",
                                 response_models={
                                     "application/json": apigw.Model.EMPTY_MODEL
                                 })
        ])

    def endpoint_url(resource_name):
        # Public invoke URL of a resource on the deployed stage.
        return (f"https://{self.api.rest_api_id}.execute-api."
                f"{core.Stack.of(self).region}.amazonaws.com/"
                f"{names.API_STAGE}/{resource_name}")

    # The on-demand lambda calls back into the webhook endpoint.
    on_demand_function.add_environment("WEBHOOK_ENDPOINT_URL",
                                       endpoint_url("new_recording"))
    # Stack outputs: endpoint URLs, resource ids, and the rest api id.
    core.CfnOutput(
        self,
        "WebhookEndpoint",
        export_name=f"{stack_name}-{names.WEBHOOK_ENDPOINT}-url",
        value=endpoint_url("new_recording"))
    core.CfnOutput(
        self,
        "OnDemandEndpoint",
        export_name=f"{stack_name}-{names.ON_DEMAND_ENDPOINT}-url",
        value=endpoint_url("ingest"))
    core.CfnOutput(
        self,
        "WebhookResourceId",
        export_name=f"{stack_name}-{names.WEBHOOK_ENDPOINT}-resource-id",
        value=self.new_recording_resource.resource_id)
    core.CfnOutput(
        self,
        "OnDemandResourceId",
        export_name=f"{stack_name}-{names.ON_DEMAND_ENDPOINT}-resource-id",
        value=self.ingest_resource.resource_id)
    core.CfnOutput(self,
                   "RestApiId",
                   export_name=f"{stack_name}-{names.REST_API}-id",
                   value=self.api.rest_api_id)
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Bashoutter stack: DynamoDB-backed haiku CRUD API plus a public S3
    website bucket serving the static GUI.

    :param scope: parent construct, used by CDK.
    :param construct_id: construct id, used by CDK.
    """
    super().__init__(scope, construct_id, **kwargs)

    # DynamoDB table for storing haiku entries; on-demand billing,
    # removed together with the stack
    table = ddb.Table(self, 'Bashoutter-Table',
                      partition_key=ddb.Attribute(
                          name='item_id',
                          type=ddb.AttributeType.STRING),
                      billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
                      removal_policy=core.RemovalPolicy.DESTROY)

    # Next, a public S3 website bucket that serves the static GUI content
    bucket = s3.Bucket(self, 'Bashoutter-Bucket',
                       website_index_document='index.html',
                       public_read_access=True,
                       removal_policy=core.RemovalPolicy.DESTROY)
    s3_deploy.BucketDeployment(
        self, 'BucketDeployment',
        destination_bucket=bucket,
        sources=[s3_deploy.Source.asset('./gui/dist')],
        retain_on_delete=False,
    )

    # Lambda function definitions — settings shared by all handlers
    common_params = {
        'runtime': _lambda.Runtime.PYTHON_3_7,
        'environment': {
            'TABLE_NAME': table.table_name
        }
    }
    get_haiku_lambda = _lambda.Function(
        self, "GetHaiku",
        code=_lambda.Code.from_asset("api"),
        handler="api.get_haiku",
        memory_size=512,
        **common_params,
    )
    post_haiku_lambda = _lambda.Function(
        self, "PostHaiku",
        code=_lambda.Code.from_asset("api"),
        handler="api.post_haiku",
        **common_params,
    )
    patch_haiku_lambda = _lambda.Function(
        self, "PatchHaiku",
        code=_lambda.Code.from_asset("api"),
        handler="api.patch_haiku",
        **common_params,
    )
    delete_haiku_lambda = _lambda.Function(
        self, "DeleteHaiku",
        code=_lambda.Code.from_asset("api"),
        handler="api.delete_haiku",
        **common_params,
    )

    # Grant table permissions: read-only for GET, read/write for the
    # mutating verbs
    table.grant_read_data(get_haiku_lambda)
    table.grant_read_write_data(post_haiku_lambda)
    table.grant_read_write_data(patch_haiku_lambda)
    table.grant_read_write_data(delete_haiku_lambda)

    # Bind API paths to the lambdas via API Gateway, with permissive CORS
    # for the browser GUI
    api = apigw.RestApi(self, "BashoutterApi",
                        default_cors_preflight_options=apigw.CorsOptions(
                            allow_origins=apigw.Cors.ALL_ORIGINS,
                            allow_methods=apigw.Cors.ALL_METHODS,
                        ))
    # /haiku handles collection-level GET and POST
    haiku = api.root.add_resource("haiku")
    haiku.add_method("GET", apigw.LambdaIntegration(get_haiku_lambda))
    haiku.add_method("POST", apigw.LambdaIntegration(post_haiku_lambda))
    # /haiku/{item_id} handles item-level PATCH and DELETE
    haiku_item_id = haiku.add_resource("{item_id}")
    haiku_item_id.add_method("PATCH",
                             apigw.LambdaIntegration(patch_haiku_lambda))
    haiku_item_id.add_method("DELETE",
                             apigw.LambdaIntegration(delete_haiku_lambda))
def __init__(self, scope: core.Construct, id: str, *, prefix: str,
             environment: str, configuration, **kwargs):
    """Robust web-service construct: a LambdaRestApi driven entirely by a
    validated configuration dict (optional buckets, custom domain, CORS,
    stage options, and a tree of resources each backed by its own lambda).

    :param scope: Stack class, used by CDK.
    :param id: ID of the construct, used by CDK.
    :param prefix: Prefix of the construct, used for naming purposes.
    :param environment: Environment of the construct, used for naming purposes.
    :param configuration: Configuration of the construct. In this case
        APIGATEWAY_LAMBDA_SIMPLE_WEB_SERVICE_SCHEMA.
    :param kwargs: Other parameters that could be used by the construct.
    :raises RuntimeError: when proxy is disabled but no default HTTP
        methods are configured (the api would have no routable method).
    """
    super().__init__(scope, id, **kwargs)
    self.prefix = prefix
    self.environment_ = environment
    self._configuration = configuration

    # Validating that the payload passed is correct
    validate_configuration(
        configuration_schema=APIGATEWAY_ROBUST_WEB_SERVICE_SCHEMA,
        configuration_received=self._configuration)

    # Define S3 Buckets Cluster (optional; only when "buckets" is a list)
    if isinstance(self._configuration.get("buckets"), list):
        self._s3_buckets = [
            base_bucket(self, **bucket)
            for bucket in self._configuration["buckets"]
        ]

    api_configuration = self._configuration["api"]
    api_gateway_name = (self.prefix + "_" +
                        api_configuration["apigateway_name"] + "_" +
                        self.environment_)
    api_gateway_name_description = api_configuration.get(
        "apigateway_description")

    # Define API Gateway Authorizer (set_authorizer is defined on this class)
    self._authorizer_function = None
    self._gateway_authorizer = self.set_authorizer()

    # Defining Custom Domain (optional)
    domain_options = None
    custom_domain = api_configuration["settings"].get("custom_domain")
    if custom_domain is not None:
        domain_name = custom_domain["domain_name"]
        certificate_arn = custom_domain["certificate_arn"]
        domain_options = api_gateway.DomainNameOptions(
            certificate=cert_manager.Certificate.from_certificate_arn(
                self, id=domain_name, certificate_arn=certificate_arn),
            domain_name=domain_name,
        )

    # Define API Gateway Lambda Handler (the api's default handler)
    self._handler_function = base_lambda_function(
        self, **api_configuration["settings"]["default_handler"])

    # Validating Proxy configuration: with proxy disabled and no default
    # methods defined, the api would have nothing to route.
    proxy_configuration = api_configuration["settings"]["proxy"]
    if proxy_configuration is False and api_configuration["settings"].get(
            "default_http_methods") is None:
        raise RuntimeError(
            "Unable to check which method to use for the API! "
            "Use proxy: True or define methods...")

    # Defining allowed binary media types by API Gateway
    binary_media_types = api_configuration["settings"].get(
        "default_media_types")

    # Defining CORS preflight options (optional)
    default_cors_options = None
    default_cors_configuration = api_configuration["settings"].get(
        "default_cors_options")
    if default_cors_configuration is not None:
        default_cors_options = api_gateway.CorsOptions(
            allow_origins=default_cors_configuration["allow_origins"],
            allow_methods=["ANY"],
            status_code=default_cors_configuration["options_status_code"],
        )

    # Defining STAGE Options (optional); map the configured logging-level
    # string onto the enum, defaulting to ERROR when no member matches.
    default_stage_options = None
    default_stage_configuration = api_configuration["settings"].get(
        "default_stage_options")
    if default_stage_configuration is not None:
        logging_level = api_gateway.MethodLoggingLevel.ERROR
        logging_level_configuration = default_stage_configuration[
            "logging_level"]
        for element in api_gateway.MethodLoggingLevel:
            if logging_level_configuration in str(element):
                logging_level = element
        default_stage_options = api_gateway.StageOptions(
            logging_level=logging_level,
            metrics_enabled=default_stage_configuration["metrics_enabled"],
        )

    # Defining Rest API Gateway with Lambda Integration
    self._lambda_rest_api = api_gateway.LambdaRestApi(
        self,
        id=api_gateway_name,
        rest_api_name=api_gateway_name,
        description=api_gateway_name_description,
        domain_name=domain_options,
        handler=self._handler_function,
        proxy=proxy_configuration,
        binary_media_types=binary_media_types,
        default_cors_preflight_options=default_cors_options,
        cloud_watch_role=True,
        deploy_options=default_stage_options,
    )

    # Add Custom responses so error payloads are CORS-readable in browsers
    self._lambda_rest_api.add_gateway_response(
        f"{self.prefix}_4XXresponse_{self.environment_}",
        type=api_gateway.ResponseType.DEFAULT_4_XX,
        response_headers={"Access-Control-Allow-Origin": "'*'"},
    )
    self._lambda_rest_api.add_gateway_response(
        f"{self.prefix}_5XXresponse_{self.environment_}",
        type=api_gateway.ResponseType.DEFAULT_5_XX,
        response_headers={"Access-Control-Allow-Origin": "'*'"},
    )

    # Define API Gateway Root Methods (served by the default handler)
    root_methods = api_configuration["settings"].get(
        "default_http_methods", list())
    for method in root_methods:
        self._lambda_rest_api.root.add_method(
            http_method=method, authorizer=self._gateway_authorizer)

    # Defining Resource Trees for API Gateway with Custom Integrations:
    # each node gets its own lambda handler; nesting goes three levels
    # deep (resource -> child -> childs).
    resource_trees = api_configuration["resource_trees"]
    for resource_tree in resource_trees:
        resource_base = self._lambda_rest_api.root.add_resource(
            path_part=resource_tree["resource_name"])
        resource_base_handler = base_lambda_function(
            self, **resource_tree["handler"])
        self._attach_methods(resource_base, resource_base_handler,
                             resource_tree["methods"])

        resource_base_child_definition = resource_tree.get("child")
        if resource_base_child_definition is not None:
            resource_base_child = resource_base.add_resource(
                path_part=resource_base_child_definition["resource_name"])
            resource_base_child_handler = base_lambda_function(
                self, **resource_base_child_definition["handler"])
            self._attach_methods(resource_base_child,
                                 resource_base_child_handler,
                                 resource_base_child_definition["methods"])

            resource_base_child_trees = resource_base_child_definition.get(
                "childs", list())
            for resource_base_grandchild_tree in resource_base_child_trees:
                resource_base_grandchild = resource_base_child.add_resource(
                    path_part=resource_base_grandchild_tree[
                        "resource_name"])
                resource_base_grandchild_handler = base_lambda_function(
                    self, **resource_base_grandchild_tree["handler"])
                self._attach_methods(
                    resource_base_grandchild,
                    resource_base_grandchild_handler,
                    resource_base_grandchild_tree["methods"])

def _attach_methods(self, resource, handler, methods):
    """Attach each HTTP method in *methods* to *resource*, backed by
    *handler* through a Lambda integration and guarded by this
    construct's gateway authorizer."""
    for method in methods:
        resource.add_method(
            http_method=method,
            integration=api_gateway.LambdaIntegration(handler=handler),
            authorizer=self._gateway_authorizer,
        )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """QuickSight dashboard-embedding stack: a public website bucket, an
    embed-URL lambda, and an unauthenticated API Gateway in front of it.

    :param scope: parent construct, used by CDK.
    :param id: construct id, used by CDK.
    """
    super().__init__(scope, id, **kwargs)
    self.current_dir = os.path.dirname(__file__)

    # Public website bucket that hosts the embedding page.
    self.website_bucket = s3.Bucket(
        self,
        "qs-embed-bucket",
        bucket_name=f'quicksight-embed-{core.Aws.ACCOUNT_ID}',
        website_index_document="index.html",
        public_read_access=True)

    # Execution role for the embed lambda: CloudWatch logging, cross-account
    # role assumption, secret reads, and QuickSight access.
    self.quicksight_embed_lambda_role = iam.Role(
        self,
        'quicksight-embed-lambda-role',
        description='Role for the Quicksight dashboard embed Lambdas',
        role_name='quicksight-embed-lambda-role',
        max_session_duration=core.Duration.seconds(3600),
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        inline_policies={
            'AllowAccess': iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        'logs:CreateLogGroup', 'logs:CreateLogStream',
                        'logs:PutLogEvents'
                    ],
                    resources=[
                        f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:*'
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["sts:AssumeRole", "iam:ListRoles"],
                    resources=[
                        "arn:aws:iam::*:role/quicksight-migration-*-assume-role"
                    ]),
                # FIX: the Secrets Manager action prefix is
                # "secretsmanager", not "secrets" — the previous
                # "secrets:GetSecretValue" matched no real action, so the
                # lambda's secret reads were denied.
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["secretsmanager:GetSecretValue"],
                    resources=[
                        f"arn:aws:secretsmanager:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:secret:*"
                    ]),
                # NOTE(review): "quicksight:*" on "*" is very broad —
                # consider narrowing to the embed/dashboard actions used.
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=[
                                        "quicksight:*",
                                    ],
                                    resources=["*"])
            ])
        })

    # Lambda that generates the QuickSight embed URL.
    # NOTE(review): DASHBOARD_ID and the user ARN are hard-coded defaults —
    # consider making them configurable.
    self.quicksight_migration_lambda = _lambda.Function(
        self,
        'quicksight-migration-lambda',
        handler='quicksight_embed.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset(
            os.path.join(self.current_dir, '../lambda/quicksight_embed/')),
        function_name='quicksight_embed_lambda',
        role=self.quicksight_embed_lambda_role,
        timeout=core.Duration.minutes(3),
        memory_size=512,
        environment={
            'DASHBOARD_ID': '938b365e-c001-4723-9a27-029654da7531',
            'QUICKSIGHT_USER_ARN':
                f'arn:aws:quicksight:us-east-1:{core.Aws.ACCOUNT_ID}:user/default/quicksight-migration-user'
        })

    # API Gateway in front of the lambda (Solutions Construct); no
    # authorization, open CORS, invocable on the prod stage.
    self.apigw_lambda = ApiGatewayToLambda(
        self,
        "ApiGatewayToLambdaQSEmbed",
        existing_lambda_obj=self.quicksight_migration_lambda,
        api_gateway_props=apigw.LambdaRestApiProps(
            rest_api_name="quicksight-embed",
            handler=self.quicksight_migration_lambda,
            deploy=True,
            proxy=False,
            default_method_options=apigw.MethodOptions(
                authorization_type=apigw.AuthorizationType.NONE),
            default_cors_preflight_options=apigw.CorsOptions(
                allow_origins=apigw.Cors.ALL_ORIGINS,
                allow_methods=apigw.Cors.ALL_METHODS,
                allow_headers=[
                    'Access-Control-Allow-Origin',
                    'Access-Control-Allow-Headers', 'Content-Type'
                ]),
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=['execute-api:Invoke'],
                                    resources=["execute-api:/prod/*"],
                                    principals=[iam.ArnPrincipal("*")])
            ])))

    # GET /embedurl returns the signed embed URL.
    self.embedurl = self.apigw_lambda.api_gateway.root.add_resource(
        "embedurl")
    self.embedurl.add_method("GET")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """API Gateway front end for a SageMaker inference endpoint: POST on
    the api root is integrated directly with runtime.sagemaker
    InvokeEndpoint via a credentials role, with permissive CORS headers
    on both the integration and method responses.

    :param scope: parent construct, used by CDK.
    :param id: construct id, used by CDK.
    """
    super().__init__(scope, id, **kwargs)

    my_api = apigw.RestApi(
        self,
        "SageMakerAPI",
        default_cors_preflight_options=apigw.CorsOptions(
            allow_origins=["*"], allow_headers=["*"],
            allow_methods=["*"]))

    # Endpoint name comes from cdk.json context ("endpoint_name").
    # FIX: this value was previously fetched a second time inline when
    # building the integration URI, leaving this variable unused.
    endpoint_name = self.node.try_get_context("endpoint_name")

    options = apigw.IntegrationOptions(
        # Role API Gateway assumes to call SageMaker.
        # NOTE(review): actions=["*"] on resources=["*"] is far broader
        # than needed — sagemaker:InvokeEndpoint on the one endpoint
        # would suffice; flagged rather than changed.
        credentials_role=iam.Role(
            self,
            "SMInvoke",
            assumed_by=iam.ServicePrincipal("apigateway"),
            inline_policies={
                "FullSageMaker":
                    iam.PolicyDocument(statements=[
                        iam.PolicyStatement(actions=["*"], resources=["*"])
                    ])
            }),
        integration_responses=[
            apigw.IntegrationResponse(
                status_code="200",
                response_templates={"application/json": ""},
                response_parameters={
                    "method.response.header.Access-Control-Allow-Headers":
                        "'*'",
                    "method.response.header.Access-Control-Allow-Methods":
                        "'*'",
                    "method.response.header.Access-Control-Allow-Origin":
                        "'*'"
                })
        ])

    # NOTE(review): region is hard-coded to us-east-1 — confirm this is
    # intentional rather than core.Aws.REGION.
    integration = apigw.Integration(
        type=apigw.IntegrationType.AWS,
        uri=("arn:aws:apigateway:us-east-1:runtime.sagemaker:"
             "path/endpoints/{}/invocations").format(endpoint_name),
        integration_http_method="POST",
        options=options)

    # POST on the api root forwards the request body to the endpoint.
    apigw.Method(
        self,
        "PostRoot",
        http_method="POST",
        resource=my_api.root,
        integration=integration,
        options=apigw.MethodOptions(method_responses=[
            apigw.MethodResponse(
                status_code="200",
                response_parameters={
                    "method.response.header.Access-Control-Allow-Methods":
                        True,
                    "method.response.header.Access-Control-Allow-Headers":
                        True,
                    "method.response.header.Access-Control-Allow-Origin":
                        True
                })
        ]))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """QuickSight migration stack: a private artifacts bucket, an
    API-Gateway-to-SQS front end, and an SQS-triggered migration lambda.

    :param scope: parent construct, used by CDK.
    :param id: construct id, used by CDK.
    """
    super().__init__(scope, id, **kwargs)
    self.current_dir = os.path.dirname(__file__)

    # Private bucket holding migration artifacts.
    self.bucket = s3.Bucket(
        self,
        "qs-migration-bucket",
        bucket_name=f'quicksight-migration-{core.Aws.ACCOUNT_ID}',
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
    )

    # Execution role for the migration lambda: logging, cross-account role
    # assumption, bucket writes, secret reads, and QuickSight access.
    self.quicksight_migration_lambda_role = iam.Role(
        self,
        'quicksight-migration-lambda-role',
        description='Role for the Quicksight dashboard migration Lambdas',
        role_name='quicksight-migration-lambda-role',
        max_session_duration=core.Duration.seconds(3600),
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        inline_policies={
            'AllowAccess': iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        'logs:CreateLogGroup', 'logs:CreateLogStream',
                        'logs:PutLogEvents'
                    ],
                    resources=[
                        f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:*'
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["sts:AssumeRole", "iam:ListRoles"],
                    resources=[
                        "arn:aws:iam::*:role/quicksight-migration-*-assume-role"
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["s3:PutObject", "s3:ListBucket"],
                    resources=[
                        self.bucket.bucket_arn,
                        f"{self.bucket.bucket_arn}/*"
                    ]),
                # FIX: the Secrets Manager action prefix is
                # "secretsmanager", not "secrets" — the previous
                # "secrets:GetSecretValue" matched no real action, so the
                # lambda's secret reads were denied.
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["secretsmanager:GetSecretValue"],
                    resources=[
                        f"arn:aws:secretsmanager:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:secret:*"
                    ]),
                # NOTE(review): "quicksight:*" on "*" is very broad —
                # consider narrowing to the migration actions used.
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=[
                                        "quicksight:*",
                                    ],
                                    resources=["*"])
            ])
        })

    # API Gateway to SQS (Solutions Construct): POST-only, unauthenticated,
    # open CORS, invocable on the prod stage. Queue visibility timeout
    # matches the consumer lambda's 15-minute timeout.
    self.apigw_sqs = ApiGatewayToSqs(
        self,
        "ApiGatewayToSQSqsMigration",
        allow_create_operation=True,
        allow_read_operation=False,
        allow_delete_operation=False,
        api_gateway_props=apigw.RestApiProps(
            rest_api_name="quicksight-migration-sqs",
            deploy=True,
            default_method_options=apigw.MethodOptions(
                authorization_type=apigw.AuthorizationType.NONE),
            default_cors_preflight_options=apigw.CorsOptions(
                allow_origins=apigw.Cors.ALL_ORIGINS,
                allow_methods=apigw.Cors.ALL_METHODS,
                allow_headers=[
                    'Access-Control-Allow-Origin',
                    'Access-Control-Allow-Headers', 'Content-Type'
                ]),
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=['execute-api:Invoke'],
                                    resources=["execute-api:/prod/*"],
                                    principals=[iam.ArnPrincipal("*")])
            ])),
        queue_props=sqs.QueueProps(
            queue_name="quicksight-migration-sqs",
            visibility_timeout=core.Duration.minutes(15)))

    # Migration worker consuming the queue.
    self.quicksight_migration_lambda = _lambda.Function(
        self,
        'quicksight-migration-lambda',
        handler='quicksight_migration.lambda_function.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset(
            os.path.join(self.current_dir,
                         '../lambda/quicksight_migration/')),
        function_name='quicksight_migration_lambda',
        role=self.quicksight_migration_lambda_role,
        timeout=core.Duration.minutes(15),
        memory_size=1024,
        environment={
            'BUCKET_NAME': self.bucket.bucket_name,
            'S3_KEY': 'None',
            'INFRA_CONFIG_PARAM': '/infra/config',
            'SQS_URL': self.apigw_sqs.sqs_queue.queue_url
        })

    # Trigger the lambda from the queue, one message at a time.
    self.quicksight_migration_lambda.add_event_source(
        event_sources.SqsEventSource(
            enabled=True,
            queue=self.apigw_sqs.sqs_queue,
        ))
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Blog stack: a single proxy lambda behind an edge-optimized REST API
    on api.juliuskrahn.com, backed by article/comment DynamoDB tables and
    an admin-key secret.

    :param scope: parent construct, used by CDK.
    :param construct_id: construct id, used by CDK.
    """
    super().__init__(scope, construct_id, **kwargs)
    core.Tags.of(self).add("Project", "Blog")

    # Shared dependency layer for the lambda handler.
    core_layer = lambda_.LayerVersion(
        self, "blog-core-layer",
        code=lambda_.Code.from_asset("lambda/core_layer"),
        compatible_runtimes=[lambda_.Runtime.PYTHON_3_8]
    )

    # Single lambda that serves every API route (proxy-style).
    proxy_fn = lambda_.Function(
        self, "blog-proxy-fn",
        runtime=lambda_.Runtime.PYTHON_3_8,
        handler="lambda_function.handler",
        code=lambda_.Code.from_asset("lambda/proxy"),
        layers=[core_layer]
    )
    proxy_fn_integration = apigw.LambdaIntegration(proxy_fn)

    # Custom domain with a pre-issued us-east-1 (edge) certificate.
    domain_name = apigw.DomainName(
        self, "blog-api-domain-name",
        domain_name="api.juliuskrahn.com",
        certificate=cm.Certificate.from_certificate_arn(
            self,
            "blog-domain-name-certificate",
            "arn:aws:acm:us-east-1:473883619336:certificate/1ad12871-4b46-44ef-a24d-7af5ac43972b"
        ),
        endpoint_type=apigw.EndpointType.EDGE
    )

    # REST API: every route defaults to the proxy lambda; open CORS.
    api = apigw.RestApi(
        self, "blog-api",
        default_cors_preflight_options=apigw.CorsOptions(
            allow_origins=apigw.Cors.ALL_ORIGINS,
            allow_methods=apigw.Cors.ALL_METHODS
        ),
        default_integration=proxy_fn_integration
    )
    domain_name.add_base_path_mapping(api)

    # DNS: api.juliuskrahn.com -> the API Gateway custom domain.
    route53.ARecord(
        self, "blog-api-a-record",
        record_name="api",
        target=route53.RecordTarget.from_alias(
            route53_targets.ApiGatewayDomain(domain_name)),
        zone=route53.HostedZone.from_lookup(
            self, "blog-hosted-zone", domain_name="juliuskrahn.com")
    )
    # Greedy {proxy+} resource so all paths reach the default integration.
    api.root.add_proxy()

    # Articles, keyed by url title, with a tag index for listing pages.
    article_table = dynamodb.Table(
        self, "blog-article-table",
        table_name="blog-article",
        partition_key=dynamodb.Attribute(
            name="urlTitle", type=dynamodb.AttributeType.STRING),
        billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
        point_in_time_recovery=True
    )
    article_table.add_global_secondary_index(
        index_name="tagIndex",
        partition_key=dynamodb.Attribute(
            name="tag", type=dynamodb.AttributeType.STRING),
        sort_key=dynamodb.Attribute(
            name="published", type=dynamodb.AttributeType.STRING),
        projection_type=dynamodb.ProjectionType.INCLUDE,
        non_key_attributes=["urlTitle", "title", "description"]
    )
    article_table.grant_read_write_data(proxy_fn)

    # Comments, keyed by the owning article's url title.
    comment_table = dynamodb.Table(
        self, "blog-comment-table",
        table_name="blog-comment",
        partition_key=dynamodb.Attribute(
            name="articleUrlTitle", type=dynamodb.AttributeType.STRING),
        sort_key=dynamodb.Attribute(
            name="id", type=dynamodb.AttributeType.STRING),
        billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST
    )
    comment_table.grant_read_write_data(proxy_fn)

    # Secret holding the admin key; the proxy lambda may read it.
    admin_key_secret = sm.Secret(
        self, "blog-admin-key",
        secret_name="blog-admin-key"
    )
    admin_key_secret.grant_read(proxy_fn)
def __init__(
    self,
    scope: core.Construct,
    id: str,
    create_dependency_layer: Callable[[], None],
    **kwargs
) -> None:
    """Provision the Aarogya Setu status-check backend.

    Builds a Cognito-protected REST API with three endpoints
    (POST /status, POST /bulk_status, GET /scan), the Lambda handlers
    behind them, the DynamoDB tables and SQS queue they share, and the
    CfnOutputs the frontend application consumes.

    :param create_dependency_layer: callback that builds
        ``lambda/dependency-layer.zip`` before it is staged as an asset.
    """
    super().__init__(scope, id, **kwargs)

    # Build the dependency-layer zip before the asset below references it.
    create_dependency_layer()

    # Secret holding the Aarogya Setu OpenAPI credentials.
    api_secret = secretsmanager.Secret(
        self,
        "ActualApiSecret",
        description="Secrets required to communicate with Aarogya Setu OpenAPI",
    )

    # Cognito user pool + client used to authorize the API methods.
    user_pool = cognito.UserPool(
        self,
        "AppUserPool",
        self_sign_up_enabled=True,
        account_recovery=cognito.AccountRecovery.PHONE_AND_EMAIL,
        user_verification=cognito.VerificationEmailStyle.CODE,
        auto_verify={"email": True},
        standard_attributes={"email": {"required": True, "mutable": True}},
    )
    user_pool_client = cognito.UserPoolClient(
        self, "UserPoolClient", user_pool=user_pool
    )

    # Storage and queueing: bulk requests are fanned out through SQS;
    # both tables expire rows via the "expdate" TTL attribute.
    bulk_request_queue = sqs.Queue(
        self,
        "BulkRequestQueue",
    )
    user_status_table = ddb.Table(
        self,
        "UserStatusTable",
        partition_key={"name": "mobile_number", "type": ddb.AttributeType.STRING},
        time_to_live_attribute="expdate",
    )
    self._user_status_table = user_status_table
    requests_table = ddb.Table(
        self,
        "RequestsTable",
        partition_key={"name": "mobile_number", "type": ddb.AttributeType.STRING},
        time_to_live_attribute="expdate",
    )

    # Layer with the runtime dependencies zipped by create_dependency_layer().
    dependency_layer = _lambda.LayerVersion(
        self,
        "PythonDependencies",
        code=_lambda.Code.from_asset(path.join("lambda", "dependency-layer.zip")),
        compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
        description="The layer contains requests and pyjwt dependencies",
    )

    # Handler for a single status request.
    # NOTE: construct id keeps its original "Requeset" typo on purpose —
    # changing it would replace the deployed resource.
    single_request = _lambda.Function(
        self,
        "SingleRequesetHandler",
        runtime=_lambda.Runtime.PYTHON_3_7,
        # Code.asset() is deprecated; from_asset() is the supported API
        # and is what the layer above already uses.
        code=_lambda.Code.from_asset("lambda"),
        handler="single_request.handler",
        timeout=core.Duration.seconds(10),
        layers=[dependency_layer],
        environment={
            "USER_STATUS_TABLE": user_status_table.table_name,
            "REQUESTS_TABLE": requests_table.table_name,
            "API_SECRET_ARN": api_secret.secret_full_arn,
        },
    )
    # give lambda access permissions to ddb tables and secrets
    user_status_table.grant_read_write_data(single_request)
    requests_table.grant_read_write_data(single_request)
    api_secret.grant_read(single_request)

    # Handler that accepts a bulk request and enqueues the individual jobs.
    bulk_request = _lambda.Function(
        self,
        "BulkRequestHandler",
        runtime=_lambda.Runtime.PYTHON_3_7,
        code=_lambda.Code.from_asset("lambda"),
        handler="bulk_request.handler",
        timeout=core.Duration.seconds(30),
        environment={
            "QUEUE_URL": bulk_request_queue.queue_url,
        },
    )
    # give lambda access to write to queue
    bulk_request_queue.grant_send_messages(bulk_request)

    # Worker triggered by the queue; processes one message at a time and
    # writes results to both tables.
    queue_receiver = _lambda.Function(
        self,
        "QueueReceiverHandler",
        runtime=_lambda.Runtime.PYTHON_3_7,
        code=_lambda.Code.from_asset("lambda"),
        handler="queue_receiver.handler",
        timeout=core.Duration.seconds(10),
        layers=[dependency_layer],
        environment={
            "USER_STATUS_TABLE": user_status_table.table_name,
            "REQUESTS_TABLE": requests_table.table_name,
            "QUEUE_URL": bulk_request_queue.queue_url,
            "API_SECRET_ARN": api_secret.secret_full_arn,
        },
    )
    queue_receiver.add_event_source(
        events.SqsEventSource(bulk_request_queue, batch_size=1)
    )
    # give queue receiver access to tables, queue and secrets
    bulk_request_queue.grant_consume_messages(queue_receiver)
    user_status_table.grant_read_write_data(queue_receiver)
    requests_table.grant_read_write_data(queue_receiver)
    api_secret.grant_read(queue_receiver)

    # Read-only handler that scans the status table for the /scan endpoint.
    scan_table = _lambda.Function(
        self,
        "ScanTableHandler",
        runtime=_lambda.Runtime.PYTHON_3_7,
        code=_lambda.Code.from_asset("lambda"),
        handler="scan_table.handler",
        timeout=core.Duration.seconds(30),
        environment={
            "USER_STATUS_TABLE": user_status_table.table_name,
        },
    )
    user_status_table.grant_read_data(scan_table)

    # REST API with open CORS; every method below requires Cognito auth.
    api = apigw.RestApi(
        self,
        "ASetuApiGateway",
        default_cors_preflight_options=apigw.CorsOptions(
            allow_origins=apigw.Cors.ALL_ORIGINS
        ),
    )
    auth = apigw.CfnAuthorizer(
        self,
        "ApiCognitoAuthorizer",
        name="CognitoAuthorizer",
        type="COGNITO_USER_POOLS",
        authorizer_result_ttl_in_seconds=300,
        identity_source="method.request.header.Authorization",
        rest_api_id=api.rest_api_id,
        provider_arns=[user_pool.user_pool_arn],
    )

    single_request_integration = apigw.LambdaIntegration(single_request, proxy=True)
    single_request_resource = api.root.add_resource("status")
    single_method = single_request_resource.add_method(
        "POST",
        single_request_integration,
        api_key_required=False,
        authorizer=auth,
        authorization_type=apigw.AuthorizationType.COGNITO,
    )

    bulk_request_integration = apigw.LambdaIntegration(bulk_request, proxy=True)
    bulk_request_resource = api.root.add_resource("bulk_status")
    bulk_method = bulk_request_resource.add_method(
        "POST",
        bulk_request_integration,
        api_key_required=False,
        authorizer=auth,
        authorization_type=apigw.AuthorizationType.COGNITO,
    )

    scan_table_integration = apigw.LambdaIntegration(scan_table, proxy=True)
    scan_table_resource = api.root.add_resource("scan")
    scan_method = scan_table_resource.add_method(
        "GET",
        scan_table_integration,
        api_key_required=False,
        authorizer=auth,
        authorization_type=apigw.AuthorizationType.COGNITO,
    )

    # Override authorizer to use COGNITO to authorize apis
    # Solution from: https://github.com/aws/aws-cdk/issues/9023#issuecomment-658309644
    methods = [single_method, bulk_method, scan_method]
    for method in methods:
        method.node.find_child("Resource").add_property_override(
            "AuthorizationType", "COGNITO_USER_POOLS"
        )
        method.node.find_child("Resource").add_property_override(
            "AuthorizerId", {"Ref": auth.logical_id}
        )

    # Export output values for frontend application.
    core.CfnOutput(
        self,
        "user-pool-id",
        value=user_pool.user_pool_id,
        export_name="USER-POOL-ID",
    )
    core.CfnOutput(
        self,
        "user-pool-web-client",
        value=user_pool_client.user_pool_client_id,
        export_name="WEB-CLIENT-ID",
    )
    core.CfnOutput(
        self, "api-endpoint-url", value=api.url, export_name="API-ENDPOINT-URL"
    )
    core.CfnOutput(
        self,
        "deployment-region",
        value=self.region,
        export_name="REGION",
    )
    core.CfnOutput(
        self,
        "stack-name",
        value=self.stack_name,
        export_name="STACK-NAME",
    )
    core.CfnOutput(
        self,
        "api-secret-arn",
        value=api_secret.secret_full_arn,
        export_name="API-SECRET-ARN",
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision the QuickSight migration pipeline.

    An API Gateway POST endpoint forwards its request body straight into
    an SQS queue; the queue triggers a Lambda that performs the actual
    QuickSight asset migration, using a dedicated execution role and a
    target-account assume-role. An S3 bucket stores migration artifacts.
    """
    super().__init__(scope, id, **kwargs)
    self.current_dir = os.path.dirname(__file__)

    # Bucket for migration artifacts; name is account-scoped for uniqueness.
    self.bucket = s3.Bucket(
        self,
        "qs-migration-bucket",
        bucket_name=f'quicksight-migration-{core.Aws.ACCOUNT_ID}',
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
    )

    # Execution role for the migration Lambda: logging, assuming the
    # per-target roles, writing artifacts to the bucket, reading secrets,
    # and full QuickSight CRUD.
    self.quicksight_migration_lambda_role = iam.Role(
        self,
        'quicksight-migration-lambda-role',
        description='Role for the Quicksight dashboard migration Lambdas',
        role_name='quicksight-migration-lambda-role',
        max_session_duration=core.Duration.seconds(3600),
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        inline_policies={
            'AllowAccess': iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        'logs:CreateLogGroup',
                        'logs:CreateLogStream',
                        'logs:PutLogEvents'
                    ],
                    resources=[
                        f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:*'
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["sts:AssumeRole", "iam:ListRoles"],
                    resources=[
                        "arn:aws:iam::*:role/quicksight-migration-*-assume-role"
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["s3:PutObject", "s3:ListBucket"],
                    resources=[
                        self.bucket.bucket_arn,
                        f"{self.bucket.bucket_arn}/*"
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    # FIX: the Secrets Manager IAM action prefix is
                    # "secretsmanager", not "secrets" — the original
                    # "secrets:GetSecretValue" matched no real action, so
                    # the Lambda could never read its secrets.
                    actions=["secretsmanager:GetSecretValue"],
                    resources=[
                        f"arn:aws:secretsmanager:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:secret:*"
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "quicksight:Create*",
                        "quicksight:Delete*",
                        "quicksight:Describe*",
                        "quicksight:List*",
                        "quicksight:Search*",
                        "quicksight:Update*"
                    ],
                    resources=["*"])
            ])
        })

    # Role assumed in the *target* account by the migration Lambda:
    # QuickSight CRUD plus reading the /infra/config SSM parameter.
    self.quicksight_migration_target_assume_role = iam.Role(
        self,
        'quicksight-migration-target-assume-role',
        description=
        'Role for the Quicksight dashboard migration Lambdas to assume',
        role_name='quicksight-migration-target-assume-role',
        max_session_duration=core.Duration.seconds(3600),
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        inline_policies={
            'AllowAccess':
            iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "quicksight:Create*",
                        "quicksight:Delete*",
                        "quicksight:Describe*",
                        "quicksight:List*",
                        "quicksight:Search*",
                        "quicksight:Update*"
                    ],
                    resources=["*"]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "ssm:GetParameter",
                    ],
                    resources=["arn:aws:ssm:*:*:parameter/infra/config"])
            ])
        })
    # Also allow any principal in this account to assume the target role.
    self.quicksight_migration_target_assume_role.assume_role_policy.add_statements(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=['sts:AssumeRole'],
            principals=[iam.AccountPrincipal(core.Aws.ACCOUNT_ID)]))

    # API Gateway -> SQS integration role.
    # NOTE(review): AmazonSQSFullAccess is broader than needed — the role
    # only sends to one queue; consider queue.grant_send_messages() instead.
    self.rest_api_role = iam.Role(
        self,
        "RestAPIRole",
        assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSQSFullAccess")
        ])

    # Queue feeding the migration Lambda; visibility timeout matches the
    # Lambda's 15-minute maximum runtime below.
    self.queue = sqs.Queue(self,
                           "quicksight-migration-sqs-queue",
                           queue_name="quicksight-migration-sqs",
                           visibility_timeout=core.Duration.minutes(15))

    # 200 response with permissive CORS headers for the POST method.
    self.integration_response = apigw.IntegrationResponse(
        status_code="200",
        response_templates={"application/json": ""},
        response_parameters={
            "method.response.header.Access-Control-Allow-Headers":
            "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
            "method.response.header.Access-Control-Allow-Origin": "'*'",
            "method.response.header.Access-Control-Allow-Methods":
            "'POST,OPTIONS'"
        })

    # Map the raw request body into an SQS SendMessage form post.
    self.api_integration_options = apigw.IntegrationOptions(
        credentials_role=self.rest_api_role,
        integration_responses=[self.integration_response],
        request_templates={
            "application/json":
            'Action=SendMessage&MessageBody=$util.urlEncode("$input.body")'
        },
        passthrough_behavior=apigw.PassthroughBehavior.NEVER,
        request_parameters={
            "integration.request.header.Content-Type":
            "'application/x-www-form-urlencoded'"
        })
    self.api_resource_sqs_integration = apigw.AwsIntegration(
        service="sqs",
        integration_http_method="POST",
        path="{}/{}".format(core.Aws.ACCOUNT_ID, self.queue.queue_name),
        options=self.api_integration_options)

    self.base_api = apigw.RestApi(
        self,
        'quicksight-migration-sqs',
        rest_api_name='quicksight-migration-sqs',
        default_cors_preflight_options=apigw.CorsOptions(
            allow_origins=apigw.Cors.ALL_ORIGINS,
            allow_methods=["POST", "OPTIONS"],
            allow_headers=[
                'Access-Control-Allow-Origin',
                'Access-Control-Allow-Headers', 'Content-Type'
            ]))
    self.base_api.root.add_method(
        "POST",
        self.api_resource_sqs_integration,
        method_responses=[{
            'statusCode': '200',
            'responseParameters': {
                'method.response.header.Access-Control-Allow-Headers': True,
                'method.response.header.Access-Control-Allow-Methods': True,
                'method.response.header.Access-Control-Allow-Origin': True
            }
        }])

    # Worker Lambda: consumes queue messages and runs the migration.
    self.quicksight_migration_lambda = _lambda.Function(
        self,
        'quicksight-migration-lambda',
        handler='quicksight_migration.lambda_function.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset(
            os.path.join(self.current_dir,
                         '../lambda/quicksight_migration/')),
        function_name='quicksight_migration_lambda',
        role=self.quicksight_migration_lambda_role,
        timeout=core.Duration.minutes(15),
        memory_size=1024,
        environment={
            'BUCKET_NAME': self.bucket.bucket_name,
            'S3_KEY': 'None',
            'INFRA_CONFIG_PARAM': '/infra/config',
            'SQS_URL': self.queue.queue_url
        })
    self.sqs_event_source = event_sources.SqsEventSource(self.queue)
    self.quicksight_migration_lambda.add_event_source(
        self.sqs_event_source)

    core.CfnOutput(self,
                   "MigrationAPIGatewayURL",
                   value=self.base_api.url,
                   description="Migration API GW URL")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision the URL-shortener service.

    A Lambda behind API Gateway implements /shorten (POST) and
    /unshorten/{shorturl} (GET) using two auto-scaled DynamoDB tables
    (mapping + atomic counter). A static website in S3 — patched with
    the live API URL by a Lambda-backed custom resource — is served via
    CloudFront alongside the unshorten API origin.
    """
    super().__init__(scope, id, **kwargs)

    # DDB table to store the Long and Short URLs with Short URL as the partition key
    url_mapping_table = ddb.Table(
        self,
        "url_shortener_mapping_table",
        partition_key=ddb.Attribute(name="short_url",
                                    type=ddb.AttributeType.STRING),
        read_capacity=10,
        write_capacity=10,
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    # AutoScaling of RCUs/WCUs with a Target Utilization of 70%
    url_mapping_table.auto_scale_read_capacity(
        min_capacity=10, max_capacity=40000).scale_on_utilization(
            target_utilization_percent=70)
    url_mapping_table.auto_scale_write_capacity(
        min_capacity=10, max_capacity=40000).scale_on_utilization(
            target_utilization_percent=70)

    # DDB table to keep track of an Atomic Counter used for generating Short URLs
    url_counter_table = ddb.Table(
        self,
        "url_shortener_counter_table",
        partition_key=ddb.Attribute(name="id",
                                    type=ddb.AttributeType.STRING),
        read_capacity=10,
        write_capacity=10,
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    url_counter_table.auto_scale_read_capacity(
        min_capacity=10, max_capacity=40000).scale_on_utilization(
            target_utilization_percent=70)
    url_counter_table.auto_scale_write_capacity(
        min_capacity=10, max_capacity=40000).scale_on_utilization(
            target_utilization_percent=70)

    # Lambda function with custom code to handle shortening/unshortening logic
    url_lambda = _lambda.Function(
        self,
        "url_shortener_lambda",
        # FIX: Code.asset() is deprecated; from_asset() is the supported API.
        code=_lambda.Code.from_asset("lambda_proxy"),
        handler="lambda_function.lambda_handler",
        runtime=_lambda.Runtime.PYTHON_3_8,
        timeout=core.Duration.seconds(10),
        environment={
            "BACKOFF": "25",
            "HASH_DIGEST_SIZE": "8",
            "MAX_RETRIES": "3",
            "URL_SHORTENER_MAPPING_TABLE": url_mapping_table.table_name,
            "URL_SHORTENER_COUNTER_TABLE": url_counter_table.table_name,
        },
        log_retention=logs.RetentionDays.ONE_MONTH,
    )

    # Grant the Lambda item-level access to both tables.
    ddb_policy_statement = iam.PolicyStatement(
        actions=[
            "dynamodb:PutItem", "dynamodb:GetItem", "dynamodb:UpdateItem"
        ],
        effect=iam.Effect.ALLOW,
        resources=[
            url_mapping_table.table_arn, url_counter_table.table_arn
        ],
    )
    url_lambda.add_to_role_policy(ddb_policy_statement)

    # Include X-Requested-With along with the default CORS headers.
    # FIX: build a new list instead of calling .append() on
    # apigw.Cors.DEFAULT_HEADERS, which mutated the shared class-level
    # constant for every other stack in the process.
    headers = apigw.Cors.DEFAULT_HEADERS + ['X-Requested-With']

    # API Gateway endpoint to serve Shorten/Unshorten APIs
    url_rest_api = apigw.RestApi(
        self,
        "url_shortener_API",
        default_cors_preflight_options=apigw.CorsOptions(
            allow_origins=apigw.Cors.ALL_ORIGINS,
            allow_headers=headers,
            allow_methods=["POST", "GET", "OPTIONS"],
            status_code=200,
        ),
    )

    # Shorten API using POST and Lambda proxy
    url_rest_api.root.add_resource(path_part="shorten", ).add_method(
        http_method="POST",
        request_models={
            "application/json": apigw.Model.EMPTY_MODEL,
        },
        integration=apigw.LambdaIntegration(
            handler=url_lambda,
            proxy=True,
            allow_test_invoke=True,
        ),
    )

    # Unshorten API using GET and Lambda proxy
    url_rest_api.root.add_resource(path_part="unshorten", ).add_resource(
        path_part="{shorturl}").add_method(
            http_method="GET",
            request_models={
                "application/json": apigw.Model.EMPTY_MODEL,
            },
            integration=apigw.LambdaIntegration(
                handler=url_lambda,
                proxy=True,
                allow_test_invoke=True,
            ),
        )

    # S3 bucket to host the URL Shortener Static Website
    s3_web_hosting = s3.Bucket(
        self,
        "url_shortener_web_hosting_bucket",
        website_index_document="index.html",
    )
    # Uploading HTML and ICO files from local directory to S3 Static Website bucket
    s3_deploy = s3deploy.BucketDeployment(
        self,
        "website_source_files",
        sources=[s3deploy.Source.asset(path="website", )],
        destination_bucket=s3_web_hosting,
    )

    # Lambda function to integrate the API GW Shorten endpoint with the HTML file stored in S3
    cr_provider = _lambda.Function(
        self,
        "cr_provider",
        # FIX: deprecated Code.asset() replaced with from_asset().
        code=_lambda.Code.from_asset("custom_resource"),
        handler="lambda_function.lambda_handler",
        runtime=_lambda.Runtime.PYTHON_3_8,
        timeout=core.Duration.minutes(1),
    )
    # Allow the custom-resource Lambda to rewrite the hosted website files.
    lambda_cr_statement = iam.PolicyStatement(
        actions=["s3:List*", "s3:Get*", "s3:Put*"],
        effect=iam.Effect.ALLOW,
        resources=[
            s3_web_hosting.bucket_arn, s3_web_hosting.bucket_arn + "/*"
        ])
    cr_provider.add_to_role_policy(lambda_cr_statement)

    # CFN Custom Resource backed by Lambda: injects the live POST URL
    # into index.html after deployment.
    lambda_cr = core.CustomResource(
        self,
        "lambda_cr",
        service_token=cr_provider.function_arn,
        properties={
            "S3_BUCKET": s3_web_hosting.bucket_name,
            "S3_KEY": "index.html",
            "POST_URL": url_rest_api.url + "shorten",
        },
        removal_policy=core.RemovalPolicy.DESTROY,
    )
    # Custom Resource creation must happen after files are uploaded to S3.
    lambda_cr.node.add_dependency(s3_deploy)

    # Extract the API host from the stage URL ("https://host/stage/").
    # FIX: the original used url.lstrip("https://"), but lstrip strips a
    # *character set*, so it would also eat leading 'h'/'t'/'p'/'s'
    # characters from the host itself. Fn.split/Fn.select are also safe
    # on unresolved CloudFormation tokens: element 2 of the "/"-split is
    # the host.
    api_host = core.Fn.select(2, core.Fn.split("/", url_rest_api.url))

    # CloudFront Distribution with S3 and APIGateway origins
    url_cf_distribution = cf.CloudFrontWebDistribution(
        self,
        "url_shortener_distribution",
        origin_configs=[
            cf.SourceConfiguration(
                s3_origin_source=cf.S3OriginConfig(
                    s3_bucket_source=s3_web_hosting,
                    origin_access_identity=cf.OriginAccessIdentity(
                        self,
                        id="OAI",
                        comment=
                        "OAI that allows CloudFront to access the S3 bucket"),
                ),
                behaviors=[
                    cf.Behavior(
                        is_default_behavior=False,
                        path_pattern="/index.html",
                    ),
                    cf.Behavior(
                        is_default_behavior=False,
                        path_pattern="/favicon.ico",
                    ),
                ]),
            cf.SourceConfiguration(
                custom_origin_source=cf.CustomOriginConfig(
                    domain_name=api_host,
                ),
                origin_path="/" + url_rest_api.deployment_stage.stage_name +
                "/unshorten",
                behaviors=[
                    cf.Behavior(
                        is_default_behavior=True,
                        allowed_methods=cf.CloudFrontAllowedMethods.
                        GET_HEAD_OPTIONS,
                    )
                ])
        ],
        price_class=cf.PriceClass.PRICE_CLASS_ALL,
        default_root_object="index.html",
    )

    # Adding the CloudFront Distribution endpoint to CFN Output
    core.CfnOutput(
        self,
        "URLShortenerWebsite",
        value=url_cf_distribution.domain_name,
    )