def get_integration_response():
    integration_response = aws_apigateway.IntegrationResponse(
        status_code="200",
        response_parameters={
            'method.response.header.Access-Control-Allow-Origin': "'*'"
        })
    return integration_response

def rest_api(self, event_streams, event_replayer):
    rest_api = _api_gtw.RestApi(self, "{}RestApi".format(self.stack_id))
    rest_api.add_usage_plan(
        "RestApiUsagePlan",
        api_key=_api_gtw.ApiKey(self, "TestApiKey"),
        api_stages=[
            _api_gtw.UsagePlanPerApiStage(api=rest_api,
                                          stage=rest_api.deployment_stage)
        ])

    api_role = _iam.Role(
        self, "RestApiRole",
        assumed_by=_iam.ServicePrincipal('apigateway.amazonaws.com'))
    api_role.add_to_policy(
        _iam.PolicyStatement(
            actions=['firehose:PutRecord'],
            resources=[stream.attr_arn for stream in event_streams]))

    for stream in event_streams:
        stream_resource = rest_api.root.add_resource(
            path_part=stream.delivery_stream_name.lower())
        stream_resource.add_method(
            'POST',
            api_key_required=True,
            integration=_api_gtw.Integration(
                type=_api_gtw.IntegrationType.AWS,
                uri="arn:aws:apigateway:eu-west-1:firehose:action/PutRecord",
                integration_http_method='POST',
                options=_api_gtw.IntegrationOptions(
                    credentials_role=api_role,
                    passthrough_behavior=_api_gtw.PassthroughBehavior.NEVER,
                    request_parameters={
                        'integration.request.header.Content-Type':
                            "'application/x-amz-json-1.1'"
                    },
                    request_templates={
                        'application/json': json.dumps({
                            "DeliveryStreamName": stream.delivery_stream_name,
                            "Record": {
                                "Data": "$util.base64Encode($input.body)"
                            }
                        })
                    },
                    integration_responses=[
                        _api_gtw.IntegrationResponse(status_code="200")
                    ])),
            method_responses=[_api_gtw.MethodResponse(status_code="200")])

        replay = stream_resource.add_resource(path_part='replay')
        replay.add_method(
            http_method='POST',
            integration=_api_gtw.LambdaIntegration(event_replayer),
            method_responses=[
                _api_gtw.MethodResponse(status_code="202"),
                _api_gtw.MethodResponse(status_code="400")
            ])

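# A minimal client sketch for the Firehose-backed endpoint above, assuming the
# stack is deployed; the URL and key below are placeholders, not real values.
# API Gateway validates usage-plan keys sent in the "x-api-key" header.
import requests

api_url = "https://<api-id>.execute-api.eu-west-1.amazonaws.com/prod/<stream-name>"  # hypothetical
resp = requests.post(api_url, json={"event": "hello"},
                     headers={"x-api-key": "<api-key-value>"})  # key issued via the usage plan
print(resp.status_code)
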
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    api_log_group = cw_logs.LogGroup(self, "HelloWorldAPILogs")

    # Create the api gateway for this lambda set
    self.target_api = api_gw.RestApi(
        self, 'HelloWorldAPI',
        rest_api_name='HelloWorld',
        endpoint_types=[api_gw.EndpointType.REGIONAL],
        deploy_options=api_gw.StageOptions(
            access_log_destination=api_gw.LogGroupLogDestination(api_log_group),
            access_log_format=api_gw.AccessLogFormat.clf(),
            method_options={
                # This special path applies to all resource paths and all HTTP methods
                "/*/*": api_gw.MethodDeploymentOptions(
                    throttling_rate_limit=100,
                    throttling_burst_limit=200)
            }))

    hello_world = _lambda.Function(
        self, "HelloWorld",
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='helloworld.lambda_handler',
        code=_lambda.Code.from_asset("lambda_fns"),
        timeout=core.Duration.seconds(60))

    entity = self.target_api.root.add_resource('helloworld')
    this_lambda_integration = api_gw.LambdaIntegration(
        hello_world,
        proxy=False,
        integration_responses=[
            api_gw.IntegrationResponse(
                status_code='200',
                response_parameters={
                    'method.response.header.Access-Control-Allow-Origin': "'*'"
                })
        ])
    entity.add_method(
        'GET', this_lambda_integration,
        method_responses=[
            api_gw.MethodResponse(
                status_code='200',
                response_parameters={
                    'method.response.header.Access-Control-Allow-Origin': True
                })
        ])

    self.resource_arn = f"arn:aws:apigateway:{core.Stack.of(self).region}::/restapis/{self.target_api.rest_api_id}/stages/{self.target_api.deployment_stage.stage_name}"

def get_options_integration_response():
    integration_response = aws_apigateway.IntegrationResponse(
        status_code="200",
        response_parameters={
            'method.response.header.Access-Control-Allow-Headers':
                "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
            'method.response.header.Access-Control-Allow-Origin': "'*'",
            'method.response.header.Access-Control-Allow-Methods': "'GET,OPTIONS'"
        })
    return integration_response

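# Sketch of how get_options_integration_response() would typically be wired up,
# assuming an existing Resource named `resource` (mirrors the CORS pattern used
# elsewhere in this collection): the headers only reach the client if the
# OPTIONS method response declares the same parameters.
resource.add_method(
    'OPTIONS',
    integration=aws_apigateway.MockIntegration(
        integration_responses=[get_options_integration_response()],
        passthrough_behavior=aws_apigateway.PassthroughBehavior.WHEN_NO_MATCH,
        request_templates={"application/json": "{\"statusCode\":200}"}),
    method_responses=[
        aws_apigateway.MethodResponse(
            status_code="200",
            response_parameters={
                'method.response.header.Access-Control-Allow-Headers': True,
                'method.response.header.Access-Control-Allow-Origin': True,
                'method.response.header.Access-Control-Allow-Methods': True,
            })
    ])
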
def create_and_integrate_apigw(self, queue: sqs.Queue, dashboard_name_prefix: str) -> str:
    """Creates API Gateway and integrates with SQS queue

    :param queue: the SQS queue to integrate with
    :type queue: aws_cdk.aws_sqs.Queue
    :param dashboard_name_prefix: the dashboard name to use as the API Gateway resource name
    :type dashboard_name_prefix: str
    :returns: the url that the webhooks will post to
    :rtype: str
    """
    webhook_apigw_role = iam.Role(
        self, 'WebhookAPIRole',
        role_name='WebhookAPIRole',
        assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
    webhook_apigw_role.add_to_policy(
        iam.PolicyStatement(resources=['*'], actions=['sqs:SendMessage']))

    webhook_apigw = apigw.RestApi(
        self, 'RepositoryStatusMonitorAPI',
        rest_api_name='RepositoryStatusMonitorAPI')
    webhook_apigw_resource = webhook_apigw.root.add_resource(dashboard_name_prefix)

    apigw_integration_response = apigw.IntegrationResponse(
        status_code='200',
        response_templates={'application/json': ""})
    apigw_integration_options = apigw.IntegrationOptions(
        credentials_role=webhook_apigw_role,
        integration_responses=[apigw_integration_response],
        request_templates={
            'application/json': 'Action=SendMessage&MessageBody=$input.body'
        },
        passthrough_behavior=apigw.PassthroughBehavior.NEVER,
        request_parameters={
            'integration.request.header.Content-Type':
                "'application/x-www-form-urlencoded'"
        })
    webhook_apigw_resource_sqs_integration = apigw.AwsIntegration(
        service='sqs',
        integration_http_method='POST',
        path='{}/{}'.format(core.Aws.ACCOUNT_ID, queue.queue_name),
        options=apigw_integration_options)
    webhook_apigw_resource.add_method(
        'POST', webhook_apigw_resource_sqs_integration,
        method_responses=[apigw.MethodResponse(status_code='200')])

    path = '/' + dashboard_name_prefix
    return webhook_apigw.url_for_path(path)

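# Illustrative call site for the helper above, inside a stack's __init__
# (the queue id and prefix are made-up values):
queue = sqs.Queue(self, 'WebhookQueue')
webhook_url = self.create_and_integrate_apigw(queue, 'repo-status-monitor')
core.CfnOutput(self, 'WebhookUrl', value=webhook_url)
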
def add_get_method(
    self,
    api: aws_apigateway.RestApi,
    resource: aws_apigateway.Resource,
    table: aws_dynamodb.Table,
) -> aws_apigateway.Method:
    list_announcements_lambda = create_function(
        stack=self,
        id="ListAnnouncementLambda",
        settings={
            "handler": "list_announcements.main",
            "runtime": aws_lambda.Runtime.PYTHON_3_8,
            "timeout": core.Duration.minutes(
                self.settings.AWS_LAMBDA_GET_ANNOUNCEMENT_TIMEOUT),
            "retry_attempts":
                self.settings.AWS_LAMBDA_GET_ANNOUNCEMENT_RETRY_ATTEMPTS,
        },
    )
    table.grant_read_data(list_announcements_lambda)
    list_announcements_lambda.add_environment("TABLE_NAME", table.table_name)

    list_announcements_method = resource.add_method(
        "GET",
        integration=aws_apigateway.LambdaIntegration(
            list_announcements_lambda,
            proxy=True,
            integration_responses=[
                aws_apigateway.IntegrationResponse(status_code="200"),
                aws_apigateway.IntegrationResponse(status_code="404"),
            ],
        ),
    )
    self.methods_to_deploy.append(list_announcements_method)
    return list_announcements_method

def __init__(self, scope: core.Construct, construct_id: str, apigw_role: _iam.Role,
             eventBus: _events.EventBus, **kwargs):
    super().__init__(scope, construct_id, **kwargs)

    integrationOptions = _apigw.IntegrationOptions(
        credentials_role=apigw_role,
        request_parameters={
            "integration.request.header.X-Amz-Target": "'AWSEvents.PutEvents'",
            "integration.request.header.Content-Type": "'application/x-amz-json-1.1'",
        },
        request_templates={
            "application/json":
                '#set($language=$input.params(\'language\'))\n'
                '{"Entries": [{"Source": "com.amazon.alexa.$language", "Detail": '
                '"$util.escapeJavaScript($input.body)",'
                ' "Resources": ["resource1", "resource2"], "DetailType": "myDetailType", '
                '"EventBusName": "' + eventBus.event_bus_name + '"}]}'
        },
        integration_responses=[
            _apigw.IntegrationResponse(
                status_code="200",
                response_templates={"application/json": ""},
            )
        ])

    # Integrate API Gateway with EventBridge
    integrationEventBridge = _apigw.Integration(
        type=_apigw.IntegrationType("AWS"),
        integration_http_method="POST",
        options=integrationOptions,
        uri=f"arn:aws:apigateway:{os.environ['CDK_DEFAULT_REGION']}:events:path//")

    myApi = _apigw.RestApi(self, construct_id)
    # myApi.root.add_method("POST", integrationEventBridge,
    #                       method_responses=[_apigw.MethodResponse(status_code="200")])

    languageResource = myApi.root.add_resource("{language}")
    languageResource.add_method(
        "POST", integrationEventBridge,
        method_responses=[_apigw.MethodResponse(status_code="200")],
        request_models={"application/json": self.getModel(myApi)},
        request_validator=_apigw.RequestValidator(
            self, "myValidator",
            rest_api=myApi,
            validate_request_body=True))

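# The method above references self.getModel(myApi), which is not shown in this
# snippet. A plausible minimal sketch, using the add_model API seen later in
# this collection (the schema fields are assumptions, not the original model):
def getModel(self, api: _apigw.RestApi) -> _apigw.Model:
    return api.add_model(
        "EventModel",
        content_type="application/json",
        model_name="EventModel",
        schema=_apigw.JsonSchema(
            schema=_apigw.JsonSchemaVersion.DRAFT4,
            title="event",
            type=_apigw.JsonSchemaType.OBJECT,
            # require a JSON object body; the exact properties are illustrative
            properties={
                "detail": _apigw.JsonSchema(type=_apigw.JsonSchemaType.STRING)
            }))
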
def _create_lambda_integration(self, lambda_fn_alias, _passthrough_behavior,
                               _response_template, _request_template, _status_code):
    return _api_gw.LambdaIntegration(
        lambda_fn_alias,
        proxy=False,
        passthrough_behavior=_passthrough_behavior,
        integration_responses=[
            _api_gw.IntegrationResponse(
                status_code=_status_code,
                response_templates={"application/json": _response_template})
        ],
        request_templates={"application/json": _request_template})

def _retrieve_lambda_integration(self, lambda_fn_alias, _passthrough_behavior,
                                 _response_template, _request_template, _status_code):
    return _api_gw.LambdaIntegration(
        lambda_fn_alias,
        proxy=False,
        passthrough_behavior=_passthrough_behavior,
        integration_responses=[
            _api_gw.IntegrationResponse(
                status_code=_status_code,
                response_parameters={
                    'method.response.header.Location':
                        "integration.response.body.location"
                })
        ],
        request_templates={"application/json": _request_template},
        request_parameters={
            'integration.request.path.proxy': "method.request.path.proxy"
        })

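# Illustrative wiring for the two integration factories above (the resource,
# alias, templates, and status code are placeholders):
resource.add_method(
    'POST',
    self._create_lambda_integration(
        lambda_fn_alias,
        _api_gw.PassthroughBehavior.NEVER,
        _response_template='{"status": "ok"}',
        _request_template='{"body": $input.json("$")}',
        _status_code="200"),
    method_responses=[_api_gw.MethodResponse(status_code="200")])
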
def __init__(self, app: core.App, cfn_name: str, stack_env):
    super().__init__(scope=app, id=f"{cfn_name}-{stack_env}")

    # lambda
    lambda_function = lambda_.Function(
        scope=self,
        id=f"{cfn_name}-lambda-task",
        code=lambda_.AssetCode.from_asset("lambda_script"),
        handler="lambda_handler.lambda_task",
        timeout=core.Duration.seconds(10),
        runtime=self.LAMBDA_PYTHON_RUNTIME,
        memory_size=128)

    # resource policy: restrict invocation to whitelisted source IPs
    whitelisted_ips = ["127.0.0.1/32"]
    api_resource_policy = iam.PolicyDocument(statements=[
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["execute-api:Invoke"],
            principals=[iam.AnyPrincipal()],
            resources=["execute-api:/*/*/*"],
            conditions={"IpAddress": {"aws:SourceIp": whitelisted_ips}})
    ])

    # api_gateway
    base_api = apigw_.RestApi(
        scope=self,
        id=f"{cfn_name}-{stack_env}-apigw",
        rest_api_name=f"{cfn_name}-{stack_env}-apigw",
        deploy_options=apigw_.StageOptions(stage_name=stack_env),
        policy=api_resource_policy)

    api_entity = base_api.root.add_resource("task")
    api_entity_lambda = apigw_.LambdaIntegration(
        handler=lambda_function,
        integration_responses=[apigw_.IntegrationResponse(status_code="200")])
    api_entity.add_method(http_method="POST", integration=api_entity_lambda)

def __init__(self, scope: core.App, id_: str, **kwargs) -> None:
    super().__init__(scope, id_, **kwargs)

    base_lambda = lambda_.Function(
        self, 'ApiCorsLambda',
        handler='lambda_handler.handler',
        runtime=lambda_.Runtime.PYTHON_3_8,
        code=lambda_.Code.asset('lambda_script'),
    )

    base_api = apigw_.RestApi(
        scope=self,
        id='ApiGatewayWithCors',
        rest_api_name='ApiGatewayWithCors')
    example_entity = base_api.root.add_resource('example')
    example_entity_lambda_integration = apigw_.LambdaIntegration(
        handler=base_lambda,
        proxy=False,
        integration_responses=[
            apigw_.IntegrationResponse(
                status_code="200",
                response_parameters={
                    'method.response.header.Access-Control-Allow-Origin': "'*'"
                })
        ])
    example_entity.add_method(
        http_method='GET',
        integration=example_entity_lambda_integration,
        method_responses=[
            apigw_.MethodResponse(
                status_code="200",
                response_parameters={
                    'method.response.header.Access-Control-Allow-Origin': True,
                })
        ])
    self.add_cors_options(example_entity)

def add_cors_options(apigw_resource):
    mock_integration = apigw_.MockIntegration(
        integration_responses=[
            apigw_.IntegrationResponse(
                status_code="200",
                response_parameters={
                    'method.response.header.Access-Control-Allow-Headers':
                        "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                    'method.response.header.Access-Control-Allow-Methods': "'GET,OPTIONS'"
                }),
        ],
        passthrough_behavior=apigw_.PassthroughBehavior.WHEN_NO_MATCH,
        request_templates={"application/json": "{\"statusCode\":200}"})
    apigw_resource.add_method(
        http_method='OPTIONS',
        integration=mock_integration,
        method_responses=[
            apigw_.MethodResponse(
                status_code="200",
                response_parameters={
                    'method.response.header.Access-Control-Allow-Headers': True,
                    'method.response.header.Access-Control-Allow-Methods': True,
                    'method.response.header.Access-Control-Allow-Origin': True,
                })
        ])

def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    #XXX: For creating Amazon MWAA in an existing VPC,
    # uncomment the lines below, comment out the vpc = aws_ec2.Vpc(..) block,
    # then pass -c vpc_name=your-existing-vpc to the cdk command, for example:
    # cdk -c vpc_name=your-existing-vpc synth
    #
    # vpc_name = self.node.try_get_context('vpc_name')
    # vpc = aws_ec2.Vpc.from_lookup(self, 'ExistingVPC',
    #     is_default=True,
    #     vpc_name=vpc_name)

    vpc = aws_ec2.Vpc(self, "ApiGatewayDynamoDBVPC",
        max_azs=2,
        gateway_endpoints={
            "S3": aws_ec2.GatewayVpcEndpointOptions(
                service=aws_ec2.GatewayVpcEndpointAwsService.S3),
            "DynamoDB": aws_ec2.GatewayVpcEndpointOptions(
                service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB)
        })

    DDB_TABLE_SUFFIX = ''.join(random.sample((string.ascii_lowercase + string.digits), k=7))
    DDB_TABLE_NAME = "Comments-{}".format(DDB_TABLE_SUFFIX)

    ddb_table = aws_dynamodb.Table(self, "DynamoDbTable",
        table_name=DDB_TABLE_NAME,
        removal_policy=cdk.RemovalPolicy.DESTROY,
        partition_key=aws_dynamodb.Attribute(name="commentId",
            type=aws_dynamodb.AttributeType.STRING),
        time_to_live_attribute="ttl",
        billing_mode=aws_dynamodb.BillingMode.PROVISIONED,
        read_capacity=15,
        write_capacity=5,
    )
    ddb_table.add_global_secondary_index(
        read_capacity=15,
        write_capacity=5,
        index_name="pageId-index",
        partition_key=aws_dynamodb.Attribute(name='pageId',
            type=aws_dynamodb.AttributeType.STRING),
        projection_type=aws_dynamodb.ProjectionType.ALL
    )

    user_pool = aws_cognito.UserPool(self, 'UserPool',
        user_pool_name='UserPoolForApiGateway',
        removal_policy=cdk.RemovalPolicy.DESTROY,
        self_sign_up_enabled=True,
        sign_in_aliases={'email': True},
        auto_verify={'email': True},
        password_policy={
            'min_length': 8,
            'require_lowercase': False,
            'require_digits': False,
            'require_uppercase': False,
            'require_symbols': False,
        },
        account_recovery=aws_cognito.AccountRecovery.EMAIL_ONLY
    )

    user_pool_client = aws_cognito.UserPoolClient(self, 'UserPoolClient',
        user_pool=user_pool,
        auth_flows={
            'admin_user_password': True,
            'user_password': True,
            'custom': True,
            'user_srp': True
        },
        supported_identity_providers=[aws_cognito.UserPoolClientIdentityProvider.COGNITO]
    )

    auth = aws_apigateway.CognitoUserPoolsAuthorizer(self, 'AuthorizerForDynamoDBApi',
        cognito_user_pools=[user_pool]
    )

    ddb_access_policy_doc = aws_iam.PolicyDocument()
    ddb_access_policy_doc.add_statements(aws_iam.PolicyStatement(**{
        "effect": aws_iam.Effect.ALLOW,
        "resources": [ddb_table.table_arn],
        "actions": [
            "dynamodb:DeleteItem",
            "dynamodb:PartiQLInsert",
            "dynamodb:UpdateTimeToLive",
            "dynamodb:BatchWriteItem",
            "dynamodb:PutItem",
            "dynamodb:PartiQLUpdate",
            "dynamodb:UpdateItem",
            "dynamodb:PartiQLDelete"
        ]
    }))

    apigw_dynamodb_role = aws_iam.Role(self, "ApiGatewayRoleForDynamoDB",
        role_name='APIGatewayRoleForDynamoDB',
        assumed_by=aws_iam.ServicePrincipal('apigateway.amazonaws.com'),
        inline_policies={'DynamoDBAccessPolicy': ddb_access_policy_doc},
        managed_policies=[
            aws_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonDynamoDBReadOnlyAccess'),
        ]
    )

    dynamodb_api = aws_apigateway.RestApi(self, "DynamoDBProxyAPI",
        rest_api_name="comments-api",
        description="An Amazon API Gateway REST API that is integrated with an Amazon DynamoDB table.",
        endpoint_types=[aws_apigateway.EndpointType.REGIONAL],
        default_cors_preflight_options={
            "allow_origins": aws_apigateway.Cors.ALL_ORIGINS
        },
        deploy=True,
        deploy_options=aws_apigateway.StageOptions(stage_name="v1"),
        endpoint_export_name="DynamoDBProxyAPIEndpoint"
    )

    all_resources = dynamodb_api.root.add_resource("comments")
    one_resource = all_resources.add_resource("{pageId}")

    apigw_error_responses = [
        aws_apigateway.IntegrationResponse(status_code="400", selection_pattern=r"4\d{2}"),
        aws_apigateway.IntegrationResponse(status_code="500", selection_pattern=r"5\d{2}")
    ]
    apigw_ok_responses = [
        aws_apigateway.IntegrationResponse(status_code="200")
    ]

    ddb_put_item_options = aws_apigateway.IntegrationOptions(
        credentials_role=apigw_dynamodb_role,
        integration_responses=[*apigw_ok_responses, *apigw_error_responses],
        request_templates={
            'application/json': json.dumps({
                "TableName": DDB_TABLE_NAME,
                "Item": {
                    "commentId": {"S": "$context.requestId"},
                    "pageId": {"S": "$input.path('$.pageId')"},
                    "userName": {"S": "$input.path('$.userName')"},
                    "message": {"S": "$input.path('$.message')"}
                }
            }, indent=2)
        },
        passthrough_behavior=aws_apigateway.PassthroughBehavior.WHEN_NO_TEMPLATES
    )
    create_integration = aws_apigateway.AwsIntegration(
        service='dynamodb',
        action='PutItem',
        integration_http_method='POST',
        options=ddb_put_item_options
    )

    method_responses = [
        aws_apigateway.MethodResponse(status_code='200'),
        aws_apigateway.MethodResponse(status_code='400'),
        aws_apigateway.MethodResponse(status_code='500')
    ]
    all_resources.add_method('POST', create_integration,
        method_responses=method_responses,
        authorization_type=aws_apigateway.AuthorizationType.COGNITO,
        authorizer=auth
    )

    get_response_templates = '''
#set($inputRoot = $input.path('$'))
{
  "comments": [
    #foreach($elem in $inputRoot.Items) {
      "commentId": "$elem.commentId.S",
      "userName": "******",
      "message": "$elem.message.S"
    }#if($foreach.hasNext),#end
    #end
  ]
}'''

    ddb_query_item_options = aws_apigateway.IntegrationOptions(
        credentials_role=apigw_dynamodb_role,
        integration_responses=[
            aws_apigateway.IntegrationResponse(
                status_code="200",
                response_templates={'application/json': get_response_templates}),
            *apigw_error_responses
        ],
        request_templates={
            'application/json': json.dumps({
                "TableName": DDB_TABLE_NAME,
                "IndexName": "pageId-index",
                "KeyConditionExpression": "pageId = :v1",
                "ExpressionAttributeValues": {
                    ":v1": {"S": "$input.params('pageId')"}
                }
            }, indent=2)
        },
        passthrough_behavior=aws_apigateway.PassthroughBehavior.WHEN_NO_TEMPLATES
    )
    get_integration = aws_apigateway.AwsIntegration(
        service='dynamodb',
        action='Query',
        integration_http_method='POST',
        options=ddb_query_item_options
    )

    one_resource.add_method('GET', get_integration,
        method_responses=method_responses,
        authorization_type=aws_apigateway.AuthorizationType.COGNITO,
        authorizer=auth
    )

    cdk.CfnOutput(self, 'DynamoDBTableName', value=ddb_table.table_name)
    cdk.CfnOutput(self, 'UserPoolId', value=user_pool.user_pool_id)
    cdk.CfnOutput(self, 'UserPoolClientId', value=user_pool_client.user_pool_client_id)

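# Client sketch for the comments API above: both methods use a Cognito
# authorizer, whose default identity source is the Authorization header, so a
# caller must pass an ID token from the user pool (URL and token are placeholders):
import requests

api_url = "https://<api-id>.execute-api.<region>.amazonaws.com/v1/comments"  # hypothetical
id_token = "<cognito-id-token>"  # hypothetical; obtained by authenticating against the user pool
requests.post(api_url, headers={"Authorization": id_token},
              json={"pageId": "page1", "userName": "alice", "message": "hello"})
requests.get(api_url + "/page1", headers={"Authorization": id_token})
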
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Note: typo of role name is copied from original workshop
    mysfits_notebook_role = aws_iam.Role(
        self, "MysfitsNotbookRole",
        assumed_by=aws_iam.ServicePrincipal("sagemaker.amazonaws.com"),
    )

    mysfits_notebook_policy = aws_iam.PolicyStatement()
    mysfits_notebook_policy.add_actions(
        "sagemaker:*",
        "ecr:GetAuthorizationToken",
        "ecr:GetDownloadUrlForLayer",
        "ecr:BatchGetImage",
        "ecr:BatchCheckLayerAvailability",
        "cloudwatch:PutMetricData",
        "logs:CreateLogGroup",
        "logs:CreateLogStream",
        "logs:DescribeLogStreams",
        "logs:PutLogEvents",
        "logs:GetLogEvents",
        "s3:CreateBucket",
        "s3:ListBucket",
        "s3:GetBucketLocation",
        "s3:GetObject",
        "s3:PutObject",
        "s3:DeleteObject",
    )
    mysfits_notebook_policy.add_all_resources()

    mysfits_notebook_pass_role_policy = aws_iam.PolicyStatement()
    mysfits_notebook_pass_role_policy.add_actions("iam:PassRole")
    mysfits_notebook_pass_role_policy.add_all_resources()
    mysfits_notebook_pass_role_policy.add_condition(
        "StringEquals", {"iam:PassedToService": "sagemaker.amazonaws.com"})

    aws_iam.Policy(
        self, "MysfitsNotebookPolicy",
        statements=[mysfits_notebook_pass_role_policy, mysfits_notebook_policy],
        roles=[mysfits_notebook_role],
    )

    notebook_instance = aws_sagemaker.CfnNotebookInstance(
        self, "MythicalMysfits-SageMaker-Notebook",
        instance_type="ml.t2.medium",
        role_arn=mysfits_notebook_role.role_arn,
    )

    lambda_repository = aws_codecommit.Repository(
        self, "RecommendationsLambdaRepository",
        repository_name="MythicalMysfits-RecommendationsLambdaRepository",
    )
    core.CfnOutput(
        self, "recommandationsRepositoryCloneUrlHttp",
        value=lambda_repository.repository_clone_url_http,
        description="Recommendations Lambda Repository Clone Url HTTP",
    )
    core.CfnOutput(
        self, "recommandationsRepositoryCloneUrlSsh",
        value=lambda_repository.repository_clone_url_ssh,
        description="Recommendations Lambda Repository Clone Url SSH",
    )

    recommendations_lambda_function_policy_statement = aws_iam.PolicyStatement()
    recommendations_lambda_function_policy_statement.add_actions(
        "sagemaker:InvokeEndpoint")
    recommendations_lambda_function_policy_statement.add_all_resources()

    mysfits_recommendations = aws_lambda.Function(
        self, "Function",
        handler="recommendations.recommend",
        runtime=aws_lambda.Runtime.PYTHON_3_6,
        description="A microservice backend to a SageMaker endpoint",
        memory_size=128,
        code=aws_lambda.Code.asset(
            os.path.join("..", "..", "lambda-recommendations/service")),
        timeout=core.Duration.seconds(30),
        initial_policy=[recommendations_lambda_function_policy_statement],
    )

    questions_api_role = aws_iam.Role(
        self, "QuestionsApiRole",
        assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"),
    )
    api_policy = aws_iam.PolicyStatement()
    api_policy.add_actions("lambda:InvokeFunction")
    api_policy.add_resources(mysfits_recommendations.function_arn)
    aws_iam.Policy(
        self, "QuestionsApiPolicy",
        policy_name="questions_api_policy",
        statements=[api_policy],
        roles=[questions_api_role],
    )

    questions_integration = aws_apigateway.LambdaIntegration(
        mysfits_recommendations,
        credentials_role=questions_api_role,
        integration_responses=[
            aws_apigateway.IntegrationResponse(
                status_code="200",
                response_templates={"application/json": '{"status": "OK"}'},
            )
        ],
    )

    api = aws_apigateway.LambdaRestApi(
        self, "APIEndpoint",
        handler=mysfits_recommendations,
        rest_api_name="Recommendation API Service",
        proxy=False,
    )

    recommendations_method = api.root.add_resource("recommendations")
    recommendations_method.add_method(
        "POST", questions_integration,
        method_responses=[
            aws_apigateway.MethodResponse(
                status_code="200",
                response_parameters={
                    "method.response.header.Access-Control-Allow-Headers": True,
                    "method.response.header.Access-Control-Allow-Methods": True,
                    "method.response.header.Access-Control-Allow-Origin": True,
                },
            )
        ],
        authorization_type=aws_apigateway.AuthorizationType.NONE,
    )
    recommendations_method.add_method(
        "OPTIONS",
        aws_apigateway.MockIntegration(
            integration_responses=[
                aws_apigateway.IntegrationResponse(
                    status_code="200",
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Headers":
                            "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,X-Amz-User-Agent'",
                        "method.response.header.Access-Control-Allow-Origin": "'*'",
                        "method.response.header.Access-Control-Allow-Credentials": "'false'",
                        "method.response.header.Access-Control-Allow-Methods": "'OPTIONS,GET,PUT,POST,DELETE'",
                    },
                )
            ],
            passthrough_behavior=aws_apigateway.PassthroughBehavior.NEVER,
            request_templates={"application/json": '{"statusCode": 200}'},
        ),
        method_responses=[
            aws_apigateway.MethodResponse(
                status_code="200",
                response_parameters={
                    "method.response.header.Access-Control-Allow-Headers": True,
                    "method.response.header.Access-Control-Allow-Methods": True,
                    "method.response.header.Access-Control-Allow-Credentials": True,
                    "method.response.header.Access-Control-Allow-Origin": True,
                },
            )
        ],
    )

def __init__(self, scope: core.Construct, _id: str, **kwargs) -> None:
    super().__init__(scope, _id, **kwargs)

    # Setup SSM parameters for credentials, bucket_para, ignore_list
    ssm_credential_para = ssm.StringParameter.from_secure_string_parameter_attributes(
        self, "ssm_parameter_credentials",
        parameter_name=ssm_parameter_credentials,
        version=1)
    ssm_bucket_para = ssm.StringParameter(
        self, "s3bucket_serverless",
        string_value=json.dumps(bucket_para, indent=4))
    ssm_parameter_ignore_list = ssm.StringParameter(
        self, "s3_migrate_ignore_list",
        string_value=ignore_list)

    # Setup DynamoDB
    ddb_file_list = ddb.Table(
        self, "s3migrate_serverless",
        partition_key=ddb.Attribute(name="Key", type=ddb.AttributeType.STRING),
        billing_mode=ddb.BillingMode.PAY_PER_REQUEST)
    ddb_file_list.add_global_secondary_index(
        partition_key=ddb.Attribute(name="desBucket", type=ddb.AttributeType.STRING),
        index_name="desBucket-index",
        projection_type=ddb.ProjectionType.INCLUDE,
        non_key_attributes=["desKey", "versionId"])

    # Setup SQS
    sqs_queue_DLQ = sqs.Queue(
        self, "s3migrate_serverless_Q_DLQ",
        visibility_timeout=core.Duration.minutes(15),
        retention_period=core.Duration.days(14))
    sqs_queue = sqs.Queue(
        self, "s3migrate_serverless_Q",
        visibility_timeout=core.Duration.minutes(15),
        retention_period=core.Duration.days(14),
        dead_letter_queue=sqs.DeadLetterQueue(
            max_receive_count=60,
            queue=sqs_queue_DLQ))

    # Setup API for Lambda to get its IP address (for debugging network routing)
    checkip = api.RestApi(
        self, "lambda-checkip-api",
        cloud_watch_role=True,
        deploy=True,
        description="For Lambda get IP address",
        default_integration=api.MockIntegration(
            integration_responses=[
                api.IntegrationResponse(
                    status_code="200",
                    response_templates={"application/json": "$context.identity.sourceIp"})
            ],
            request_templates={"application/json": '{"statusCode": 200}'}),
        endpoint_types=[api.EndpointType.REGIONAL])
    checkip.root.add_method(
        "GET",
        method_responses=[
            api.MethodResponse(
                status_code="200",
                response_models={"application/json": api.Model.EMPTY_MODEL})
        ])

    # Setup Lambda functions
    handler = lam.Function(
        self, "s3-migrate-worker",
        code=lam.Code.asset("./lambda"),
        handler="lambda_function_worker.lambda_handler",
        runtime=lam.Runtime.PYTHON_3_8,
        memory_size=1024,
        timeout=core.Duration.minutes(15),
        tracing=lam.Tracing.ACTIVE,
        environment={
            'table_queue_name': ddb_file_list.table_name,
            'Des_bucket_default': Des_bucket_default,
            'Des_prefix_default': Des_prefix_default,
            'StorageClass': StorageClass,
            'checkip_url': checkip.url,
            'ssm_parameter_credentials': ssm_parameter_credentials,
            'JobType': JobType,
            'MaxRetry': MaxRetry,
            'MaxThread': MaxThread,
            'MaxParallelFile': MaxParallelFile,
            'JobTimeout': JobTimeout,
            'UpdateVersionId': UpdateVersionId,
            'GetObjectWithVersionId': GetObjectWithVersionId
        })
    handler_jobsender = lam.Function(
        self, "s3-migrate-jobsender",
        code=lam.Code.asset("./lambda"),
        handler="lambda_function_jobsender.lambda_handler",
        runtime=lam.Runtime.PYTHON_3_8,
        memory_size=1024,
        timeout=core.Duration.minutes(15),
        tracing=lam.Tracing.ACTIVE,
        environment={
            'table_queue_name': ddb_file_list.table_name,
            'StorageClass': StorageClass,
            'checkip_url': checkip.url,
            'sqs_queue': sqs_queue.queue_name,
            'ssm_parameter_credentials': ssm_parameter_credentials,
            'ssm_parameter_ignore_list': ssm_parameter_ignore_list.parameter_name,
            'ssm_parameter_bucket': ssm_bucket_para.parameter_name,
            'JobType': JobType,
            'MaxRetry': MaxRetry,
            'JobsenderCompareVersionId': JobsenderCompareVersionId
        })

    # Allow lambda read/write DDB, SQS
    ddb_file_list.grant_read_write_data(handler)
    ddb_file_list.grant_read_write_data(handler_jobsender)
    sqs_queue.grant_send_messages(handler_jobsender)

    # SQS trigger Lambda worker
    handler.add_event_source(SqsEventSource(sqs_queue, batch_size=1))

    # Option 1: Create an S3 bucket; all new objects in this bucket will be transmitted by the Lambda worker
    s3bucket = s3.Bucket(self, "s3_new_migrate")
    s3bucket.grant_read(handler)
    s3bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                    s3n.SqsDestination(sqs_queue))

    # Option 2: Allow existing S3 buckets to be read by the Lambda functions.
    # The jobsender Lambda will scan and compare these buckets and trigger the worker Lambdas to transmit.
    bucket_name = ''
    for b in bucket_para:
        if bucket_name != b['src_bucket']:  # skip duplicates when the same bucket is listed more than once
            bucket_name = b['src_bucket']
            s3exist_bucket = s3.Bucket.from_bucket_name(
                self, bucket_name,  # use the bucket name as the construct id
                bucket_name=bucket_name)
            if JobType == 'PUT':
                s3exist_bucket.grant_read(handler_jobsender)
                s3exist_bucket.grant_read(handler)
            else:  # 'GET' mode
                s3exist_bucket.grant_read_write(handler_jobsender)
                s3exist_bucket.grant_read_write(handler)

    # Allow Lambda to read SSM parameters
    ssm_bucket_para.grant_read(handler_jobsender)
    ssm_credential_para.grant_read(handler)
    ssm_credential_para.grant_read(handler_jobsender)
    ssm_parameter_ignore_list.grant_read(handler_jobsender)

    # Schedule cron event to trigger the jobsender Lambda every hour:
    event.Rule(self, 'cron_trigger_jobsender',
               schedule=event.Schedule.rate(core.Duration.hours(1)),
               targets=[target.LambdaFunction(handler_jobsender)])
    # TODO: Trigger the event immediately; add a custom resource Lambda to invoke handler_jobsender

    # Create Lambda log metric filters to derive network-traffic metrics
    handler.log_group.add_metric_filter(
        "Completed-bytes",
        metric_name="Completed-bytes",
        metric_namespace="s3_migrate",
        metric_value="$bytes",
        filter_pattern=logs.FilterPattern.literal(
            '[info, date, sn, p="--->Complete", bytes, key]'))
    handler.log_group.add_metric_filter(
        "Uploading-bytes",
        metric_name="Uploading-bytes",
        metric_namespace="s3_migrate",
        metric_value="$bytes",
        filter_pattern=logs.FilterPattern.literal(
            '[info, date, sn, p="--->Uploading", bytes, key]'))
    handler.log_group.add_metric_filter(
        "Downloading-bytes",
        metric_name="Downloading-bytes",
        metric_namespace="s3_migrate",
        metric_value="$bytes",
        filter_pattern=logs.FilterPattern.literal(
            '[info, date, sn, p="--->Downloading", bytes, key]'))
    handler.log_group.add_metric_filter(
        "MaxMemoryUsed",
        metric_name="MaxMemoryUsed",
        metric_namespace="s3_migrate",
        metric_value="$memory",
        filter_pattern=logs.FilterPattern.literal(
            '[head="REPORT", a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, '
            'a13, a14, a15, a16, memory, MB="MB", rest]'))

    lambda_metric_Complete = cw.Metric(namespace="s3_migrate",
                                       metric_name="Completed-bytes",
                                       statistic="Sum",
                                       period=core.Duration.minutes(1))
    lambda_metric_Upload = cw.Metric(namespace="s3_migrate",
                                     metric_name="Uploading-bytes",
                                     statistic="Sum",
                                     period=core.Duration.minutes(1))
    lambda_metric_Download = cw.Metric(namespace="s3_migrate",
                                       metric_name="Downloading-bytes",
                                       statistic="Sum",
                                       period=core.Duration.minutes(1))
    lambda_metric_MaxMemoryUsed = cw.Metric(namespace="s3_migrate",
                                            metric_name="MaxMemoryUsed",
                                            statistic="Maximum",
                                            period=core.Duration.minutes(1))

    handler.log_group.add_metric_filter(
        "ERROR",
        metric_name="ERROR-Logs",
        metric_namespace="s3_migrate",
        metric_value="1",
        filter_pattern=logs.FilterPattern.literal('"ERROR"'))
    handler.log_group.add_metric_filter(
        "WARNING",
        metric_name="WARNING-Logs",
        metric_namespace="s3_migrate",
        metric_value="1",
        filter_pattern=logs.FilterPattern.literal('"WARNING"'))
    # Task timed out
    handler.log_group.add_metric_filter(
        "TIMEOUT",
        metric_name="TIMEOUT-Logs",
        metric_namespace="s3_migrate",
        metric_value="1",
        filter_pattern=logs.FilterPattern.literal('"Task timed out"'))

    log_metric_ERROR = cw.Metric(namespace="s3_migrate",
                                 metric_name="ERROR-Logs",
                                 statistic="Sum",
                                 period=core.Duration.minutes(1))
    log_metric_WARNING = cw.Metric(namespace="s3_migrate",
                                   metric_name="WARNING-Logs",
                                   statistic="Sum",
                                   period=core.Duration.minutes(1))
    log_metric_TIMEOUT = cw.Metric(namespace="s3_migrate",
                                   metric_name="TIMEOUT-Logs",
                                   statistic="Sum",
                                   period=core.Duration.minutes(1))

    # Dashboard to monitor SQS and Lambda
    board = cw.Dashboard(self, "s3_migrate_serverless")
    board.add_widgets(
        cw.GraphWidget(title="Lambda-NETWORK",
                       left=[lambda_metric_Download,
                             lambda_metric_Upload,
                             lambda_metric_Complete]),
        cw.GraphWidget(title="Lambda-concurrent",
                       left=[handler.metric(metric_name="ConcurrentExecutions",
                                            period=core.Duration.minutes(1))]),
        cw.GraphWidget(title="Lambda-invocations/errors/throttles",
                       left=[handler.metric_invocations(period=core.Duration.minutes(1)),
                             handler.metric_errors(period=core.Duration.minutes(1)),
                             handler.metric_throttles(period=core.Duration.minutes(1))]),
        cw.GraphWidget(title="Lambda-duration",
                       left=[handler.metric_duration(period=core.Duration.minutes(1))]),
    )
    board.add_widgets(
        cw.GraphWidget(title="Lambda_MaxMemoryUsed(MB)",
                       left=[lambda_metric_MaxMemoryUsed]),
        cw.GraphWidget(title="ERROR/WARNING Logs",
                       left=[log_metric_ERROR],
                       right=[log_metric_WARNING, log_metric_TIMEOUT]),
        cw.GraphWidget(title="SQS-Jobs",
                       left=[sqs_queue.metric_approximate_number_of_messages_visible(
                                 period=core.Duration.minutes(1)),
                             sqs_queue.metric_approximate_number_of_messages_not_visible(
                                 period=core.Duration.minutes(1))]),
        cw.SingleValueWidget(
            title="Running/Waiting and Dead Jobs",
            metrics=[sqs_queue.metric_approximate_number_of_messages_not_visible(
                         period=core.Duration.minutes(1)),
                     sqs_queue.metric_approximate_number_of_messages_visible(
                         period=core.Duration.minutes(1)),
                     sqs_queue_DLQ.metric_approximate_number_of_messages_not_visible(
                         period=core.Duration.minutes(1)),
                     sqs_queue_DLQ.metric_approximate_number_of_messages_visible(
                         period=core.Duration.minutes(1))],
            height=6))

    # Alarm for the dead letter queue
    alarm_DLQ = cw.Alarm(
        self, "SQS_DLQ",
        metric=sqs_queue_DLQ.metric_approximate_number_of_messages_visible(),
        threshold=0,
        comparison_operator=cw.ComparisonOperator.GREATER_THAN_THRESHOLD,
        evaluation_periods=1,
        datapoints_to_alarm=1)
    alarm_topic = sns.Topic(self, "SQS queue-DLQ has dead letter")
    alarm_topic.add_subscription(subscription=sub.EmailSubscription(alarm_email))
    alarm_DLQ.add_alarm_action(action.SnsAction(alarm_topic))

    core.CfnOutput(self, "Dashboard",
                   value="CloudWatch Dashboard name s3_migrate_serverless")

def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    ###
    # Let's create our own Event Bus for this rather than using the default
    ###
    bus = events.EventBus(self, 'DestinedEventBus',
                          event_bus_name='the-destined-lambda')

    ###
    # Destinations need to be invoked asynchronously, so let's use SNS
    ###
    topic = sns.Topic(self, 'theDestinedLambdaTopic',
                      display_name='The Destined Lambda CDK Pattern Topic')

    ###
    # Lambda configured with success and failure destinations
    # Note the actual lambda has no EventBridge code inside it
    ###
    destined_lambda = _lambda.Function(
        self, "destinedLambda",
        runtime=_lambda.Runtime.NODEJS_12_X,
        handler="destinedLambda.handler",
        code=_lambda.Code.from_asset("lambda_fns"),
        retry_attempts=0,
        on_success=destinations.EventBridgeDestination(event_bus=bus),
        on_failure=destinations.EventBridgeDestination(event_bus=bus))
    topic.add_subscription(subscriptions.LambdaSubscription(destined_lambda))

    ###
    # This is a lambda that will be called by onSuccess for destinedLambda
    # It simply prints the event it receives to the cloudwatch logs
    ###
    success_lambda = _lambda.Function(
        self, "successLambda",
        runtime=_lambda.Runtime.NODEJS_12_X,
        handler="success.handler",
        code=_lambda.Code.from_asset("lambda_fns"),
        timeout=core.Duration.seconds(3))

    ###
    # EventBridge Rule to send events to our success lambda
    # Notice how we can still do event filtering based on the json payload returned by the destined lambda
    ###
    success_rule = events.Rule(
        self, 'successRule',
        event_bus=bus,
        description='all success events are caught here and logged centrally',
        event_pattern=events.EventPattern(
            detail={
                "requestContext": {"condition": ["Success"]},
                "responsePayload": {
                    "source": ["cdkpatterns.the-destined-lambda"],
                    "action": ["message"]
                }
            }))
    success_rule.add_target(targets.LambdaFunction(success_lambda))

    ###
    # This is a lambda that will be called by onFailure for destinedLambda
    # It simply prints the event it receives to the cloudwatch logs.
    # Notice how it includes the message that came into destined lambda to make it fail so you have
    # everything you need to do retries or manually investigate
    ###
    failure_lambda = _lambda.Function(
        self, "failureLambda",
        runtime=_lambda.Runtime.NODEJS_12_X,
        handler="failure.handler",
        code=_lambda.Code.from_asset("lambda_fns"),
        timeout=core.Duration.seconds(3))

    ###
    # EventBridge Rule to send events to our failure lambda
    ###
    failure_rule = events.Rule(
        self, 'failureRule',
        event_bus=bus,
        description='all failure events are caught here and logged centrally',
        event_pattern=events.EventPattern(
            detail={"responsePayload": {"errorType": ["Error"]}}))
    failure_rule.add_target(targets.LambdaFunction(failure_lambda))

    ###
    # API Gateway Creation
    # This is complicated because it transforms the incoming json payload into a query string url
    # this url is used to post the payload to sns without a lambda inbetween
    ###
    gateway = api_gw.RestApi(
        self, 'theDestinedLambdaAPI',
        deploy_options=api_gw.StageOptions(
            metrics_enabled=True,
            logging_level=api_gw.MethodLoggingLevel.INFO,
            data_trace_enabled=True,
            stage_name='prod'))

    # Give our gateway permissions to interact with SNS
    api_gw_sns_role = iam.Role(
        self, 'ApiGatewaySNSRole',
        assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
    topic.grant_publish(api_gw_sns_role)

    # shortening the lines of later code
    schema = api_gw.JsonSchema
    schema_type = api_gw.JsonSchemaType

    # Because this isn't a proxy integration, we need to define our response model
    response_model = gateway.add_model(
        'ResponseModel',
        content_type='application/json',
        model_name='ResponseModel',
        schema=schema(
            schema=api_gw.JsonSchemaVersion.DRAFT4,
            title='pollResponse',
            type=schema_type.OBJECT,
            properties={'message': schema(type=schema_type.STRING)}))
    error_response_model = gateway.add_model(
        'ErrorResponseModel',
        content_type='application/json',
        model_name='ErrorResponseModel',
        schema=schema(
            schema=api_gw.JsonSchemaVersion.DRAFT4,
            title='errorResponse',
            type=schema_type.OBJECT,
            properties={
                'state': schema(type=schema_type.STRING),
                'message': schema(type=schema_type.STRING)
            }))

    request_template = "Action=Publish&" + \
        "TargetArn=$util.urlEncode('" + topic.topic_arn + "')&" + \
        "Message=please $input.params().querystring.get('mode')&" + \
        "Version=2010-03-31"

    # This is the VTL to transform the error response
    error_template = {
        "state": 'error',
        "message": "$util.escapeJavaScript($input.path('$.errorMessage'))"
    }
    error_template_string = json.dumps(error_template, separators=(',', ':'))

    # This is how our gateway chooses what response to send based on selection_pattern
    integration_options = api_gw.IntegrationOptions(
        credentials_role=api_gw_sns_role,
        request_parameters={
            'integration.request.header.Content-Type':
                "'application/x-www-form-urlencoded'"
        },
        request_templates={"application/json": request_template},
        passthrough_behavior=api_gw.PassthroughBehavior.NEVER,
        integration_responses=[
            api_gw.IntegrationResponse(
                status_code='200',
                response_templates={
                    "application/json":
                        json.dumps({"message": 'Message added to SNS topic'})
                }),
            api_gw.IntegrationResponse(
                selection_pattern=r"^\[Error\].*",
                status_code='400',
                response_templates={"application/json": error_template_string},
                response_parameters={
                    'method.response.header.Content-Type': "'application/json'",
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                    'method.response.header.Access-Control-Allow-Credentials': "'true'"
                })
        ])

    # Add a SendEvent endpoint onto the gateway
    gateway.root.add_resource('SendEvent') \
        .add_method(
            'GET',
            api_gw.Integration(
                type=api_gw.IntegrationType.AWS,
                integration_http_method='POST',
                uri='arn:aws:apigateway:us-east-1:sns:path//',
                options=integration_options),
            method_responses=[
                api_gw.MethodResponse(
                    status_code='200',
                    response_parameters={
                        'method.response.header.Content-Type': True,
                        'method.response.header.Access-Control-Allow-Origin': True,
                        'method.response.header.Access-Control-Allow-Credentials': True
                    },
                    response_models={'application/json': response_model}),
                api_gw.MethodResponse(
                    status_code='400',
                    response_parameters={
                        'method.response.header.Content-Type': True,
                        'method.response.header.Access-Control-Allow-Origin': True,
                        'method.response.header.Access-Control-Allow-Credentials': True
                    },
                    response_models={'application/json': error_response_model}),
            ])

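# Usage sketch: the request template reads the query string via
# $input.params().querystring.get('mode'), so a caller chooses the success or
# failure path with a query parameter (URL is a placeholder):
#   curl "https://<api-id>.execute-api.us-east-1.amazonaws.com/prod/SendEvent?mode=fail"
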
def __init__(self, scope: core.App, id_: str, stack_env: str, **kwargs) -> None:
    super().__init__(scope, id_, **kwargs)

    # create dynamo table
    demo_table = aws_dynamodb.Table(
        scope=self,
        id="demo_table",
        partition_key=aws_dynamodb.Attribute(
            name="id",
            type=aws_dynamodb.AttributeType.STRING),
        write_capacity=3,
        read_capacity=3,
        removal_policy=core.RemovalPolicy.DESTROY)

    queue = aws_sqs.Queue(self, f"{id_}-SQSQueue")

    # create producer lambda function
    producer_lambda = self._create_lambda_function(
        function_name="producer",
        environment={
            "TABLE_NAME": demo_table.table_name,
            "QUEUE_URL": queue.queue_url
        })
    queue.grant_send_messages(producer_lambda)
    # grant permission to lambda to write to demo table
    demo_table.grant_write_data(producer_lambda)

    # create consumer lambda function
    consumer_lambda = self._create_lambda_function(
        function_name="consumer",
        environment={"TABLE_NAME": demo_table.table_name})
    # grant permission to lambda to read from demo table
    demo_table.grant_read_data(consumer_lambda)

    # api_gateway for root
    base_api = apigw_.RestApi(
        scope=self,
        id=f"{id_}-{stack_env}-apigw",
        rest_api_name=f"{id_}-{stack_env}-apigw",
        deploy_options=apigw_.StageOptions(stage_name=stack_env))

    # /example entity
    api_entity = base_api.root.add_resource("example")

    # GET /example
    api_entity.add_method(
        http_method="GET",
        integration=apigw_.LambdaIntegration(
            handler=consumer_lambda,
            integration_responses=[apigw_.IntegrationResponse(status_code="200")]))

    # POST /example
    api_entity.add_method(
        http_method="POST",
        integration=apigw_.LambdaIntegration(
            handler=producer_lambda,
            integration_responses=[apigw_.IntegrationResponse(status_code="200")]))

    # ============= #
    # StepFunctions #
    # ============= #
    dynamodb_update_running_task = self._dynamodb_update_in_sfn(
        table=demo_table, status="running")
    wait_1_min = aws_sfn.Wait(
        scope=self,
        id="Wait one minutes as heavy task",
        time=aws_sfn.WaitTime.duration(core.Duration.minutes(1)))
    dynamodb_update_complete_task = self._dynamodb_update_in_sfn(
        table=demo_table, status="complete")
    dynamodb_update_failure_task = self._dynamodb_update_in_sfn(
        table=demo_table, status="failure")

    check_task_status = aws_sfn.Choice(scope=self, id="Job Complete?") \
        .when(aws_sfn.Condition.string_equals("$.job_status", "success"),
              dynamodb_update_complete_task) \
        .otherwise(dynamodb_update_failure_task)

    # StepFunctions definition
    definition = dynamodb_update_running_task \
        .next(wait_1_min) \
        .next(check_task_status)
    sfn_process = aws_sfn.StateMachine(
        scope=self,
        id=f"{id_}-{stack_env}",
        definition=definition)

    # Lambda to invoke StepFunction
    sfn_invoke_lambda = self._create_lambda_function(
        function_name="invoke_step_function",
        environment={
            "STEP_FUNCTION_ARN": sfn_process.state_machine_arn,
            "QUEUE_URL": queue.queue_url
        })
    # grant
    queue.grant_consume_messages(sfn_invoke_lambda)
    sfn_process.grant_start_execution(sfn_invoke_lambda)

    # ================ #
    # CloudWatch Event #
    # ================ #
    # Runs every 2 hours
    invoke_automatically = aws_events.Rule(
        scope=self,
        id=f"InvokeSFnViaLambda-{stack_env}",
        schedule=aws_events.Schedule.rate(core.Duration.hours(2)))
    invoke_automatically.add_target(
        aws_events_targets.LambdaFunction(sfn_invoke_lambda))

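# This stack calls self._create_lambda_function(...) and
# self._dynamodb_update_in_sfn(...), which are not shown. A minimal sketch of
# the first, consistent with how it is called (the asset path and handler
# naming scheme are assumptions):
def _create_lambda_function(self, function_name: str, environment: dict) -> lambda_.Function:
    return lambda_.Function(
        scope=self,
        id=f"{function_name}-lambda",
        code=lambda_.AssetCode.from_asset("lambda_script"),  # assumed asset directory
        handler=f"lambda_handler.{function_name}",  # assumed handler naming scheme
        runtime=lambda_.Runtime.PYTHON_3_8,
        environment=environment)
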
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    env_name = self.node.try_get_context('env')

    # Create the SQS queue
    queue = sqs.Queue(self, id=f"{env_name}-SQSQueue", queue_name=f"{env_name}-queue")

    # Create the API GW service role with permissions to call SQS
    rest_api_role = iam.Role(
        self,
        id=f"{env_name}-RestAPISQSRole",
        assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSQSFullAccess")
        ])

    # Create an API GW Rest API
    base_api = apigw.RestApi(
        self,
        id=f'{env_name}-ApiGW',
        rest_api_name=f'{env_name}SQSTestAPI',
        api_key_source_type=apigw.ApiKeySourceType.HEADER)

    usage_api_key_value = ''.join(
        random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits)
        for _ in range(40))
    usage_api_key = base_api.add_api_key(id=f'{env_name}-apikey',
                                         value=usage_api_key_value)
    usage_plan = base_api.add_usage_plan(
        id=f'{env_name}-usageplan',
        name=f'{env_name}-usageplan',
        api_key=usage_api_key,
        throttle=apigw.ThrottleSettings(rate_limit=10, burst_limit=2))
    usage_plan.add_api_stage(stage=base_api.deployment_stage)

    # Create a resource named "sqstest" on the base API
    api_resource = base_api.root.add_resource('sqstest')

    # Create API Integration Response object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html
    integration_response = apigw.IntegrationResponse(
        status_code="200",
        response_templates={"application/json": ""},
    )

    # Create API Integration Options object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html
    api_integration_options = apigw.IntegrationOptions(
        credentials_role=rest_api_role,
        integration_responses=[integration_response],
        request_templates={
            "application/json": "Action=SendMessage&MessageBody=$input.body"
        },
        passthrough_behavior=apigw.PassthroughBehavior.NEVER,
        request_parameters={
            "integration.request.header.Content-Type":
                "'application/x-www-form-urlencoded'"
        },
    )

    # Create AWS Integration Object for SQS: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/AwsIntegration.html
    api_resource_sqs_integration = apigw.AwsIntegration(
        service="sqs",
        integration_http_method="POST",
        # must be ACCOUNT_ID; that is just how the URL to SQS is formed
        path="{}/{}".format(core.Aws.ACCOUNT_ID, queue.queue_name),
        options=api_integration_options)

    # Create a Method Response Object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/MethodResponse.html
    method_response = apigw.MethodResponse(status_code="200")

    # Add the API GW Integration to the "sqstest" API GW Resource
    api_resource.add_method(
        "POST",
        api_resource_sqs_integration,
        method_responses=[method_response],
        api_key_required=True)

    # Creating Lambda function that will be triggered by the SQS Queue
    sqs_lambda = _lambda.Function(
        self, 'SQSTriggerLambda',
        handler='sqs_lambda.handler',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.asset('pr_sqs_lambda'),
    )

    # Create an SQS event source for Lambda
    sqs_event_source = lambda_event_source.SqsEventSource(queue)

    # Add SQS event source to the Lambda function
    sqs_lambda.add_event_source(sqs_event_source)

    # e.g. https://67ixnggm81.execute-api.us-east-1.amazonaws.com/prod/sqstest
    region = core.Aws.REGION
    core.CfnOutput(
        self, 'api-gw-url',
        value='https://' + base_api.rest_api_id + '.execute-api.' + region +
              '.amazonaws.com/prod/sqstest',
        export_name='api-sqs-gw-url')

    print(f'API Key: {usage_api_key_value}')

def __init__(self, scope: Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Create the SQS queue
    queue = sqs.Queue(self, "SQSQueue")

    # Create the API GW service role with permissions to call SQS
    rest_api_role = iam.Role(
        self, "RestAPIRole",
        assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSQSFullAccess")
        ])

    # Create an API GW Rest API
    base_api = apigw.RestApi(self, 'ApiGW', rest_api_name='TestAPI')
    base_api.root.add_method("ANY")

    # Create a resource named "example" on the base API
    api_resource = base_api.root.add_resource('example')

    # Create API Integration Response object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html
    integration_response = apigw.IntegrationResponse(
        status_code="200",
        response_templates={"application/json": ""},
    )

    # Create API Integration Options object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html
    api_integration_options = apigw.IntegrationOptions(
        credentials_role=rest_api_role,
        integration_responses=[integration_response],
        request_templates={
            "application/json": "Action=SendMessage&MessageBody=$input.body"
        },
        passthrough_behavior=apigw.PassthroughBehavior.NEVER,
        request_parameters={
            "integration.request.header.Content-Type":
                "'application/x-www-form-urlencoded'"
        },
    )

    # Create AWS Integration Object for SQS: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/AwsIntegration.html
    api_resource_sqs_integration = apigw.AwsIntegration(
        service="sqs",
        integration_http_method="POST",
        path="{}/{}".format(Aws.ACCOUNT_ID, queue.queue_name),
        options=api_integration_options)

    # Create a Method Response Object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/MethodResponse.html
    method_response = apigw.MethodResponse(status_code="200")

    # Add the API GW Integration to the "example" API GW Resource
    api_resource.add_method("POST", api_resource_sqs_integration,
                            method_responses=[method_response])

    # Creating Lambda function that will be triggered by the SQS Queue
    sqs_lambda = _lambda.Function(
        self, 'SQSTriggerLambda',
        handler='lambda-handler.handler',
        runtime=_lambda.Runtime.PYTHON_3_7,
        code=_lambda.Code.from_asset('lambda'),
    )

    # Create an SQS event source for Lambda
    sqs_event_source = lambda_event_source.SqsEventSource(queue)

    # Add SQS event source to the Lambda function
    sqs_lambda.add_event_source(sqs_event_source)

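# Quick test sketch: the mapping template forwards the raw request body as the
# SQS message (Action=SendMessage&MessageBody=$input.body), so any payload works
# (URL is a placeholder):
#   curl -X POST -d '{"msg": "hello"}' "https://<api-id>.execute-api.<region>.amazonaws.com/prod/example"
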
def __init__(
    self,
    scope: core.Construct,
    id: str,
    stack_log_level: str,
    back_end_api_name: str,
    back_end_api_datastore_name: str,
    **kwargs
) -> None:
    super().__init__(scope, id, **kwargs)

    # DynamoDB: key-value database
    if not back_end_api_datastore_name:
        back_end_api_datastore_name = f"{GlobalArgs.REPO_NAME}-api-datastore"
    self.ddb_table_01 = _dynamodb.Table(
        self, "apiPerformanceWithCaching",
        partition_key=_dynamodb.Attribute(
            name="id",
            type=_dynamodb.AttributeType.STRING),
        read_capacity=20,
        write_capacity=20,
        table_name=f"{back_end_api_datastore_name}-{id}",
        removal_policy=core.RemovalPolicy.DESTROY)

    # Let us use our Cfn Custom Resource to load data into our dynamodb table.
    data_loader_status = DdbDataLoaderStack(
        self, "cachedApiDdbLoader",
        Ddb_table_name=self.ddb_table_01.table_name)

    # Read Lambda function code
    try:
        with open("api_performance_with_caching/stacks/back_end/lambda_src/serverless_greeter.py",
                  mode="r") as f:
            greeter_fn_code = f.read()
    except OSError as e:
        print("Unable to read Lambda Function Code")
        raise e

    greeter_fn = _lambda.Function(
        self, "greeterFn",
        function_name=f"greeter_fn_{id}",
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler="index.lambda_handler",
        code=_lambda.InlineCode(greeter_fn_code),
        timeout=core.Duration.seconds(10),
        reserved_concurrent_executions=50,
        environment={
            "LOG_LEVEL": f"{stack_log_level}",
            "Environment": "Production",
            "DDB_TABLE_NAME": self.ddb_table_01.table_name,
            "RANDOM_SLEEP_SECS": "2",
            "ANDON_CORD_PULLED": "False"
        },
        description="Creates a simple greeter function")
    greeter_fn_version = greeter_fn.latest_version
    greeter_fn_version_alias = _lambda.Alias(
        self, "greeterFnAlias",
        alias_name="MystiqueAutomation",
        version=greeter_fn_version)

    # Create Custom Loggroup
    greeter_fn_lg = _logs.LogGroup(
        self, "squareFnLoggroup",
        log_group_name=f"/aws/lambda/{greeter_fn.function_name}",
        retention=_logs.RetentionDays.ONE_WEEK,
        removal_policy=core.RemovalPolicy.DESTROY)

    # Add DDB read/write permission to the Lambda
    self.ddb_table_01.grant_read_write_data(greeter_fn)

    # Add API GW front end for the Lambda
    back_end_api_stage_01_options = _apigw.StageOptions(
        stage_name="miztiik",
        cache_cluster_enabled=True,
        caching_enabled=True,
        cache_cluster_size="0.5",
        cache_ttl=core.Duration.seconds(30),
        # Log full requests/responses data
        data_trace_enabled=True,
        # Enable Detailed CloudWatch Metrics
        metrics_enabled=True,
        logging_level=_apigw.MethodLoggingLevel.INFO,
        method_options={
            "/cached/movie/GET": _apigw.MethodDeploymentOptions(
                caching_enabled=False)
        })

    # Create API Gateway
    cached_api = _apigw.RestApi(
        self, "backEnd01Api",
        rest_api_name=f"{back_end_api_name}",
        deploy_options=back_end_api_stage_01_options,
        minimum_compression_size=0,
        endpoint_types=[_apigw.EndpointType.EDGE],
        description=f"{GlobalArgs.OWNER}: API Best Practice Demonstration - Cached-vs-UnCached APIs")

    back_end_01_api_res = cached_api.root.add_resource("cached")
    res_movie = back_end_01_api_res.add_resource("movie")

    res_movie_method_get = res_movie.add_method(
        http_method="GET",
        request_parameters={
            "method.request.header.InvocationType": True,
            "method.request.path.number": True
        },
        integration=_apigw.LambdaIntegration(
            handler=greeter_fn,
            proxy=True))

    # Add Method for getting Movie by {id}
    res_movie_by_id = res_movie.add_resource("{id}")

    # Because this is NOT a proxy integration, we need to define our response model
    response_model = cached_api.add_model(
        "ResponseModel",
        content_type="application/json",
        model_name="MiztiikResponseModel",
        schema=_apigw.JsonSchema(
            schema=_apigw.JsonSchemaVersion.DRAFT4,
            title="updateResponse",
            type=_apigw.JsonSchemaType.OBJECT,
            properties={
                "message": _apigw.JsonSchema(type=_apigw.JsonSchemaType.STRING)
            }))

    res_movie_by_id_validator_request = cached_api.add_request_validator(
        "apiReqValidator",
        validate_request_parameters=True)

    req_template = {"id": "$input.params('id')"}
    request_template_string = json.dumps(req_template, separators=(',', ':'))

    # resp_template = """$input.path('$.body.message')"""
    resp_template = """$input.path('$.body')"""

    res_movie_by_id_method_get = res_movie_by_id.add_method(
        http_method="GET",
        request_parameters={
            "method.request.header.InvocationType": False,
            "method.request.path.id": True
        },
        request_validator=res_movie_by_id_validator_request,
        integration=_apigw.LambdaIntegration(
            handler=greeter_fn,
            proxy=False,
            request_parameters={
                "integration.request.path.id": "method.request.path.id"
            },
            cache_key_parameters=["method.request.path.id"],
            request_templates={"application/json": request_template_string},
            passthrough_behavior=_apigw.PassthroughBehavior.NEVER,
            integration_responses=[
                _apigw.IntegrationResponse(
                    status_code="200",
                    # selection_pattern=r"2\d{2}",  # Use for mapping Lambda Errors
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Headers":
                            "'cache-control,Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
                        "method.response.header.Content-Type": "'application/json'",
                    },
                    response_templates={"application/json": f"{resp_template}"})
            ]),
        method_responses=[
            _apigw.MethodResponse(
                status_code="200",
                response_parameters={
                    "method.response.header.Content-Type": True,
                    "method.response.header.Access-Control-Allow-Headers": True,
                },
                response_models={"application/json": response_model})
        ])

    self.cached_api_url = res_movie.url

    # Outputs
    output_1 = core.CfnOutput(
        self, "CachedApiUrl",
        value=f"{res_movie.url}",
        description="Use a utility like curl/Postman to access this API.")
    output_2 = core.CfnOutput(
        self, "ddbDataLoaderStatus",
        value=f"{data_loader_status.response}",
        description="Waf Rate Rule Creator Status")

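# Usage sketch (URL is a placeholder): /cached/movie/{id} caches on the id path
# parameter with a 30-second TTL, so repeated GETs for the same id within the
# TTL are served from the stage cache, while /cached/movie itself has caching
# disabled via method_options.
#   curl "https://<api-id>.execute-api.<region>.amazonaws.com/miztiik/cached/movie/42"
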
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    vpc = aws_ec2.Vpc(
        self, "OctemberVPC",
        max_azs=2,
        # subnet_configuration=[{
        #     "cidrMask": 24,
        #     "name": "Public",
        #     "subnetType": aws_ec2.SubnetType.PUBLIC,
        # },
        # {
        #     "cidrMask": 24,
        #     "name": "Private",
        #     "subnetType": aws_ec2.SubnetType.PRIVATE
        # },
        # {
        #     "cidrMask": 28,
        #     "name": "Isolated",
        #     "subnetType": aws_ec2.SubnetType.ISOLATED,
        #     "reserved": True
        # }],
        gateway_endpoints={
            "S3": aws_ec2.GatewayVpcEndpointOptions(
                service=aws_ec2.GatewayVpcEndpointAwsService.S3)
        })

    dynamo_db_endpoint = vpc.add_gateway_endpoint(
        "DynamoDbEndpoint",
        service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB)

    s3_bucket = s3.Bucket(
        self, "s3bucket",
        bucket_name="octember-bizcard-{region}-{account}".format(
            region=core.Aws.REGION, account=core.Aws.ACCOUNT_ID))

    api = apigw.RestApi(
        self, "BizcardImageUploader",
        rest_api_name="BizcardImageUploader",
        description="This service serves uploading bizcard images into s3.",
        endpoint_types=[apigw.EndpointType.REGIONAL],
        binary_media_types=["image/png", "image/jpg"],
        deploy=True,
        deploy_options=apigw.StageOptions(stage_name="v1"))

    rest_api_role = aws_iam.Role(
        self, "ApiGatewayRoleForS3",
        role_name="ApiGatewayRoleForS3FullAccess",
        assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"),
        managed_policies=[
            aws_iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3FullAccess")
        ])

    list_objects_responses = [
        apigw.IntegrationResponse(
            status_code="200",
            #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html#aws_cdk.aws_apigateway.IntegrationResponse.response_parameters
            # The response parameters from the backend response that API Gateway sends to the method response.
            # Use the destination as the key and the source as the value:
            #  - The destination must be an existing response parameter in the MethodResponse property.
            #  - The source must be an existing method request parameter or a static value.
            response_parameters={
                'method.response.header.Timestamp': 'integration.response.header.Date',
                'method.response.header.Content-Length': 'integration.response.header.Content-Length',
                'method.response.header.Content-Type': 'integration.response.header.Content-Type'
            }),
        apigw.IntegrationResponse(status_code="400", selection_pattern=r"4\d{2}"),
        apigw.IntegrationResponse(status_code="500", selection_pattern=r"5\d{2}")
    ]

    list_objects_integration_options = apigw.IntegrationOptions(
        credentials_role=rest_api_role,
        integration_responses=list_objects_responses)

    get_s3_integration = apigw.AwsIntegration(
        service="s3",
        integration_http_method="GET",
        path='/',
        options=list_objects_integration_options)

    api.root.add_method(
        "GET",
        get_s3_integration,
        authorization_type=apigw.AuthorizationType.IAM,
        api_key_required=False,
        method_responses=[
            apigw.MethodResponse(
                status_code="200",
                response_parameters={
                    'method.response.header.Timestamp': False,
                    'method.response.header.Content-Length': False,
                    'method.response.header.Content-Type': False
                },
                response_models={'application/json': apigw.EmptyModel()}),
            apigw.MethodResponse(status_code="400"),
            apigw.MethodResponse(status_code="500")
        ],
        request_parameters={'method.request.header.Content-Type': False})

    get_s3_folder_integration_options = apigw.IntegrationOptions(
        credentials_role=rest_api_role,
        integration_responses=list_objects_responses,
        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html#aws_cdk.aws_apigateway.IntegrationOptions.request_parameters
        # Specify request parameters as key-value pairs (string-to-string mappings), with a destination as the key and a source as the value.
        # The source must be an existing method request parameter or a static value.
        request_parameters={
            "integration.request.path.bucket": "method.request.path.folder"
        })

    get_s3_folder_integration = apigw.AwsIntegration(
        service="s3",
        integration_http_method="GET",
        path="{bucket}",
        options=get_s3_folder_integration_options)

    s3_folder = api.root.add_resource('{folder}')
    s3_folder.add_method(
        "GET",
        get_s3_folder_integration,
        authorization_type=apigw.AuthorizationType.IAM,
        api_key_required=False,
        method_responses=[
            apigw.MethodResponse(
                status_code="200",
                response_parameters={
                    'method.response.header.Timestamp': False,
                    'method.response.header.Content-Length': False,
                    'method.response.header.Content-Type': False
                },
                response_models={'application/json': apigw.EmptyModel()}),
            apigw.MethodResponse(status_code="400"),
            apigw.MethodResponse(status_code="500")
        ],
        request_parameters={
            'method.request.header.Content-Type': False,
            'method.request.path.folder': True
        })

    get_s3_item_integration_options = apigw.IntegrationOptions(
        credentials_role=rest_api_role,
        integration_responses=list_objects_responses,
        request_parameters={
            "integration.request.path.bucket": "method.request.path.folder",
            "integration.request.path.object": "method.request.path.item"
        })

    get_s3_item_integration = apigw.AwsIntegration(
        service="s3",
        integration_http_method="GET",
        path="{bucket}/{object}",
        options=get_s3_item_integration_options)

    s3_item = s3_folder.add_resource('{item}')
    s3_item.add_method(
        "GET",
        get_s3_item_integration,
        authorization_type=apigw.AuthorizationType.IAM,
        api_key_required=False,
        method_responses=[
            apigw.MethodResponse(
                status_code="200",
                response_parameters={
                    'method.response.header.Timestamp': False,
                    'method.response.header.Content-Length': False,
                    'method.response.header.Content-Type': False
                },
                response_models={'application/json': apigw.EmptyModel()}),
            apigw.MethodResponse(status_code="400"),
            apigw.MethodResponse(status_code="500")
        ],
        request_parameters={
            'method.request.header.Content-Type': False,
            'method.request.path.folder': True,
            'method.request.path.item': True
        })

    put_s3_item_integration_options = apigw.IntegrationOptions(
        credentials_role=rest_api_role,
        integration_responses=[
            apigw.IntegrationResponse(status_code="200"),
            apigw.IntegrationResponse(status_code="400", selection_pattern=r"4\d{2}"),
            apigw.IntegrationResponse(status_code="500", selection_pattern=r"5\d{2}")
        ],
        request_parameters={
            "integration.request.header.Content-Type": "method.request.header.Content-Type",
            "integration.request.path.bucket": "method.request.path.folder",
            "integration.request.path.object": "method.request.path.item"
        })

    put_s3_item_integration = apigw.AwsIntegration(
        service="s3",
        integration_http_method="PUT",
        path="{bucket}/{object}",
        options=put_s3_item_integration_options)

    s3_item.add_method(
        "PUT",
        put_s3_item_integration,
        authorization_type=apigw.AuthorizationType.IAM,
        api_key_required=False,
        method_responses=[
            apigw.MethodResponse(
                status_code="200",
                response_parameters={'method.response.header.Content-Type': False},
                response_models={'application/json': apigw.EmptyModel()}),
            apigw.MethodResponse(status_code="400"),
            apigw.MethodResponse(status_code="500")
        ],
        request_parameters={
            'method.request.header.Content-Type': False,
            'method.request.path.folder': True,
            'method.request.path.item': True
        })

    ddb_table = dynamodb.Table(
        self, "BizcardImageMetaInfoDdbTable",
        table_name="OctemberBizcardImgMeta",
        partition_key=dynamodb.Attribute(name="image_id",
                                         type=dynamodb.AttributeType.STRING),
        billing_mode=dynamodb.BillingMode.PROVISIONED,
        read_capacity=15,
        write_capacity=5)

    img_kinesis_stream = kinesis.Stream(
        self, "BizcardImagePath", stream_name="octember-bizcard-image")

    # create lambda function
    trigger_textract_lambda_fn = _lambda.Function(
        self, "TriggerTextExtractorFromImage",
        runtime=_lambda.Runtime.PYTHON_3_7,
        function_name="TriggerTextExtractorFromImage",
        handler="trigger_text_extract_from_s3_image.lambda_handler",
        description="Trigger to extract text from an image in S3",
        code=_lambda.Code.asset("./src/main/python/TriggerTextExtractFromS3Image"),
        environment={
            'REGION_NAME': core.Aws.REGION,
            'DDB_TABLE_NAME': ddb_table.table_name,
            'KINESIS_STREAM_NAME': img_kinesis_stream.stream_name
        },
        timeout=core.Duration.minutes(5))

    ddb_table_rw_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        resources=[ddb_table.table_arn],
        actions=[
            "dynamodb:BatchGetItem", "dynamodb:Describe*", "dynamodb:List*",
            "dynamodb:GetItem", "dynamodb:Query", "dynamodb:Scan",
            "dynamodb:BatchWriteItem", "dynamodb:DeleteItem", "dynamodb:PutItem",
            "dynamodb:UpdateItem",
            "dax:Describe*", "dax:List*", "dax:GetItem", "dax:BatchGetItem",
            "dax:Query", "dax:Scan", "dax:BatchWriteItem", "dax:DeleteItem",
            "dax:PutItem", "dax:UpdateItem"
        ])
    trigger_textract_lambda_fn.add_to_role_policy(ddb_table_rw_policy_statement)
    trigger_textract_lambda_fn.add_to_role_policy(
        aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            resources=[img_kinesis_stream.stream_arn],
            actions=["kinesis:Get*", "kinesis:List*", "kinesis:Describe*",
                     "kinesis:PutRecord", "kinesis:PutRecords"]))

    # assign notification for the s3 event type (ex: OBJECT_CREATED)
    s3_event_filter = s3.NotificationKeyFilter(prefix="bizcard-raw-img/",
                                               suffix=".jpg")
    s3_event_source = S3EventSource(
        s3_bucket,
        events=[s3.EventType.OBJECT_CREATED],
        filters=[s3_event_filter])
    trigger_textract_lambda_fn.add_event_source(s3_event_source)

    #XXX: https://github.com/aws/aws-cdk/issues/2240
    # To avoid creating extra Lambda Functions with names like LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8a
    # if log_retention=aws_logs.RetentionDays.THREE_DAYS is added to the constructor props
    log_group = aws_logs.LogGroup(
        self, "TriggerTextractLogGroup",
        log_group_name="/aws/lambda/TriggerTextExtractorFromImage",
        retention=aws_logs.RetentionDays.THREE_DAYS)
    log_group.grant_write(trigger_textract_lambda_fn)

    text_kinesis_stream = kinesis.Stream(
        self, "BizcardTextData", stream_name="octember-bizcard-txt")

    textract_lambda_fn = _lambda.Function(
        self, "GetTextFromImage",
        runtime=_lambda.Runtime.PYTHON_3_7,
        function_name="GetTextFromImage",
        handler="get_text_from_s3_image.lambda_handler",
        description="extract text from an image in S3",
        code=_lambda.Code.asset("./src/main/python/GetTextFromS3Image"),
        environment={
            'REGION_NAME': core.Aws.REGION,
            'DDB_TABLE_NAME': ddb_table.table_name,
            'KINESIS_STREAM_NAME': text_kinesis_stream.stream_name
        },
        timeout=core.Duration.minutes(5))
    textract_lambda_fn.add_to_role_policy(ddb_table_rw_policy_statement)
    textract_lambda_fn.add_to_role_policy(
        aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            resources=[text_kinesis_stream.stream_arn],
            actions=["kinesis:Get*", "kinesis:List*", "kinesis:Describe*",
                     "kinesis:PutRecord", "kinesis:PutRecords"]))
    textract_lambda_fn.add_to_role_policy(aws_iam.PolicyStatement(**{
        "effect": aws_iam.Effect.ALLOW,
        "resources": [s3_bucket.bucket_arn, "{}/*".format(s3_bucket.bucket_arn)],
        "actions": ["s3:AbortMultipartUpload", "s3:GetBucketLocation",
                    "s3:GetObject", "s3:ListBucket",
                    "s3:ListBucketMultipartUploads", "s3:PutObject"]
    }))
    textract_lambda_fn.add_to_role_policy(
        aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            resources=["*"],
            actions=["textract:*"]))

    img_kinesis_event_source = KinesisEventSource(
        img_kinesis_stream,
        batch_size=100,
        starting_position=_lambda.StartingPosition.LATEST)
    textract_lambda_fn.add_event_source(img_kinesis_event_source)

    log_group = aws_logs.LogGroup(
        self, "GetTextFromImageLogGroup",
        log_group_name="/aws/lambda/GetTextFromImage",
        retention=aws_logs.RetentionDays.THREE_DAYS)
    log_group.grant_write(textract_lambda_fn)

    sg_use_bizcard_es = aws_ec2.SecurityGroup(
        self, "BizcardSearchClientSG",
        vpc=vpc,
        allow_all_outbound=True,
        description='security group for octember bizcard elasticsearch client',
        security_group_name='use-octember-bizcard-es')
    core.Tags.of(sg_use_bizcard_es).add('Name', 'use-octember-bizcard-es')

    sg_bizcard_es = aws_ec2.SecurityGroup(
        self, "BizcardSearchSG",
        vpc=vpc,
        allow_all_outbound=True,
        description='security group for octember bizcard elasticsearch',
        security_group_name='octember-bizcard-es')
    core.Tags.of(sg_bizcard_es).add('Name', 'octember-bizcard-es')

    sg_bizcard_es.add_ingress_rule(
        peer=sg_bizcard_es,
        connection=aws_ec2.Port.all_tcp(),
        description='octember-bizcard-es')
    sg_bizcard_es.add_ingress_rule(
        peer=sg_use_bizcard_es,
        connection=aws_ec2.Port.all_tcp(),
        description='use-octember-bizcard-es')

    sg_ssh_access = aws_ec2.SecurityGroup(
        self, "BastionHostSG",
        vpc=vpc,
        allow_all_outbound=True,
        description='security group for bastion host',
        security_group_name='octember-bastion-host-sg')
    core.Tags.of(sg_ssh_access).add('Name', 'octember-bastion-host')
    sg_ssh_access.add_ingress_rule(
        peer=aws_ec2.Peer.any_ipv4(),
        connection=aws_ec2.Port.tcp(22),
        description='ssh access')

    bastion_host = aws_ec2.BastionHostLinux(
        self, "BastionHost",
        vpc=vpc,
        security_group=sg_ssh_access)  # assumption: the source snippet breaks off mid-call; sg_ssh_access is the bastion SG defined above
instance_type=aws_ec2.InstanceType('t3.nano'), security_group=sg_ssh_access, subnet_selection=aws_ec2.SubnetSelection( subnet_type=aws_ec2.SubnetType.PUBLIC)) bastion_host.instance.add_security_group(sg_use_bizcard_es) #XXX: aws cdk elastsearch example - https://github.com/aws/aws-cdk/issues/2873 es_cfn_domain = aws_elasticsearch.CfnDomain( self, 'BizcardSearch', elasticsearch_cluster_config={ "dedicatedMasterCount": 3, "dedicatedMasterEnabled": True, "dedicatedMasterType": "t2.medium.elasticsearch", "instanceCount": 2, "instanceType": "t2.medium.elasticsearch", "zoneAwarenessEnabled": True }, ebs_options={ "ebsEnabled": True, "volumeSize": 10, "volumeType": "gp2" }, domain_name="octember-bizcard", elasticsearch_version="7.9", encryption_at_rest_options={"enabled": False}, access_policies={ "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Principal": { "AWS": "*" }, "Action": ["es:Describe*", "es:List*", "es:Get*", "es:ESHttp*"], "Resource": self.format_arn(service="es", resource="domain", resource_name="octember-bizcard/*") }] }, snapshot_options={"automatedSnapshotStartHour": 17}, vpc_options={ "securityGroupIds": [sg_bizcard_es.security_group_id], "subnetIds": vpc.select_subnets( subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids }) core.Tags.of(es_cfn_domain).add('Name', 'octember-bizcard-es') s3_lib_bucket_name = self.node.try_get_context("lib_bucket_name") #XXX: https://github.com/aws/aws-cdk/issues/1342 s3_lib_bucket = s3.Bucket.from_bucket_name(self, id, s3_lib_bucket_name) es_lib_layer = _lambda.LayerVersion( self, "ESLib", layer_version_name="es-lib", compatible_runtimes=[_lambda.Runtime.PYTHON_3_7], code=_lambda.Code.from_bucket(s3_lib_bucket, "var/octember-es-lib.zip")) redis_lib_layer = _lambda.LayerVersion( self, "RedisLib", layer_version_name="redis-lib", compatible_runtimes=[_lambda.Runtime.PYTHON_3_7], code=_lambda.Code.from_bucket(s3_lib_bucket, "var/octember-redis-lib.zip")) #XXX: Deploy lambda in VPC - https://github.com/aws/aws-cdk/issues/1342 upsert_to_es_lambda_fn = _lambda.Function( self, "UpsertBizcardToES", runtime=_lambda.Runtime.PYTHON_3_7, function_name="UpsertBizcardToElasticSearch", handler="upsert_bizcard_to_es.lambda_handler", description="Upsert bizcard text into elasticsearch", code=_lambda.Code.asset("./src/main/python/UpsertBizcardToES"), environment={ 'ES_HOST': es_cfn_domain.attr_domain_endpoint, 'ES_INDEX': 'octember_bizcard', 'ES_TYPE': 'bizcard' }, timeout=core.Duration.minutes(5), layers=[es_lib_layer], security_groups=[sg_use_bizcard_es], vpc=vpc) text_kinesis_event_source = KinesisEventSource( text_kinesis_stream, batch_size=99, starting_position=_lambda.StartingPosition.LATEST) upsert_to_es_lambda_fn.add_event_source(text_kinesis_event_source) log_group = aws_logs.LogGroup( self, "UpsertBizcardToESLogGroup", log_group_name="/aws/lambda/UpsertBizcardToElasticSearch", retention=aws_logs.RetentionDays.THREE_DAYS) log_group.grant_write(upsert_to_es_lambda_fn) firehose_role_policy_doc = aws_iam.PolicyDocument() firehose_role_policy_doc.add_statements( aws_iam.PolicyStatement( **{ "effect": aws_iam.Effect.ALLOW, "resources": [ s3_bucket.bucket_arn, "{}/*".format( s3_bucket.bucket_arn) ], "actions": [ "s3:AbortMultipartUpload", "s3:GetBucketLocation", "s3:GetObject", "s3:ListBucket", "s3:ListBucketMultipartUploads", "s3:PutObject" ] })) firehose_role_policy_doc.add_statements( aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW, resources=["*"], actions=[ "glue:GetTable", "glue:GetTableVersion", "glue:GetTableVersions" ])) 
firehose_role_policy_doc.add_statements( aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW, resources=[text_kinesis_stream.stream_arn], actions=[ "kinesis:DescribeStream", "kinesis:GetShardIterator", "kinesis:GetRecords" ])) firehose_log_group_name = "/aws/kinesisfirehose/octember-bizcard-txt-to-s3" firehose_role_policy_doc.add_statements( aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, #XXX: The ARN will be formatted as follows: # arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name} resources=[ self.format_arn(service="logs", resource="log-group", resource_name="{}:log-stream:*".format( firehose_log_group_name), sep=":") ], actions=["logs:PutLogEvents"])) firehose_role = aws_iam.Role( self, "FirehoseDeliveryRole", role_name="FirehoseDeliveryRole", assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"), #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221 inline_policies={"firehose_role_policy": firehose_role_policy_doc}) bizcard_text_to_s3_delivery_stream = aws_kinesisfirehose.CfnDeliveryStream( self, "BizcardTextToS3", delivery_stream_name="octember-bizcard-txt-to-s3", delivery_stream_type="KinesisStreamAsSource", kinesis_stream_source_configuration={ "kinesisStreamArn": text_kinesis_stream.stream_arn, "roleArn": firehose_role.role_arn }, extended_s3_destination_configuration={ "bucketArn": s3_bucket.bucket_arn, "bufferingHints": { "intervalInSeconds": 60, "sizeInMBs": 1 }, "cloudWatchLoggingOptions": { "enabled": True, "logGroupName": firehose_log_group_name, "logStreamName": "S3Delivery" }, "compressionFormat": "GZIP", "prefix": "bizcard-text/", "roleArn": firehose_role.role_arn }) sg_use_bizcard_es_cache = aws_ec2.SecurityGroup( self, "BizcardSearchCacheClientSG", vpc=vpc, allow_all_outbound=True, description= 'security group for octember bizcard search query cache client', security_group_name='use-octember-bizcard-es-cache') core.Tags.of(sg_use_bizcard_es_cache).add( 'Name', 'use-octember-bizcard-es-cache') sg_bizcard_es_cache = aws_ec2.SecurityGroup( self, "BizcardSearchCacheSG", vpc=vpc, allow_all_outbound=True, description= 'security group for octember bizcard search query cache', security_group_name='octember-bizcard-es-cache') core.Tags.of(sg_bizcard_es_cache).add('Name', 'octember-bizcard-es-cache') sg_bizcard_es_cache.add_ingress_rule( peer=sg_use_bizcard_es_cache, connection=aws_ec2.Port.tcp(6379), description='use-octember-bizcard-es-cache') es_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup( self, "QueryCacheSubnetGroup", description="subnet group for octember-bizcard-es-cache", subnet_ids=vpc.select_subnets( subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids, cache_subnet_group_name='octember-bizcard-es-cache') es_query_cache = aws_elasticache.CfnCacheCluster( self, "BizcardSearchQueryCache", cache_node_type="cache.t3.small", num_cache_nodes=1, engine="redis", engine_version="5.0.5", auto_minor_version_upgrade=False, cluster_name="octember-bizcard-es-cache", snapshot_retention_limit=3, snapshot_window="17:00-19:00", preferred_maintenance_window="mon:19:00-mon:20:30", #XXX: Do not use a reference for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098 #cache_subnet_group_name=es_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC cache_subnet_group_name='octember-bizcard-es-cache', vpc_security_group_ids=[sg_bizcard_es_cache.security_group_id]) #XXX: If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster.
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-cache-cluster.html#cfn-elasticache-cachecluster-cachesubnetgroupname es_query_cache.add_depends_on(es_query_cache_subnet_group) #XXX: add more than 2 security groups # https://github.com/aws/aws-cdk/blob/ea10f0d141a48819ec0000cd7905feda993870a9/packages/%40aws-cdk/aws-lambda/lib/function.ts#L387 # https://github.com/aws/aws-cdk/issues/1555 # https://github.com/aws/aws-cdk/pull/5049 bizcard_search_lambda_fn = _lambda.Function( self, "BizcardSearchServer", runtime=_lambda.Runtime.PYTHON_3_7, function_name="BizcardSearchProxy", handler="es_search_bizcard.lambda_handler", description="Proxy server to search bizcard text", code=_lambda.Code.asset("./src/main/python/SearchBizcard"), environment={ 'ES_HOST': es_cfn_domain.attr_domain_endpoint, 'ES_INDEX': 'octember_bizcard', 'ES_TYPE': 'bizcard', 'ELASTICACHE_HOST': es_query_cache.attr_redis_endpoint_address }, timeout=core.Duration.minutes(1), layers=[es_lib_layer, redis_lib_layer], security_groups=[sg_use_bizcard_es, sg_use_bizcard_es_cache], vpc=vpc) #XXX: create API Gateway + LambdaProxy search_api = apigw.LambdaRestApi( self, "BizcardSearchAPI", handler=bizcard_search_lambda_fn, proxy=False, rest_api_name="BizcardSearch", description="This service searches bizcard text.", endpoint_types=[apigw.EndpointType.REGIONAL], deploy=True, deploy_options=apigw.StageOptions(stage_name="v1")) bizcard_search = search_api.root.add_resource('search') bizcard_search.add_method( "GET", method_responses=[ apigw.MethodResponse( status_code="200", response_models={'application/json': apigw.EmptyModel()}), apigw.MethodResponse(status_code="400"), apigw.MethodResponse(status_code="500") ]) sg_use_bizcard_graph_db = aws_ec2.SecurityGroup( self, "BizcardGraphDbClientSG", vpc=vpc, allow_all_outbound=True, description='security group for octember bizcard graph db client', security_group_name='use-octember-bizcard-neptune') core.Tags.of(sg_use_bizcard_graph_db).add( 'Name', 'use-octember-bizcard-neptune') sg_bizcard_graph_db = aws_ec2.SecurityGroup( self, "BizcardGraphDbSG", vpc=vpc, allow_all_outbound=True, description='security group for octember bizcard graph db', security_group_name='octember-bizcard-neptune') core.Tags.of(sg_bizcard_graph_db).add('Name', 'octember-bizcard-neptune') sg_bizcard_graph_db.add_ingress_rule( peer=sg_bizcard_graph_db, connection=aws_ec2.Port.tcp(8182), description='octember-bizcard-neptune') sg_bizcard_graph_db.add_ingress_rule( peer=sg_use_bizcard_graph_db, connection=aws_ec2.Port.tcp(8182), description='use-octember-bizcard-neptune') bizcard_graph_db_subnet_group = aws_neptune.CfnDBSubnetGroup( self, "NeptuneSubnetGroup", db_subnet_group_description= "subnet group for octember-bizcard-neptune", subnet_ids=vpc.select_subnets( subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids, db_subnet_group_name='octember-bizcard-neptune') bizcard_graph_db = aws_neptune.CfnDBCluster( self, "BizcardGraphDB", availability_zones=vpc.availability_zones, db_subnet_group_name=bizcard_graph_db_subnet_group.
db_subnet_group_name, db_cluster_identifier="octember-bizcard", backup_retention_period=1, preferred_backup_window="08:45-09:15", preferred_maintenance_window="sun:18:00-sun:18:30", vpc_security_group_ids=[sg_bizcard_graph_db.security_group_id]) bizcard_graph_db.add_depends_on(bizcard_graph_db_subnet_group) bizcard_graph_db_instance = aws_neptune.CfnDBInstance( self, "BizcardGraphDBInstance", db_instance_class="db.r5.large", allow_major_version_upgrade=False, auto_minor_version_upgrade=False, availability_zone=vpc.availability_zones[0], db_cluster_identifier=bizcard_graph_db.db_cluster_identifier, db_instance_identifier="octember-bizcard", preferred_maintenance_window="sun:18:00-sun:18:30") bizcard_graph_db_instance.add_depends_on(bizcard_graph_db) bizcard_graph_db_replica_instance = aws_neptune.CfnDBInstance( self, "BizcardGraphDBReplicaInstance", db_instance_class="db.r5.large", allow_major_version_upgrade=False, auto_minor_version_upgrade=False, availability_zone=vpc.availability_zones[-1], db_cluster_identifier=bizcard_graph_db.db_cluster_identifier, db_instance_identifier="octember-bizcard-replica", preferred_maintenance_window="sun:18:00-sun:18:30") bizcard_graph_db_replica_instance.add_depends_on(bizcard_graph_db) bizcard_graph_db_replica_instance.add_depends_on( bizcard_graph_db_instance) gremlinpython_lib_layer = _lambda.LayerVersion( self, "GremlinPythonLib", layer_version_name="gremlinpython-lib", compatible_runtimes=[_lambda.Runtime.PYTHON_3_7], code=_lambda.Code.from_bucket( s3_lib_bucket, "var/octember-gremlinpython-lib.zip")) #XXX: https://github.com/aws/aws-cdk/issues/1342 upsert_to_neptune_lambda_fn = _lambda.Function( self, "UpsertBizcardToGraphDB", runtime=_lambda.Runtime.PYTHON_3_7, function_name="UpsertBizcardToNeptune", handler="upsert_bizcard_to_graph_db.lambda_handler", description="Upsert bizcard into neptune", code=_lambda.Code.asset( "./src/main/python/UpsertBizcardToGraphDB"), environment={ 'REGION_NAME': core.Aws.REGION, 'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_endpoint, 'NEPTUNE_PORT': bizcard_graph_db.attr_port }, timeout=core.Duration.minutes(5), layers=[gremlinpython_lib_layer], security_groups=[sg_use_bizcard_graph_db], vpc=vpc) upsert_to_neptune_lambda_fn.add_event_source(text_kinesis_event_source) log_group = aws_logs.LogGroup( self, "UpsertBizcardToGraphDBLogGroup", log_group_name="/aws/lambda/UpsertBizcardToNeptune", retention=aws_logs.RetentionDays.THREE_DAYS) log_group.grant_write(upsert_to_neptune_lambda_fn) sg_use_bizcard_neptune_cache = aws_ec2.SecurityGroup( self, "BizcardNeptuneCacheClientSG", vpc=vpc, allow_all_outbound=True, description= 'security group for octember bizcard recommendation query cache client', security_group_name='use-octember-bizcard-neptune-cache') core.Tags.of(sg_use_bizcard_neptune_cache).add( 'Name', 'use-octember-bizcard-neptune-cache') sg_bizcard_neptune_cache = aws_ec2.SecurityGroup( self, "BizcardNeptuneCacheSG", vpc=vpc, allow_all_outbound=True, description= 'security group for octember bizcard recommendation query cache', security_group_name='octember-bizcard-neptune-cache') core.Tags.of(sg_bizcard_neptune_cache).add( 'Name', 'octember-bizcard-neptune-cache') sg_bizcard_neptune_cache.add_ingress_rule( peer=sg_use_bizcard_neptune_cache, connection=aws_ec2.Port.tcp(6379), description='use-octember-bizcard-neptune-cache') recomm_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup( self, "RecommQueryCacheSubnetGroup", description="subnet group for octember-bizcard-neptune-cache", subnet_ids=vpc.select_subnets(
subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids, cache_subnet_group_name='octember-bizcard-neptune-cache') recomm_query_cache = aws_elasticache.CfnCacheCluster( self, "BizcardRecommQueryCache", cache_node_type="cache.t3.small", num_cache_nodes=1, engine="redis", engine_version="5.0.5", auto_minor_version_upgrade=False, cluster_name="octember-bizcard-neptune-cache", snapshot_retention_limit=3, snapshot_window="17:00-19:00", preferred_maintenance_window="mon:19:00-mon:20:30", #XXX: Do not use a reference for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098 #cache_subnet_group_name=recomm_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC cache_subnet_group_name='octember-bizcard-neptune-cache', vpc_security_group_ids=[ sg_bizcard_neptune_cache.security_group_id ]) recomm_query_cache.add_depends_on(recomm_query_cache_subnet_group) bizcard_recomm_lambda_fn = _lambda.Function( self, "BizcardRecommender", runtime=_lambda.Runtime.PYTHON_3_7, function_name="BizcardRecommender", handler="neptune_recommend_bizcard.lambda_handler", description="This service serves PYMK (People You May Know).", code=_lambda.Code.asset("./src/main/python/RecommendBizcard"), environment={ 'REGION_NAME': core.Aws.REGION, 'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_read_endpoint, 'NEPTUNE_PORT': bizcard_graph_db.attr_port, 'ELASTICACHE_HOST': recomm_query_cache.attr_redis_endpoint_address }, timeout=core.Duration.minutes(1), layers=[gremlinpython_lib_layer, redis_lib_layer], security_groups=[ sg_use_bizcard_graph_db, sg_use_bizcard_neptune_cache ], vpc=vpc) #XXX: create API Gateway + LambdaProxy recomm_api = apigw.LambdaRestApi( self, "BizcardRecommendAPI", handler=bizcard_recomm_lambda_fn, proxy=False, rest_api_name="BizcardRecommend", description="This service serves PYMK (People You May Know).", endpoint_types=[apigw.EndpointType.REGIONAL], deploy=True, deploy_options=apigw.StageOptions(stage_name="v1")) bizcard_recomm = recomm_api.root.add_resource('pymk') bizcard_recomm.add_method( "GET", method_responses=[ apigw.MethodResponse( status_code="200", response_models={'application/json': apigw.EmptyModel()}), apigw.MethodResponse(status_code="400"), apigw.MethodResponse(status_code="500") ]) sagemaker_notebook_role_policy_doc = aws_iam.PolicyDocument() sagemaker_notebook_role_policy_doc.add_statements( aws_iam.PolicyStatement( **{ "effect": aws_iam.Effect.ALLOW, "resources": [ "arn:aws:s3:::aws-neptune-notebook", "arn:aws:s3:::aws-neptune-notebook/*" ], "actions": ["s3:GetObject", "s3:ListBucket"] })) sagemaker_notebook_role_policy_doc.add_statements( aws_iam.PolicyStatement( **{ "effect": aws_iam.Effect.ALLOW, "resources": [ "arn:aws:neptune-db:{region}:{account}:{cluster_id}/*". format(region=core.Aws.REGION, account=core.Aws.ACCOUNT_ID, cluster_id=bizcard_graph_db.
attr_cluster_resource_id) ], "actions": ["neptune-db:connect"] })) sagemaker_notebook_role = aws_iam.Role( self, 'SageMakerNotebookForNeptuneWorkbenchRole', role_name='AWSNeptuneNotebookRole-OctemberBizcard', assumed_by=aws_iam.ServicePrincipal('sagemaker.amazonaws.com'), #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221 inline_policies={ 'AWSNeptuneNotebook': sagemaker_notebook_role_policy_doc }) neptune_wb_lifecycle_content = '''#!/bin/bash sudo -u ec2-user -i <<'EOF' echo "export GRAPH_NOTEBOOK_AUTH_MODE=DEFAULT" >> ~/.bashrc echo "export GRAPH_NOTEBOOK_HOST={NeptuneClusterEndpoint}" >> ~/.bashrc echo "export GRAPH_NOTEBOOK_PORT={NeptuneClusterPort}" >> ~/.bashrc echo "export NEPTUNE_LOAD_FROM_S3_ROLE_ARN=''" >> ~/.bashrc echo "export AWS_REGION={AWS_Region}" >> ~/.bashrc aws s3 cp s3://aws-neptune-notebook/graph_notebook.tar.gz /tmp/graph_notebook.tar.gz rm -rf /tmp/graph_notebook tar -zxvf /tmp/graph_notebook.tar.gz -C /tmp /tmp/graph_notebook/install.sh EOF '''.format(NeptuneClusterEndpoint=bizcard_graph_db.attr_endpoint, NeptuneClusterPort=bizcard_graph_db.attr_port, AWS_Region=core.Aws.REGION) neptune_wb_lifecycle_config_prop = aws_sagemaker.CfnNotebookInstanceLifecycleConfig.NotebookInstanceLifecycleHookProperty( content=core.Fn.base64(neptune_wb_lifecycle_content)) neptune_wb_lifecycle_config = aws_sagemaker.CfnNotebookInstanceLifecycleConfig( self, 'NeptuneWorkbenchLifeCycleConfig', notebook_instance_lifecycle_config_name= 'AWSNeptuneWorkbenchOctemberBizcardLCConfig', on_start=[neptune_wb_lifecycle_config_prop]) neptune_workbench = aws_sagemaker.CfnNotebookInstance( self, 'NeptuneWorkbench', instance_type='ml.t2.medium', role_arn=sagemaker_notebook_role.role_arn, lifecycle_config_name=neptune_wb_lifecycle_config. notebook_instance_lifecycle_config_name, notebook_instance_name='OctemberBizcard-NeptuneWorkbench', root_access='Disabled', security_group_ids=[sg_use_bizcard_graph_db.security_group_id], subnet_id=bizcard_graph_db_subnet_group.subnet_ids[0])
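# ---------------------------------------------------------------------------
# The handler for the BizcardSearchProxy function above lives in
# ./src/main/python/SearchBizcard and is not reproduced in this document.
# What follows is a minimal, illustrative sketch of a cache-aside search
# handler consistent with the environment variables the stack wires up
# (ES_HOST, ES_INDEX, ELASTICACHE_HOST). The `redis` and `elasticsearch`
# clients are assumed to come from the redis-lib and es-lib Lambda layers,
# and the query-string name and index mapping are assumptions, not the
# actual source.
# ---------------------------------------------------------------------------
import json
import os

import redis  # assumed to be provided by the redis-lib layer
from elasticsearch import Elasticsearch  # assumed to be provided by es-lib

cache = redis.StrictRedis(host=os.environ['ELASTICACHE_HOST'], port=6379,
                          decode_responses=True)
es_client = Elasticsearch(
    hosts=[{'host': os.environ['ES_HOST'], 'port': 443}], use_ssl=True)


def lambda_handler(event, context):
    # GET /search?query=<text> arrives through the LambdaRestApi integration.
    query = (event.get('queryStringParameters') or {}).get('query', '')
    cached = cache.get(query)
    if cached:
        # Serve repeated queries from ElastiCache instead of Elasticsearch.
        return {'statusCode': 200, 'body': cached}
    result = es_client.search(index=os.environ['ES_INDEX'],
                              body={'query': {'match': {'content': query}}})
    body = json.dumps(result['hits'])
    cache.setex(query, 60, body)  # keep the answer warm for 60 seconds
    return {'statusCode': 200, 'body': body}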
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) # Image Bucket image_bucket = s3.Bucket(self, IMG_BUCKET_NAME, removal_policy=cdk.RemovalPolicy.DESTROY) cdk.CfnOutput(self, "imageBucket", value=image_bucket.bucket_name) image_bucket.add_cors_rule( allowed_methods=[s3.HttpMethods.GET, s3.HttpMethods.PUT], allowed_origins=["*"], allowed_headers=["*"], max_age=3000, ) # Thumbnail Bucket resized_image_bucket = s3.Bucket( self, RESIZED_IMG_BUCKET_NAME, removal_policy=cdk.RemovalPolicy.DESTROY) cdk.CfnOutput(self, "resizedBucket", value=resized_image_bucket.bucket_name) resized_image_bucket.add_cors_rule( allowed_methods=[s3.HttpMethods.GET, s3.HttpMethods.PUT], allowed_origins=["*"], allowed_headers=["*"], max_age=3000, ) # S3 Static bucket for website code web_bucket = s3.Bucket( self, WEBSITE_BUCKET_NAME, website_index_document="index.html", website_error_document="index.html", removal_policy=cdk.RemovalPolicy.DESTROY, # uncomment this and delete the policy statement below to allow public access to our # static website # public_read_access=true ) web_policy_statement = iam.PolicyStatement( actions=["s3:GetObject"], resources=[web_bucket.arn_for_objects("*")], principals=[iam.AnyPrincipal()], conditions={"IpAddress": { "aws:SourceIp": ["139.138.203.36"] }}, ) web_bucket.add_to_resource_policy(web_policy_statement) cdk.CfnOutput(self, "bucketURL", value=web_bucket.bucket_website_domain_name) # Deploy site contents to S3 Bucket s3_dep.BucketDeployment( self, "DeployWebsite", sources=[s3_dep.Source.asset("./public")], destination_bucket=web_bucket, ) # DynamoDB to store image labels partition_key = dynamodb.Attribute(name="image", type=dynamodb.AttributeType.STRING) table = dynamodb.Table( self, "ImageLabels", partition_key=partition_key, removal_policy=cdk.RemovalPolicy.DESTROY, ) cdk.CfnOutput(self, "ddbTable", value=table.table_name) # Lambda layer for Pillow library layer = lb.LayerVersion( self, "pil", code=lb.Code.from_asset("reklayer"), compatible_runtimes=[lb.Runtime.PYTHON_3_7], license="Apache-2.0", description= "A layer to enable the PIL library in our Rekognition Lambda", ) # Lambda function rek_fn = lb.Function( self, "rekognitionFunction", code=lb.Code.from_asset("rekognitionFunction"), runtime=lb.Runtime.PYTHON_3_7, handler="index.handler", timeout=cdk.Duration.seconds(30), memory_size=1024, layers=[layer], environment={ "TABLE": table.table_name, "BUCKET": image_bucket.bucket_name, "THUMBBUCKET": resized_image_bucket.bucket_name, }, ) image_bucket.grant_read(rek_fn) resized_image_bucket.grant_write(rek_fn) table.grant_write_data(rek_fn) rek_fn.add_to_role_policy( iam.PolicyStatement(effect=iam.Effect.ALLOW, actions=["rekognition:DetectLabels"], resources=["*"])) # Lambda for Synchronous front end serviceFn = lb.Function( self, "serviceFunction", code=lb.Code.from_asset("servicelambda"), runtime=lb.Runtime.PYTHON_3_7, handler="index.handler", environment={ "TABLE": table.table_name, "BUCKET": image_bucket.bucket_name, "RESIZEDBUCKET": resized_image_bucket.bucket_name, }, ) image_bucket.grant_write(serviceFn) resized_image_bucket.grant_write(serviceFn) table.grant_read_write_data(serviceFn) # Cognito User Pool Auth auto_verified_attrs = cognito.AutoVerifiedAttrs(email=True) sign_in_aliases = cognito.SignInAliases(email=True, username=True) user_pool = cognito.UserPool( self, "UserPool", self_sign_up_enabled=True, auto_verify=auto_verified_attrs, sign_in_aliases=sign_in_aliases, ) user_pool_client = 
cognito.UserPoolClient(self, "UserPoolClient", user_pool=user_pool, generate_secret=False) identity_pool = cognito.CfnIdentityPool( self, "ImageRekognitionIdentityPool", allow_unauthenticated_identities=False, cognito_identity_providers=[{ "clientId": user_pool_client.user_pool_client_id, "providerName": user_pool.user_pool_provider_name, }], ) # API Gateway cors_options = apigw.CorsOptions(allow_origins=apigw.Cors.ALL_ORIGINS, allow_methods=apigw.Cors.ALL_METHODS) api = apigw.LambdaRestApi( self, "imageAPI", default_cors_preflight_options=cors_options, handler=serviceFn, proxy=False, ) auth = apigw.CfnAuthorizer( self, "ApiGatewayAuthorizer", name="customer-authorizer", identity_source="method.request.header.Authorization", provider_arns=[user_pool.user_pool_arn], rest_api_id=api.rest_api_id, # type=apigw.AuthorizationType.COGNITO, type="COGNITO_USER_POOLS", ) assumed_by = iam.FederatedPrincipal( "cognito-identity.amazonaws.com", conditions={ "StringEquals": { "cognito-identity.amazonaws.com:aud": identity_pool.ref }, "ForAnyValue:StringLike": { "cognito-identity.amazonaws.com:amr": "authenticated" }, }, assume_role_action="sts:AssumeRoleWithWebIdentity", ) authenticated_role = iam.Role( self, "ImageRekognitionAuthenticatedRole", assumed_by=assumed_by, ) # IAM policy granting users permission to get and put their pictures policy_statement = iam.PolicyStatement( actions=["s3:GetObject", "s3:PutObject"], effect=iam.Effect.ALLOW, resources=[ image_bucket.bucket_arn + "/private/${cognito-identity.amazonaws.com:sub}/*", image_bucket.bucket_arn + "/private/${cognito-identity.amazonaws.com:sub}/", resized_image_bucket.bucket_arn + "/private/${cognito-identity.amazonaws.com:sub}/*", resized_image_bucket.bucket_arn + "/private/${cognito-identity.amazonaws.com:sub}/", ], ) # IAM policy granting users permission to list their pictures list_policy_statement = iam.PolicyStatement( actions=["s3:ListBucket"], effect=iam.Effect.ALLOW, resources=[ image_bucket.bucket_arn, resized_image_bucket.bucket_arn ], conditions={ "StringLike": { "s3:prefix": ["private/${cognito-identity.amazonaws.com:sub}/*"] } }, ) authenticated_role.add_to_policy(policy_statement) authenticated_role.add_to_policy(list_policy_statement) # Attach role to our Identity Pool cognito.CfnIdentityPoolRoleAttachment( self, "IdentityPoolRoleAttachment", identity_pool_id=identity_pool.ref, roles={"authenticated": authenticated_role.role_arn}, ) # Get some outputs from cognito cdk.CfnOutput(self, "UserPoolId", value=user_pool.user_pool_id) cdk.CfnOutput(self, "AppClientId", value=user_pool_client.user_pool_client_id) cdk.CfnOutput(self, "IdentityPoolId", value=identity_pool.ref) # New Amazon API Gateway with AWS Lambda Integration success_response = apigw.IntegrationResponse( status_code="200", response_parameters={ "method.response.header.Access-Control-Allow-Origin": "'*'" }, ) error_response = apigw.IntegrationResponse( selection_pattern="(\n|.)+", status_code="500", response_parameters={ "method.response.header.Access-Control-Allow-Origin": "'*'" }, ) request_template = json.dumps({ "action": "$util.escapeJavaScript($input.params('action'))", "key": "$util.escapeJavaScript($input.params('key'))", }) lambda_integration = apigw.LambdaIntegration( serviceFn, proxy=False, request_parameters={ "integration.request.querystring.action": "method.request.querystring.action", "integration.request.querystring.key": "method.request.querystring.key", }, request_templates={"application/json": request_template},
passthrough_behavior=apigw.PassthroughBehavior.WHEN_NO_TEMPLATES, integration_responses=[success_response, error_response], ) imageAPI = api.root.add_resource("images") success_resp = apigw.MethodResponse( status_code="200", response_parameters={ "method.response.header.Access-Control-Allow-Origin": True }, ) error_resp = apigw.MethodResponse( status_code="500", response_parameters={ "method.response.header.Access-Control-Allow-Origin": True }, ) # GET /images get_method = imageAPI.add_method( "GET", lambda_integration, authorization_type=apigw.AuthorizationType.COGNITO, request_parameters={ "method.request.querystring.action": True, "method.request.querystring.key": True, }, method_responses=[success_resp, error_resp], ) # DELETE /images delete_method = imageAPI.add_method( "DELETE", lambda_integration, authorization_type=apigw.AuthorizationType.COGNITO, request_parameters={ "method.request.querystring.action": True, "method.request.querystring.key": True, }, method_responses=[success_resp, error_resp], ) # Override the authorizer id because it doesn't work when defining it as a param # in add_method get_method_resource = get_method.node.find_child("Resource") get_method_resource.add_property_override("AuthorizerId", auth.ref) delete_method_resource = delete_method.node.find_child("Resource") delete_method_resource.add_property_override("AuthorizerId", auth.ref) # Building SQS queue and DeadLetter Queue dl_queue = sqs.Queue( self, "ImageDLQueue", queue_name="ImageDLQueue", ) dl_queue_opts = sqs.DeadLetterQueue(max_receive_count=2, queue=dl_queue) queue = sqs.Queue( self, "ImageQueue", queue_name="ImageQueue", visibility_timeout=cdk.Duration.seconds(30), receive_message_wait_time=cdk.Duration.seconds(20), dead_letter_queue=dl_queue_opts, ) # S3 Bucket Create Notification to SQS # Whenever an image is uploaded, add it to the queue image_bucket.add_object_created_notification( s3n.SqsDestination(queue), s3.NotificationKeyFilter(prefix="private/"))
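# ---------------------------------------------------------------------------
# The source for rekognitionFunction/index.py is not included in this
# document. The sketch below shows one plausible shape for it, assuming the
# function is fed by an SQS event source on ImageQueue (that wiring is not
# shown in this excerpt). It is grounded in what the stack does define: the
# TABLE/THUMBBUCKET environment variables, the "image" partition key, the
# rekognition:DetectLabels permission, and the PIL layer. Treat it as
# illustrative only.
# ---------------------------------------------------------------------------
import json
import os
from io import BytesIO

import boto3
from PIL import Image  # provided by the "pil" layer built from ./reklayer

rekognition = boto3.client('rekognition')
s3 = boto3.client('s3')
table = boto3.resource('dynamodb').Table(os.environ['TABLE'])


def handler(event, context):
    # Each SQS message body is an S3 notification holding one or more records.
    for message in event['Records']:
        for record in json.loads(message['body']).get('Records', []):
            bucket = record['s3']['bucket']['name']
            key = record['s3']['object']['key']
            # Label the image and persist the labels keyed by object name.
            labels = rekognition.detect_labels(
                Image={'S3Object': {'Bucket': bucket, 'Name': key}},
                MaxLabels=10)
            table.put_item(Item={
                'image': key,
                'labels': [label['Name'] for label in labels['Labels']],
            })
            # Write a downscaled copy to the thumbnail bucket.
            obj = s3.get_object(Bucket=bucket, Key=key)
            img = Image.open(BytesIO(obj['Body'].read()))
            img.thumbnail((256, 256))
            buf = BytesIO()
            img.save(buf, format='PNG')
            buf.seek(0)
            s3.put_object(Bucket=os.environ['THUMBBUCKET'], Key=key, Body=buf)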
def __init__(self, scope: core.Construct, id: str, wiki_api_endpoint, **kwargs) -> None: super().__init__(scope, id, **kwargs) # Create API Gateway api_01 = _apigw.RestApi(self, 'apiEndpoint', rest_api_name='mystique-wiki-api', deploy_options=_apigw.StageOptions( stage_name="myst", data_trace_enabled=True, tracing_enabled=True)) v1 = api_01.root.add_resource("v1") # Add resource for HTTP Endpoint: API Hosted on EC2 self.wiki_url_path_00 = v1.add_resource('wiki_url') wiki_url_path_01 = self.wiki_url_path_00.add_resource('{needle}') # Create the API Gateway Integration Responses list_objects_responses = [ _apigw.IntegrationResponse( status_code="200", response_parameters={ 'method.response.header.Timestamp': 'integration.response.header.Date', 'method.response.header.Content-Length': 'integration.response.header.Content-Length', 'method.response.header.Content-Type': 'integration.response.header.Content-Type' }) ] # Create the API Gateway Integration Request Path mapping wiki_url_integration_options = _apigw.IntegrationOptions( integration_responses=list_objects_responses, request_parameters={ "integration.request.path.needle": "method.request.path.needle" }) wiki_url_integration = _apigw.HttpIntegration( url=f'http://{wiki_api_endpoint}/api/{{needle}}', http_method='GET', options=wiki_url_integration_options, proxy=False, ) wiki_url_method = wiki_url_path_01.add_method( "GET", wiki_url_integration, request_parameters={ 'method.request.header.Content-Type': False, 'method.request.path.needle': True }, method_responses=[ _apigw.MethodResponse( status_code="200", response_parameters={ 'method.response.header.Timestamp': False, 'method.response.header.Content-Length': False, 'method.response.header.Content-Type': False }, response_models={'application/json': _apigw.EmptyModel()}) ]) ########################################### ################# OUTPUTS ################# ########################################### output_0 = core.CfnOutput( self, "AutomationFrom", value=f"{global_args.SOURCE_INFO}", description= "To know more about this automation stack, check out our github page." ) output_1 = core.CfnOutput( self, "GetWikiUrl", value=f"{self.wiki_url_path_00.url}/search term", description="Get the Wiki URL for a given topic using API Gateway")
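# ---------------------------------------------------------------------------
# A quick way to exercise the wiki endpoint once the stack is deployed. The
# API id and region below are placeholders; the real invoke URL is surfaced
# by the GetWikiUrl stack output. Requires the third-party `requests`
# package. This is a usage sketch, not part of the stack.
# ---------------------------------------------------------------------------
import requests

API_ID = 'abcdef1234'  # placeholder: your deployed REST API id
REGION = 'us-east-1'   # placeholder: your deployment region
needle = 'serverless'  # the {needle} path parameter, i.e. the search topic

resp = requests.get(
    f'https://{API_ID}.execute-api.{REGION}.amazonaws.com/myst/v1/wiki_url/{needle}')
print(resp.status_code, resp.headers.get('Content-Type'))
print(resp.text)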
def add_post_method( self, api: aws_apigateway.RestApi, resource: aws_apigateway.Resource, table: aws_dynamodb.Table, ) -> aws_apigateway.Method: create_announcement_lambda = create_function( stack=self, id="CreateAnnouncementLambda", settings={ "handler": "create_announcement.main", "runtime": aws_lambda.Runtime.PYTHON_3_8, "timeout": core.Duration.minutes( self.settings.AWS_LAMBDA_CREATE_ANNOUNCEMENT_TIMEOUT), "retry_attempts": self.settings.AWS_LAMBDA_CREATE_ANNOUNCEMENT_RETRY_ATTEMPTS, }, ) create_announcement_lambda.add_environment( "TABLE_NAME", table.table_name, ) table.grant_read_write_data(create_announcement_lambda) create_announcement_request_validator = aws_apigateway.RequestValidator( self, "CreateAnnouncementRequestValidator", rest_api=api, validate_request_body=True, request_validator_name="Create Announcement Request Validator", ) create_announcement_request_model = aws_apigateway.Model( self, "CreateAnnouncementRequestModel", model_name="CreateAnnouncementRequest", rest_api=api, schema=aws_apigateway.JsonSchema( type=aws_apigateway.JsonSchemaType.OBJECT, required=["Item"], properties={ "Item": aws_apigateway.JsonSchema( type=aws_apigateway.JsonSchemaType.OBJECT, required=["title", "date", "description"], properties={ "title": aws_apigateway.JsonSchema( type=aws_apigateway.JsonSchemaType.STRING), "description": aws_apigateway.JsonSchema( type=aws_apigateway.JsonSchemaType.STRING, ), "date": aws_apigateway.JsonSchema( type=aws_apigateway.JsonSchemaType.STRING, min_length=1, format="date", pattern= "^\d{4}-([0]\d|1[0-2])-([0-2]\d|3[01])$", ), }, ) }, ), ) create_announcement_response_success_model = aws_apigateway.Model( self, "CreateAnnouncementResponseSuccess", model_name="CreateAnnouncementResponseSuccess", rest_api=api, schema=aws_apigateway.JsonSchema( type=aws_apigateway.JsonSchemaType.OBJECT, required=["id"], properties={ "id": aws_apigateway.JsonSchema( type=aws_apigateway.JsonSchemaType.STRING) }, ), ) create_announcement_response_error_model = aws_apigateway.Model( self, "CreateAnnouncementResponseError", model_name="CreateAnnouncementResponseError", rest_api=api, schema=aws_apigateway.JsonSchema( type=aws_apigateway.JsonSchemaType.OBJECT, required=["error"], properties={ "error": aws_apigateway.JsonSchema( type=aws_apigateway.JsonSchemaType.STRING) }, ), ) create_announcement_method = resource.add_method( "POST", integration=aws_apigateway.LambdaIntegration( create_announcement_lambda, proxy=True, integration_responses=[ aws_apigateway.IntegrationResponse( status_code="200", response_parameters={ "method.response.header.Access-Control-Allow-Origin": "'*'" }, ), aws_apigateway.IntegrationResponse( status_code="404", response_parameters={ "method.response.header.Access-Control-Allow-Origin": "'*'" }, ), ], passthrough_behavior=aws_apigateway.PassthroughBehavior.NEVER, ), request_validator=create_announcement_request_validator, request_models={ "application/json": create_announcement_request_model }, method_responses=[ aws_apigateway.MethodResponse( status_code="200", response_models={ "application/json": create_announcement_response_success_model }, response_parameters={ "method.response.header.Access-Control-Allow-Origin": True }, ), aws_apigateway.MethodResponse( status_code="404", response_models={ "application/json": create_announcement_response_error_model }, response_parameters={ "method.response.header.Access-Control-Allow-Origin": True }, ), ], ) self.methods_to_deploy.append(create_announcement_method) return create_announcement_method
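# ---------------------------------------------------------------------------
# The create_announcement.main handler referenced above is not part of this
# document. Below is a minimal sketch consistent with the method contract
# the stack declares: the request validator guarantees an Item with title,
# description and date; responses are a 200 with an id or a 404 with an
# error, both CORS-enabled. The uuid-based id and the assumption that the
# table's partition key is named "id" are mine, not the source's.
# ---------------------------------------------------------------------------
import json
import os
import uuid

import boto3

CORS_HEADERS = {'Access-Control-Allow-Origin': '*'}
table = boto3.resource('dynamodb').Table(os.environ['TABLE_NAME'])


def main(event, context):
    # The request validator has already enforced CreateAnnouncementRequest,
    # so an Item with title/description/date is present in the proxy payload.
    item = json.loads(event['body'])['Item']
    item['id'] = str(uuid.uuid4())  # assumed partition key name
    try:
        table.put_item(Item=item)
    except Exception as exc:
        # Surfaced through the 404 method response / error model above.
        return {'statusCode': 404, 'headers': CORS_HEADERS,
                'body': json.dumps({'error': str(exc)})}
    return {'statusCode': 200, 'headers': CORS_HEADERS,
            'body': json.dumps({'id': item['id']})}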
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) lambda_repository = aws_codecommit.Repository( self, "QuestionsLambdaRepository", repository_name="MythicalMysfits-QuestionsLambdaRepository", ) core.CfnOutput( self, "questionsRepositoryCloneUrlHTTP", value=lambda_repository.repository_clone_url_http, description="Questions Lambda Repository Clone URL HTTP", ) core.CfnOutput( self, "questionsRepositoryCloneUrlSSH", value=lambda_repository.repository_clone_url_ssh, description="Questions Lambda Repository Clone URL SSH", ) table = aws_dynamodb.Table( self, "Table", table_name="MysfitsQuestionsTable", partition_key=aws_dynamodb.Attribute( name="QuestionId", type=aws_dynamodb.AttributeType.STRING), stream=aws_dynamodb.StreamViewType.NEW_IMAGE, ) lambda_function_policy_statement_ddb = aws_iam.PolicyStatement() lambda_function_policy_statement_ddb.add_actions("dynamodb:PutItem") lambda_function_policy_statement_ddb.add_resources(table.table_arn) lambda_function_policy_statement_xray = aws_iam.PolicyStatement() lambda_function_policy_statement_xray.add_actions( "xray:PutTraceSegments", "xray:PutTelemetryRecords", "xray:GetSamplingRules", "xray:GetSamplingTargets", "xray:GetSamplingStatisticSummaries", ) lambda_function_policy_statement_xray.add_all_resources() mysfits_post_question = aws_lambda.Function( self, "PostQuestionFunction", handler="mysfitsPostQuestion.postQuestion", runtime=aws_lambda.Runtime.PYTHON_3_6, description= "A microservice Lambda function that receives a new question submitted to the MythicalMysfits website from a user and inserts it into a DynamoDB database table.", memory_size=128, code=aws_lambda.Code.asset( os.path.join("..", "..", "lambda-questions", "PostQuestionsService")), timeout=core.Duration.seconds(30), initial_policy=[ lambda_function_policy_statement_ddb, lambda_function_policy_statement_xray, ], tracing=aws_lambda.Tracing.ACTIVE, ) topic = aws_sns.Topic( self, "Topic", display_name="MythicalMysfitsQuestionsTopic", topic_name="MythicalMysfitsQuestionsTopic", ) topic.add_subscription(subs.EmailSubscription(os.environ["SNS_EMAIL"])) post_question_lamdaa_function_policy_statement_sns = aws_iam.PolicyStatement( ) post_question_lamdaa_function_policy_statement_sns.add_actions( "sns:Publish") post_question_lamdaa_function_policy_statement_sns.add_resources( topic.topic_arn) mysfits_process_question_stream = aws_lambda.Function( self, "ProcessQuestionStreamFunction", handler="mysfitsProcessStream.processStream", runtime=aws_lambda.Runtime.PYTHON_3_6, description= "An AWS Lambda function that will process all new questions posted to mythical mysfits and notify the site administrator of the question that was asked.", memory_size=128, code=aws_lambda.Code.asset( os.path.join("..", "..", "lambda-questions", "ProcessQuestionsStream")), timeout=core.Duration.seconds(30), initial_policy=[ post_question_lamdaa_function_policy_statement_sns, lambda_function_policy_statement_xray, ], tracing=aws_lambda.Tracing.ACTIVE, environment={"SNS_TOPIC_ARN": topic.topic_arn}, events=[ event.DynamoEventSource( table, starting_position=aws_lambda.StartingPosition.TRIM_HORIZON, batch_size=1, ) ], ) questions_api_role = aws_iam.Role( self, "QuestionsApiRole", assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"), ) api_policy = aws_iam.PolicyStatement() api_policy.add_actions("lambda:InvokeFunction") api_policy.add_resources(mysfits_post_question.function_arn) aws_iam.Policy( self, "QuestionsApiPolicy", 
policy_name="questions_api_policy", statements=[api_policy], roles=[questions_api_role], ) questions_integration = aws_apigateway.LambdaIntegration( mysfits_post_question, credentials_role=questions_api_role, integration_responses=[ aws_apigateway.IntegrationResponse( status_code="200", response_templates={ "application/json": '{"status": "OK"}' }, ) ], ) api = aws_apigateway.LambdaRestApi( self, "APIEndpoint", handler=mysfits_post_question, options=aws_apigateway.RestApiProps( rest_api_name="Questions API Server"), proxy=False, ) questions_method = api.root.add_resource("questions") questions_method.add_method( "POST", questions_integration, method_responses=[ aws_apigateway.MethodResponse(status_code="200") ], authorization_type=aws_apigateway.AuthorizationType.NONE, ) questions_method.add_method( "OPTIONS", aws_apigateway.MockIntegration( integration_responses=[ aws_apigateway.IntegrationResponse( status_code="200", response_parameters={ "method.response.header.Access-Control-Allow-Headers": "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,X-Amz-User-Agent'", "method.response.header.Access-Control-Allow-Origin": "'*'", "method.response.header.Access-Control-Allow-Credentials": "'false'", "method.response.header.Access-Control-Allow-Methods": "'OPTIONS,GET,PUT,POST,DELETE'", }, ) ], passthrough_behavior=aws_apigateway.PassthroughBehavior.NEVER, request_templates={"application/json": '{"statusCode": 200}'}, ), method_responses=[ aws_apigateway.MethodResponse( status_code="200", response_parameters={ "method.response.header.Access-Control-Allow-Headers": True, "method.response.header.Access-Control-Allow-Methods": True, "method.response.header.Access-Control-Allow-Credentials": True, "method.response.header.Access-Control-Allow-Origin": True, }, ) ], )
def __init__(self, scope: core.Construct, id: str, back_end_api_name: str, stack_log_level: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # Create Lambda Destination Queue async_dest_queue = _sqs.Queue( self, "Queue", queue_name="async_get_square_fn_dest_queue") # Create Serverless Event Processor using Lambda # Read Lambda Code try: with open( "serverless_async_lambda_api/stacks/back_end/lambda_src/get_square.py", mode="r") as f: get_square_fn_code = f.read() except OSError as e: print("Unable to read Lambda Function Code") raise e get_square_fn = _lambda.Function( self, "getSquareFn", function_name="get_square_fn", runtime=_lambda.Runtime.PYTHON_3_7, handler="index.lambda_handler", code=_lambda.InlineCode(get_square_fn_code), timeout=core.Duration.seconds(15), reserved_concurrent_executions=1, on_success=_lambda_dest.SqsDestination(async_dest_queue), on_failure=_lambda_dest.SqsDestination(async_dest_queue), environment={ "LOG_LEVEL": f"{stack_log_level}", "Environment": "Production", "ANDON_CORD_PULLED": "False" }) get_square_fn_version = get_square_fn.latest_version get_square_fn_version_alias = _lambda.Alias( self, "getSquareFnAlias", alias_name="MystiqueAutomation", version=get_square_fn_version) # Add permissions to the Lambda function to write messages to the queue async_dest_queue.grant_send_messages(get_square_fn) # Create Custom Loggroup # /aws/lambda/function-name get_square_fn_fn_lg = _logs.LogGroup( self, "squareFnLoggroup", log_group_name=f"/aws/lambda/{get_square_fn.function_name}", retention=_logs.RetentionDays.ONE_WEEK, removal_policy=core.RemovalPolicy.DESTROY) # Add API GW front end for the Lambda back_end_01_api_stage_options = _apigw.StageOptions( stage_name="miztiik", throttling_rate_limit=10, throttling_burst_limit=100, logging_level=_apigw.MethodLoggingLevel.INFO) # Create API Gateway api_01 = _apigw.RestApi(self, "backEnd01Api", rest_api_name=f"{back_end_api_name}", deploy_options=back_end_01_api_stage_options, endpoint_types=[_apigw.EndpointType.REGIONAL]) # InvocationType='RequestResponse' if async_ else 'Event' back_end_01_api_res = api_01.root.add_resource("square") get_square = back_end_01_api_res.add_resource("{number}") # API VTL Template mapping # https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-integration-async.html # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html # https://aws.amazon.com/premiumsupport/knowledge-center/custom-headers-api-gateway-lambda/ req_template = '''{"number": "$input.params('number')"}''' # We are going to loop through the headers. Find the key:value for asynchrony i.e "InvocationType:Event" # If the headers are found, set is_async to true # If not found, return the response from lambda resp_template = """{ "api_stage": "$context.stage", "api_request_id": "$context.requestId", "api_resource_path": "$context.resourcePath", "http_method": "$context.httpMethod", "source_ip": "$context.identity.sourceIp", "user-agent": "$context.identity.userAgent", #set($num_square = $util.escapeJavaScript($!input.json('$.square'))) #foreach($param in $input.params().header.keySet()) #if($param == "invocationtype" or $param == "InvocationType" && $util.escapeJavaScript($input.params().header.get($param)) == "Event") #set($is_async = "true") #end #end #if($is_async == "true") "asynchronous_invocation":"true", "message":"Event received.
Check queue/logs for status" #else "synchronous_invocation":"true", "square_of_your_number_is":$!{num_square} #end } """ get_square_method_get = get_square.add_method( http_method="GET", request_parameters={ "method.request.header.InvocationType": True, "method.request.path.number": True }, integration=_apigw.LambdaIntegration( handler=get_square_fn, proxy=False, request_parameters={ "integration.request.path.number": "method.request.path.number", "integration.request.header.X-Amz-Invocation-Type": "method.request.header.InvocationType", "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'" }, passthrough_behavior=_apigw.PassthroughBehavior. WHEN_NO_TEMPLATES, request_templates={"application/json": req_template}, integration_responses=[ _apigw.IntegrationResponse( status_code="200", # selection_pattern="2\d{2}", # Use for mapping Lambda Errors response_parameters={}, response_templates={ "application/json": resp_template }) ]), method_responses=[ _apigw.MethodResponse( status_code="200", response_parameters={ "method.response.header.Content-Length": True, }, response_models={"application/json": _apigw.EmptyModel()}) ]) # Outputs output_1 = core.CfnOutput( self, "GetSquareApiUrl", value=f"{get_square.url}", description= "Use a browser to access this url. Change {number} to any value between 1 and 100." )
def __init__(self, scope: core.Construct, id: str, props: KinesisFirehoseStackProps, **kwargs) -> None: super().__init__(scope, id, **kwargs) lambda_repository = aws_codecommit.Repository( self, "ClicksProcessingLambdaRepository", repository_name="MythicalMysfits-ClicksProcessingLambdaRepository", ) core.CfnOutput( self, "kinesisRepositoryCloneUrlHttp", value=lambda_repository.repository_clone_url_http, description="Clicks Processing Lambda Repository Clone URL HTTP", ) core.CfnOutput( self, "kinesisRepositoryCloneUrlSsh", value=lambda_repository.repository_clone_url_ssh, description="Clicks Processing Lambda Repository Clone URL SSH", ) clicks_destination_bucket = aws_s3.Bucket(self, "Bucket", versioned=True) lambda_function_policy = aws_iam.PolicyStatement() lambda_function_policy.add_actions("dynamodb:GetItem") lambda_function_policy.add_resources(props.table.table_arn) mysfits_clicks_processor = aws_lambda.Function( self, "Function", handler="streamProcessor.processRecord", runtime=aws_lambda.Runtime.PYTHON_3_7, description= "An Amazon Kinesis Firehose stream processor that enriches click records to not just include a mysfitId, but also other attributes that can be analyzed later.", memory_size=128, code=aws_lambda.Code.asset("../../lambda-streaming-processor"), timeout=core.Duration.seconds(30), initial_policy=[lambda_function_policy], environment={ # TODO: this seems better than having the user copy/paste it in, but is it the best way? "MYSFITS_API_URL": "https://{}.execute-api.{}.amazonaws.com/prod/".format( props.api_gateway.ref, core.Aws.REGION) }, ) firehose_delivery_role = aws_iam.Role( self, "FirehoseDeliveryRole", role_name="FirehoseDeliveryRole", assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"), external_id=core.Aws.ACCOUNT_ID, ) firehose_delivery_policy_s3_statement = aws_iam.PolicyStatement() firehose_delivery_policy_s3_statement.add_actions( "s3:AbortMultipartUpload", "s3:GetBucketLocation", "s3:GetObject", "s3:ListBucket", "s3:ListBucketMultipartUploads", "s3:PutObject", ) firehose_delivery_policy_s3_statement.add_resources( clicks_destination_bucket.bucket_arn) firehose_delivery_policy_s3_statement.add_resources( clicks_destination_bucket.arn_for_objects("*")) firehose_delivery_policy_lambda_statement = aws_iam.PolicyStatement() firehose_delivery_policy_lambda_statement.add_actions( "lambda:InvokeFunction") firehose_delivery_policy_lambda_statement.add_resources( mysfits_clicks_processor.function_arn) firehose_delivery_role.add_to_policy( firehose_delivery_policy_s3_statement) firehose_delivery_role.add_to_policy( firehose_delivery_policy_lambda_statement) mysfits_firehose_to_s3 = aws_kinesisfirehose.CfnDeliveryStream( self, "DeliveryStream", extended_s3_destination_configuration=aws_kinesisfirehose. CfnDeliveryStream.ExtendedS3DestinationConfigurationProperty( bucket_arn=clicks_destination_bucket.bucket_arn, buffering_hints=aws_kinesisfirehose.CfnDeliveryStream. BufferingHintsProperty(interval_in_seconds=60, size_in_m_bs=50), compression_format="UNCOMPRESSED", prefix="firehose/", role_arn=firehose_delivery_role.role_arn, processing_configuration=aws_kinesisfirehose.CfnDeliveryStream. ProcessingConfigurationProperty( enabled=True, processors=[ aws_kinesisfirehose.CfnDeliveryStream. ProcessorProperty( parameters=[ aws_kinesisfirehose.CfnDeliveryStream. ProcessorParameterProperty( parameter_name="LambdaArn", parameter_value=mysfits_clicks_processor. 
function_arn, ) ], type="Lambda", ) ], ), ), ) aws_lambda.CfnPermission( self, "Permission", action="lambda:InvokeFunction", function_name=mysfits_clicks_processor.function_arn, principal="firehose.amazonaws.com", source_account=core.Aws.ACCOUNT_ID, source_arn=mysfits_firehose_to_s3.attr_arn, ) click_processing_api_role = aws_iam.Role( self, "ClickProcessingApiRole", assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"), ) api_policy = aws_iam.PolicyStatement() api_policy.add_actions("firehose:PutRecord") api_policy.add_resources(mysfits_firehose_to_s3.attr_arn) aws_iam.Policy( self, "ClickProcessingApiPolicy", policy_name="api_gateway_firehose_proxy_role", statements=[api_policy], roles=[click_processing_api_role], ) api = aws_apigateway.RestApi( self, "APIEndpoint", rest_api_name="ClickProcessing API Service", endpoint_types=[aws_apigateway.EndpointType.REGIONAL], ) clicks = api.root.add_resource("clicks") clicks.add_method( "PUT", aws_apigateway.AwsIntegration( service="firehose", integration_http_method="POST", action="PutRecord", options=aws_apigateway.IntegrationOptions( connection_type=aws_apigateway.ConnectionType.INTERNET, credentials_role=click_processing_api_role, integration_responses=[ aws_apigateway.IntegrationResponse( status_code="200", response_templates={ "application/json": '{"status": "OK"}' }, response_parameters={ "method.response.header.Access-Control-Allow-Headers": "'Content-Type'", "method.response.header.Access-Control-Allow-Methods": "'OPTIONS,PUT'", "method.response.header.Access-Control-Allow-Origin": "'*'", }, ) ], request_parameters={ "integration.request.header.Content-Type": "'application/x-amz-json-1.1'" }, request_templates={ "application/json": """{ "DeliveryStreamName": "%s", "Record": { "Data": "$util.base64Encode($input.json('$'))" }}""" % mysfits_firehose_to_s3.ref }, ), ), method_responses=[ aws_apigateway.MethodResponse( status_code="200", response_parameters={ "method.response.header.Access-Control-Allow-Headers": True, "method.response.header.Access-Control-Allow-Methods": True, "method.response.header.Access-Control-Allow-Origin": True, }, ) ], ) clicks.add_method( "OPTIONS", aws_apigateway.MockIntegration( integration_responses=[ aws_apigateway.IntegrationResponse( status_code="200", response_parameters={ "method.response.header.Access-Control-Allow-Headers": "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,X-Amz-User-Agent'", "method.response.header.Access-Control-Allow-Origin": "'*'", "method.response.header.Access-Control-Allow-Credentials": "'false'", "method.response.header.Access-Control-Allow-Methods": "'OPTIONS,GET,PUT,POST,DELETE'", }, ) ], passthrough_behavior=aws_apigateway.PassthroughBehavior.NEVER, request_templates={"application/json": '{"statusCode": 200}'}, ), method_responses=[ aws_apigateway.MethodResponse( status_code="200", response_parameters={ "method.response.header.Access-Control-Allow-Headers": True, "method.response.header.Access-Control-Allow-Methods": True, "method.response.header.Access-Control-Allow-Credentials": True, "method.response.header.Access-Control-Allow-Origin": True, }, ) ], )
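# ---------------------------------------------------------------------------
# streamProcessor.processRecord ships in ../../lambda-streaming-processor and
# is not shown here. The sketch below follows the Kinesis Data Firehose
# transformation contract (base64-encoded records in, Ok/ProcessingFailed
# records out) and the MYSFITS_API_URL environment variable set above; the
# /mysfits/{id} route and the enriched field names are assumptions.
# ---------------------------------------------------------------------------
import base64
import json
import os
from urllib.request import urlopen


def processRecord(event, context):
    output = []
    for record in event['records']:
        click = json.loads(base64.b64decode(record['data']))
        # Enrich the raw click with the full mysfit profile from the API.
        url = '{}mysfits/{}'.format(os.environ['MYSFITS_API_URL'],
                                    click['mysfitId'])
        with urlopen(url) as resp:
            mysfit = json.loads(resp.read())
        enriched = dict(click, species=mysfit.get('species'))
        output.append({
            'recordId': record['recordId'],
            'result': 'Ok',
            'data': base64.b64encode(
                json.dumps(enriched).encode('utf-8')).decode('utf-8'),
        })
    return {'records': output}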
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # DynamoDB table
    # Streaming is enabled to send the whole new object down the pipe
    table = dynamo_db.Table(
        self,
        "TheDynamoStreamer",
        partition_key=dynamo_db.Attribute(
            name="message", type=dynamo_db.AttributeType.STRING),
        stream=dynamo_db.StreamViewType.NEW_IMAGE)

    # Defines an AWS Lambda resource subscribed to the table's stream
    subscriber_lambda = _lambda.Function(
        self,
        "DynamoLambdaHandler",
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler="lambda.handler",
        code=_lambda.Code.from_asset("lambda_fns/subscribe"))

    subscriber_lambda.add_event_source(
        _event.DynamoEventSource(
            table=table, starting_position=_lambda.StartingPosition.LATEST))

    # API Gateway creation
    gateway = api_gw.RestApi(
        self,
        'DynamoStreamerAPI',
        deploy_options=api_gw.StageOptions(
            metrics_enabled=True,
            logging_level=api_gw.MethodLoggingLevel.INFO,
            data_trace_enabled=True,
            stage_name='prod'))

    # Give our gateway permissions to interact with DynamoDB
    api_gw_dynamo_role = iam.Role(
        self,
        'DefaultLambdaHanderRole',
        assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
    table.grant_read_write_data(api_gw_dynamo_role)

    # Shortening the lines of later code
    schema = api_gw.JsonSchema
    schema_type = api_gw.JsonSchemaType

    # Because this isn't a proxy integration, we need to define our response model
    response_model = gateway.add_model(
        'ResponseModel',
        content_type='application/json',
        model_name='ResponseModel',
        schema=schema(
            schema=api_gw.JsonSchemaVersion.DRAFT4,
            title='pollResponse',
            type=schema_type.OBJECT,
            properties={'message': schema(type=schema_type.STRING)}))

    error_response_model = gateway.add_model(
        'ErrorResponseModel',
        content_type='application/json',
        model_name='ErrorResponseModel',
        schema=schema(
            schema=api_gw.JsonSchemaVersion.DRAFT4,
            title='errorResponse',
            type=schema_type.OBJECT,
            properties={
                'state': schema(type=schema_type.STRING),
                'message': schema(type=schema_type.STRING)
            }))

    # This is the VTL to transform our incoming JSON to a DynamoDB insert query
    request_template = {
        "TableName": table.table_name,
        "Item": {
            "message": {
                "S": "$input.path('$.message')"
            }
        }
    }
    request_template_string = json.dumps(request_template,
                                         separators=(',', ':'))

    # This is the VTL to transform the error response
    error_template = {
        "state": 'error',
        "message": "$util.escapeJavaScript($input.path('$.errorMessage'))"
    }
    error_template_string = json.dumps(error_template, separators=(',', ':'))

    # This is how our gateway chooses what response to send based on selection_pattern
    integration_options = api_gw.IntegrationOptions(
        credentials_role=api_gw_dynamo_role,
        request_templates={"application/json": request_template_string},
        passthrough_behavior=api_gw.PassthroughBehavior.NEVER,
        integration_responses=[
            api_gw.IntegrationResponse(
                status_code='200',
                response_templates={
                    "application/json":
                    json.dumps({"message": 'item added to db'})
                }),
            api_gw.IntegrationResponse(
                # Raw string so the regex reaches API Gateway intact
                selection_pattern=r"^\[BadRequest\].*",
                status_code='400',
                response_templates={"application/json": error_template_string},
                response_parameters={
                    'method.response.header.Content-Type':
                    "'application/json'",
                    'method.response.header.Access-Control-Allow-Origin':
                    "'*'",
                    'method.response.header.Access-Control-Allow-Credentials':
                    "'true'"
                })
        ])

    # Add an InsertItem endpoint onto the gateway.
    # Note the integration URI hardcodes us-east-1.
    gateway.root.add_resource('InsertItem') \
        .add_method(
            'POST',
            api_gw.Integration(
                type=api_gw.IntegrationType.AWS,
                integration_http_method='POST',
                uri='arn:aws:apigateway:us-east-1:dynamodb:action/PutItem',
                options=integration_options),
            method_responses=[
                api_gw.MethodResponse(
                    status_code='200',
                    response_parameters={
                        'method.response.header.Content-Type': True,
                        'method.response.header.Access-Control-Allow-Origin': True,
                        'method.response.header.Access-Control-Allow-Credentials': True
                    },
                    response_models={'application/json': response_model}),
                api_gw.MethodResponse(
                    status_code='400',
                    response_parameters={
                        'method.response.header.Content-Type': True,
                        'method.response.header.Access-Control-Allow-Origin': True,
                        'method.response.header.Access-Control-Allow-Credentials': True
                    },
                    response_models={'application/json': error_response_model}),
            ])
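# --- Usage sketch (added for illustration; not part of the original stack) ---
# A minimal client call against the InsertItem endpoint above, assuming the
# stack is deployed and `requests` is installed. The URL placeholder would be
# copied from the deployed 'prod' stage.
import requests

API_URL = "https://<api-id>.execute-api.<region>.amazonaws.com/prod"

# The request VTL maps only $.message into the DynamoDB PutItem, so a single
# "message" field is all the body needs.
resp = requests.post(f"{API_URL}/InsertItem", json={"message": "hello"})
print(resp.status_code, resp.json())  # 200 -> {"message": "item added to db"}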
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    self.current_dir = os.path.dirname(__file__)

    self.bucket = s3.Bucket(
        self,
        "qs-migration-bucket",
        bucket_name=f'quicksight-migration-{core.Aws.ACCOUNT_ID}',
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
    )

    self.quicksight_migration_lambda_role = iam.Role(
        self,
        'quicksight-migration-lambda-role',
        description='Role for the QuickSight dashboard migration Lambdas',
        role_name='quicksight-migration-lambda-role',
        max_session_duration=core.Duration.seconds(3600),
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        inline_policies={
            'AllowAccess':
            iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        'logs:CreateLogGroup', 'logs:CreateLogStream',
                        'logs:PutLogEvents'
                    ],
                    resources=[
                        f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:*'
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["sts:AssumeRole", "iam:ListRoles"],
                    resources=[
                        "arn:aws:iam::*:role/quicksight-migration-*-assume-role"
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["s3:PutObject", "s3:ListBucket"],
                    resources=[
                        self.bucket.bucket_arn,
                        f"{self.bucket.bucket_arn}/*"
                    ]),
                # The Secrets Manager action prefix is "secretsmanager";
                # the original source used the invalid prefix "secrets".
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["secretsmanager:GetSecretValue"],
                    resources=[
                        f"arn:aws:secretsmanager:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:secret:*"
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "quicksight:Create*", "quicksight:Delete*",
                        "quicksight:Describe*", "quicksight:List*",
                        "quicksight:Search*", "quicksight:Update*"
                    ],
                    resources=["*"])
            ])
        })

    self.quicksight_migration_target_assume_role = iam.Role(
        self,
        'quicksight-migration-target-assume-role',
        description='Role for the QuickSight dashboard migration Lambdas to assume',
        role_name='quicksight-migration-target-assume-role',
        max_session_duration=core.Duration.seconds(3600),
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        inline_policies={
            'AllowAccess':
            iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "quicksight:Create*", "quicksight:Delete*",
                        "quicksight:Describe*", "quicksight:List*",
                        "quicksight:Search*", "quicksight:Update*"
                    ],
                    resources=["*"]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["ssm:GetParameter"],
                    resources=["arn:aws:ssm:*:*:parameter/infra/config"])
            ])
        })

    self.quicksight_migration_target_assume_role.assume_role_policy.add_statements(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=['sts:AssumeRole'],
            principals=[iam.AccountPrincipal(core.Aws.ACCOUNT_ID)]))

    # API Gateway to SQS
    self.rest_api_role = iam.Role(
        self,
        "RestAPIRole",
        assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSQSFullAccess")
        ])

    self.queue = sqs.Queue(
        self,
        "quicksight-migration-sqs-queue",
        queue_name="quicksight-migration-sqs",
        visibility_timeout=core.Duration.minutes(15))

    self.integration_response = apigw.IntegrationResponse(
        status_code="200",
        response_templates={"application/json": ""},
        response_parameters={
            "method.response.header.Access-Control-Allow-Headers":
            "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
            "method.response.header.Access-Control-Allow-Origin": "'*'",
            "method.response.header.Access-Control-Allow-Methods":
            "'POST,OPTIONS'"
        })

    self.api_integration_options = apigw.IntegrationOptions(
        credentials_role=self.rest_api_role,
        integration_responses=[self.integration_response],
        request_templates={
            "application/json":
            'Action=SendMessage&MessageBody=$util.urlEncode("$input.body")'
        },
        passthrough_behavior=apigw.PassthroughBehavior.NEVER,
        request_parameters={
            "integration.request.header.Content-Type":
            "'application/x-www-form-urlencoded'"
        })

    self.api_resource_sqs_integration = apigw.AwsIntegration(
        service="sqs",
        integration_http_method="POST",
        path="{}/{}".format(core.Aws.ACCOUNT_ID, self.queue.queue_name),
        options=self.api_integration_options)

    self.base_api = apigw.RestApi(
        self,
        'quicksight-migration-sqs',
        rest_api_name='quicksight-migration-sqs',
        default_cors_preflight_options=apigw.CorsOptions(
            allow_origins=apigw.Cors.ALL_ORIGINS,
            allow_methods=["POST", "OPTIONS"],
            allow_headers=[
                'Access-Control-Allow-Origin',
                'Access-Control-Allow-Headers', 'Content-Type'
            ]))

    # Use the typed MethodResponse struct; the original passed a raw dict
    # with camelCase keys, which the Python bindings do not accept.
    self.base_api.root.add_method(
        "POST",
        self.api_resource_sqs_integration,
        method_responses=[
            apigw.MethodResponse(
                status_code='200',
                response_parameters={
                    'method.response.header.Access-Control-Allow-Headers': True,
                    'method.response.header.Access-Control-Allow-Methods': True,
                    'method.response.header.Access-Control-Allow-Origin': True
                })
        ])

    self.quicksight_migration_lambda = _lambda.Function(
        self,
        'quicksight-migration-lambda',
        handler='quicksight_migration.lambda_function.lambda_handler',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset(
            os.path.join(self.current_dir,
                         '../lambda/quicksight_migration/')),
        function_name='quicksight_migration_lambda',
        role=self.quicksight_migration_lambda_role,
        timeout=core.Duration.minutes(15),
        memory_size=1024,
        environment={
            'BUCKET_NAME': self.bucket.bucket_name,
            'S3_KEY': 'None',
            'INFRA_CONFIG_PARAM': '/infra/config',
            'SQS_URL': self.queue.queue_url
        })

    # Drive the migration Lambda from the SQS queue
    self.sqs_event_source = event_sources.SqsEventSource(self.queue)
    self.quicksight_migration_lambda.add_event_source(self.sqs_event_source)

    core.CfnOutput(
        self,
        "MigrationAPIGatewayURL",
        value=self.base_api.url,
        description="Migration API GW URL")
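# --- Usage sketch (added for illustration; not part of the original stack) ---
# POSTing to the migration API above url-encodes the raw body into an SQS
# SendMessage call, and the queue then triggers quicksight_migration_lambda.
# A minimal sketch, assuming `requests` is installed; the payload shape is
# whatever quicksight_migration.lambda_function expects and is illustrative
# only.
import requests

MIGRATION_API_URL = "https://<api-id>.execute-api.<region>.amazonaws.com/prod/"

resp = requests.post(
    MIGRATION_API_URL,
    json={"source_account": "111111111111", "dashboard_id": "example-id"},
)
assert resp.status_code == 200  # the integration returns an empty JSON body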
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    api_name = 'voncv'
    cert_arn = 'arn:aws:acm:us-east-1:921279086507:certificate/1cdc5fa6-0978-4c3c-96fa-ced4188b4fd0'

    # Example automatically generated. See https://github.com/aws/jsii/issues/826
    cert = acm.Certificate.from_certificate_arn(self, "cert", cert_arn)
    domain_name = 'voncv.sema4.com'
    domain_obj = api.DomainNameOptions(certificate=cert, domain_name=domain_name)

    integration_uri = 'http://voncwesprod.sema4.com:3000'

    # An earlier attempt used api.HttpApi with DefaultDomainMappingOptions;
    # it was abandoned in favor of the REST API below.
    base_api = api.RestApi(
        self,
        "RestApi",
        rest_api_name=api_name,
        domain_name=domain_obj,
        deploy=False)

    # One integration response per backend status code. The original source
    # mapped the 404 pattern to status_code='400' and the 405/500 patterns to
    # status_code='401'; the codes below match their selection patterns,
    # which is almost certainly what was intended.
    integration_response_200 = api.IntegrationResponse(status_code='200')
    integration_response_400 = api.IntegrationResponse(status_code='400', selection_pattern='400')
    integration_response_401 = api.IntegrationResponse(status_code='401', selection_pattern='401')
    integration_response_404 = api.IntegrationResponse(status_code='404', selection_pattern='404')
    integration_response_405 = api.IntegrationResponse(status_code='405', selection_pattern='405')
    integration_response_500 = api.IntegrationResponse(status_code='500', selection_pattern='500')
    integration_response_all = [
        integration_response_200,
        integration_response_400,
        integration_response_401,
        integration_response_404,
        integration_response_405,
        integration_response_500,
    ]
    integration_options = api.IntegrationOptions(
        integration_responses=integration_response_all)

    mr = [
        api.MethodResponse(status_code='200'),
        api.MethodResponse(status_code='400'),
        api.MethodResponse(status_code='401'),
        api.MethodResponse(status_code='404'),
        api.MethodResponse(status_code='405'),
        api.MethodResponse(status_code='500'),
    ]

    # Single HTTP proxy integration shared by every route
    integration = api.Integration(
        type=api.IntegrationType.HTTP,
        integration_http_method='POST',
        uri=integration_uri,
        options=integration_options)

    # /control routes. Path parameters are written with braces ({sampleid},
    # {patientid}, {caseid}) for consistency with {controlid}; the original
    # source used bare literals such as 'sampleid'.
    control = base_api.root.add_resource('control')
    control_post = control.add_method('POST', integration)

    control_controlid = control.add_resource('{controlid}')
    control_controlid_post = control_controlid.add_method('POST', integration)

    control_controlid_sample = control_controlid.add_resource('sample')
    control_controlid_sample_sampleid = control_controlid_sample.add_resource('{sampleid}')
    control_controlid_sample_sampleid_get = control_controlid_sample_sampleid.add_method(
        'GET', integration, method_responses=mr)

    control_controlid_sample_sampleid_pipelineoutput = \
        control_controlid_sample_sampleid.add_resource('pipeline_output')
    control_controlid_sample_sampleid_pipelineoutput_post = \
        control_controlid_sample_sampleid_pipelineoutput.add_method('POST', integration)

    # /getallpatients
    getallpatients = base_api.root.add_resource('getallpatients')
    getallpatients_get = getallpatients.add_method('GET', integration)

    # /patient routes (the original swapped the _get/_post variable names)
    patient = base_api.root.add_resource('patient')
    patient_get = patient.add_method('GET', integration)
    patient_post = patient.add_method('POST', integration)

    patientid = patient.add_resource('{patientid}')
    patientid_put = patientid.add_method('PUT', integration)
    patientid_get = patientid.add_method('GET', integration)

    patient_case = patientid.add_resource('case')
    patient_case_post = patient_case.add_method('POST', integration)

    patient_case_caseid = patient_case.add_resource('{caseid}')
    patient_case_caseid_get = patient_case_caseid.add_method('GET', integration)
    patient_case_caseid_put = patient_case_caseid.add_method('PUT', integration)

    patient_case_caseid_close = patient_case_caseid.add_resource('close')
    # The original used 'PUT' here despite the _post variable name; POST
    # matches the sibling action endpoints (curate, fail, reopen).
    patient_case_caseid_close_post = patient_case_caseid_close.add_method('POST', integration)

    patient_case_caseid_curate = patient_case_caseid.add_resource('curate')
    patient_case_caseid_curate_post = patient_case_caseid_curate.add_method('POST', integration)

    patient_case_caseid_fail = patient_case_caseid.add_resource('fail')
    patient_case_caseid_fail_post = patient_case_caseid_fail.add_method('POST', integration)

    patient_case_caseid_reopen = patient_case_caseid.add_resource('reopen')
    patient_case_caseid_reopen_post = patient_case_caseid_reopen.add_method('POST', integration)

    patient_case_caseid_sample = patient_case_caseid.add_resource('sample')
    patient_case_caseid_sample_sampleid = patient_case_caseid_sample.add_resource('{sampleid}')
    patient_case_caseid_sample_sampleid_get = patient_case_caseid_sample_sampleid.add_method(
        'GET', integration)

    patient_case_caseid_sample_sampleid_fail = \
        patient_case_caseid_sample_sampleid.add_resource('fail')
    patient_case_caseid_sample_sampleid_fail_post = \
        patient_case_caseid_sample_sampleid_fail.add_method('POST', integration)

    patient_case_caseid_sample_sampleid_pipelineoutput = \
        patient_case_caseid_sample_sampleid.add_resource('pipeline-output')
    patient_case_caseid_sample_sampleid_pipelineoutput_post = \
        patient_case_caseid_sample_sampleid_pipelineoutput.add_method('POST', integration)

    patient_case_caseid_variant = patient_case_caseid.add_resource('variant')
    # The original attached this POST to patient_case_caseid; the variant
    # resource is almost certainly the intended target.
    patient_case_caseid_variant_post = patient_case_caseid_variant.add_method('POST', integration)
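# --- Deployment sketch (added for illustration; not part of the original stack) ---
# Because the RestApi above is created with deploy=False, none of these routes
# is callable until a Deployment and Stage exist. A minimal sketch of one way
# to attach a 'prod' stage; the construct IDs are illustrative only.
import aws_cdk.aws_apigateway as api

def add_prod_stage(base_api: api.RestApi) -> api.Stage:
    # Snapshot the current set of methods into a deployment, then expose it
    # through a named stage.
    deployment = api.Deployment(base_api.stack, "VoncvDeployment", api=base_api)
    return api.Stage(
        base_api.stack,
        "VoncvProdStage",
        deployment=deployment,
        stage_name="prod")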