def __configure_gateway(self) -> None:
    self.gateway = a.RestApi(self, 'PortfolioMgmt')

    # Create kinesis integration
    integration_role = iam.Role(
        self, 'KinesisIntegrationRole',
        assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
    self.updates_stream.grant_write(integration_role)

    updates = self.gateway.root.add_resource('updates')
    updates.add_method(
        http_method='POST',
        authorization_type=a.AuthorizationType.IAM,
        integration=a.AwsIntegration(
            service='kinesis',
            action='PutRecord',
            subdomain=self.updates_stream.stream_name,
            options=a.IntegrationOptions(credentials_role=integration_role)))

    pmapi = self.gateway.root.add_resource('pmapi')
    pmapi.add_proxy(
        any_method=True,
        default_integration=a.LambdaIntegration(handler=PythonLambda(
            self, 'PortfolioMgmtAPI',
            build_prefix='artifacts/FinSurf-PortfolioMgmt-API',
            handler='handler.app',
            subnet_group_name='PortfolioMgmt',
            context=self.context,
            securityGroups=[self.security_group]).function))
def create_and_integrate_apigw(self, queue: sqs.Queue,
                               dashboard_name_prefix: str) -> str:
    """Creates API Gateway and integrates with SQS queue

    :param queue: the SQS queue to integrate with
    :type queue: aws_cdk.aws_sqs.Queue
    :param dashboard_name_prefix: the dashboard name to use as the API Gateway resource name
    :type dashboard_name_prefix: str
    :returns: the url that the webhooks will post to
    :rtype: str
    """
    webhook_apigw_role = iam.Role(
        self, 'WebhookAPIRole',
        role_name='WebhookAPIRole',
        assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
    webhook_apigw_role.add_to_policy(
        iam.PolicyStatement(resources=['*'], actions=['sqs:SendMessage']))

    webhook_apigw = apigw.RestApi(
        self, 'RepositoryStatusMonitorAPI',
        rest_api_name='RepositoryStatusMonitorAPI')
    webhook_apigw_resource = webhook_apigw.root.add_resource(
        dashboard_name_prefix)

    apigw_integration_response = apigw.IntegrationResponse(
        status_code='200', response_templates={'application/json': ""})
    apigw_integration_options = apigw.IntegrationOptions(
        credentials_role=webhook_apigw_role,
        integration_responses=[apigw_integration_response],
        request_templates={
            'application/json': 'Action=SendMessage&MessageBody=$input.body'
        },
        passthrough_behavior=apigw.PassthroughBehavior.NEVER,
        request_parameters={
            'integration.request.header.Content-Type':
            "'application/x-www-form-urlencoded'"
        })
    webhook_apigw_resource_sqs_integration = apigw.AwsIntegration(
        service='sqs',
        integration_http_method='POST',
        path='{}/{}'.format(core.Aws.ACCOUNT_ID, queue.queue_name),
        options=apigw_integration_options)

    webhook_apigw_resource.add_method(
        'POST',
        webhook_apigw_resource_sqs_integration,
        method_responses=[apigw.MethodResponse(status_code='200')])

    path = '/' + dashboard_name_prefix
    return webhook_apigw.url_for_path(path)
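create_and_integrate_apigw returns the invoke URL for the webhook resource. A minimal caller-side sketch of posting to it, assuming the requests library is available; the URL and payload below are placeholders rather than values produced by this stack:

# Hypothetical check of the webhook endpoint created above. The URL comes
# from the return value of create_and_integrate_apigw(); the payload shape
# is whatever the webhook sender posts, shown here as a dummy body.
import requests

webhook_url = "https://<rest-api-id>.execute-api.<region>.amazonaws.com/prod/<dashboard-name>"  # placeholder
resp = requests.post(webhook_url, json={"action": "opened", "number": 1})
print(resp.status_code)  # 200 when API Gateway relays the body to SQS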
def __init__(self, scope: core.Construct, id: str, stream: ks.Stream,
             **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    self.gateway = api.RestApi(self, 'EmailServices')
    resource = self.gateway.root.add_resource('send-mail')

    role = iam.Role(
        self, 'Apig-to-Kinesis',
        assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonKinesisFullAccess')
        ])

    kinesisIntegration = api.AwsIntegration(
        service='kinesis',
        action='PutRecord',
        subdomain=stream.stream_name,
        options=api.IntegrationOptions(credentials_role=role))

    self.post_method = resource.add_method('POST', kinesisIntegration)
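Neither Kinesis-backed POST method above defines a request mapping template, so a caller would likely have to supply the kinesis:PutRecord payload itself. A sketch of that payload; the stream name, partition key, and record contents are placeholders:

# Hypothetical request body for a POST method wired directly to
# kinesis:PutRecord. Without a mapping template the body is forwarded as-is,
# so it must follow the PutRecord API: Data base64-encoded, PartitionKey set.
import base64
import json

payload = {
    "StreamName": "my-email-stream",  # placeholder stream name
    "Data": base64.b64encode(json.dumps({"to": "user@example.com"}).encode()).decode(),
    "PartitionKey": "user@example.com",  # placeholder partition key
}
print(json.dumps(payload))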
def __init__(self, scope: core.Construct, id: str, props: KinesisFirehoseStackProps, **kwargs) -> None: super().__init__(scope, id, **kwargs) lambda_repository = aws_codecommit.Repository( self, "ClicksProcessingLambdaRepository", repository_name="MythicalMysfits-ClicksProcessingLambdaRepository", ) core.CfnOutput( self, "kinesisRepositoryCloneUrlHttp", value=lambda_repository.repository_clone_url_http, description="Clicks Processing Lambda Repository Clone URL HTTP", ) core.CfnOutput( self, "kinesisRepositoryCloneUrlSsh", value=lambda_repository.repository_clone_url_ssh, description="Clicks Processing Lambda Repository Clone URL SSH", ) clicks_destination_bucket = aws_s3.Bucket(self, "Bucket", versioned=True) lambda_function_policy = aws_iam.PolicyStatement() lambda_function_policy.add_actions("dynamodb:GetItem") lambda_function_policy.add_resources(props.table.table_arn) mysfits_clicks_processor = aws_lambda.Function( self, "Function", handler="streamProcessor.processRecord", runtime=aws_lambda.Runtime.PYTHON_3_7, description= "An Amazon Kinesis Firehose stream processor that enriches click records to not just include a mysfitId, but also other attributes that can be analyzed later.", memory_size=128, code=aws_lambda.Code.asset("../../lambda-streaming-processor"), timeout=core.Duration.seconds(30), initial_policy=[lambda_function_policy], environment={ # TODO: this seems better than having the user copy/paste it in, but is it the best way? "MYSFITS_API_URL": "https://{}.execute-api.{}.amazonaws.com/prod/".format( props.api_gateway.ref, core.Aws.REGION) }, ) firehose_delivery_role = aws_iam.Role( self, "FirehoseDeliveryRole", role_name="FirehoseDeliveryRole", assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"), external_id=core.Aws.ACCOUNT_ID, ) firehose_delivery_policy_s3_statement = aws_iam.PolicyStatement() firehose_delivery_policy_s3_statement.add_actions( "s3:AbortMultipartUpload", "s3:GetBucketLocation", "s3:GetObject", "s3:ListBucket", "s3:ListBucketMultipartUploads", "s3:PutObject", ) firehose_delivery_policy_s3_statement.add_resources( clicks_destination_bucket.bucket_arn) firehose_delivery_policy_s3_statement.add_resources( clicks_destination_bucket.arn_for_objects("*")) firehose_delivery_policy_lambda_statement = aws_iam.PolicyStatement() firehose_delivery_policy_lambda_statement.add_actions( "lambda:InvokeFunction") firehose_delivery_policy_lambda_statement.add_resources( mysfits_clicks_processor.function_arn) firehose_delivery_role.add_to_policy( firehose_delivery_policy_s3_statement) firehose_delivery_role.add_to_policy( firehose_delivery_policy_lambda_statement) mysfits_firehose_to_s3 = aws_kinesisfirehose.CfnDeliveryStream( self, "DeliveryStream", extended_s3_destination_configuration=aws_kinesisfirehose. CfnDeliveryStream.ExtendedS3DestinationConfigurationProperty( bucket_arn=clicks_destination_bucket.bucket_arn, buffering_hints=aws_kinesisfirehose.CfnDeliveryStream. BufferingHintsProperty(interval_in_seconds=60, size_in_m_bs=50), compression_format="UNCOMPRESSED", prefix="firehose/", role_arn=firehose_delivery_role.role_arn, processing_configuration=aws_kinesisfirehose.CfnDeliveryStream. ProcessingConfigurationProperty( enabled=True, processors=[ aws_kinesisfirehose.CfnDeliveryStream. ProcessorProperty( parameters=[ aws_kinesisfirehose.CfnDeliveryStream. ProcessorParameterProperty( parameter_name="LambdaArn", parameter_value=mysfits_clicks_processor. 
function_arn, ) ], type="Lambda", ) ], ), ), ) aws_lambda.CfnPermission( self, "Permission", action="lambda:InvokeFunction", function_name=mysfits_clicks_processor.function_arn, principal="firehose.amazonaws.com", source_account=core.Aws.ACCOUNT_ID, source_arn=mysfits_firehose_to_s3.attr_arn, ) click_processing_api_role = aws_iam.Role( self, "ClickProcessingApiRole", assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"), ) api_policy = aws_iam.PolicyStatement() api_policy.add_actions("firehose:PutRecord") api_policy.add_resources(mysfits_firehose_to_s3.attr_arn) aws_iam.Policy( self, "ClickProcessingApiPolicy", policy_name="api_gateway_firehose_proxy_role", statements=[api_policy], roles=[click_processing_api_role], ) api = aws_apigateway.RestApi( self, "APIEndpoint", rest_api_name="ClickProcessing API Service", endpoint_types=[aws_apigateway.EndpointType.REGIONAL], ) clicks = api.root.add_resource("clicks") clicks.add_method( "PUT", aws_apigateway.AwsIntegration( service="firehose", integration_http_method="POST", action="PutRecord", options=aws_apigateway.IntegrationOptions( connection_type=aws_apigateway.ConnectionType.INTERNET, credentials_role=click_processing_api_role, integration_responses=[ aws_apigateway.IntegrationResponse( status_code="200", response_templates={ "application/json": '{"status": "OK"}' }, response_parameters={ "method.response.header.Access-Control-Allow-Headers": "'Content-Type'", "method.response.header.Access-Control-Allow-Methods": "'OPTIONS,PUT'", "method.response.header.Access-Control-Allow-Origin": "'*'", }, ) ], request_parameters={ "integration.request.header.Content-Type": "'application/x-amz-json-1.1'" }, request_templates={ "application/json": """{ "DeliveryStreamName": "%s", "Record": { "Data": "$util.base64Encode($input.json('$'))" }}""" % mysfits_firehose_to_s3.ref }, ), ), method_responses=[ aws_apigateway.MethodResponse( status_code="200", response_parameters={ "method.response.header.Access-Control-Allow-Headers": True, "method.response.header.Access-Control-Allow-Methods": True, "method.response.header.Access-Control-Allow-Origin": True, }, ) ], ) clicks.add_method( "OPTIONS", aws_apigateway.MockIntegration( integration_responses=[ aws_apigateway.IntegrationResponse( status_code="200", response_parameters={ "method.response.header.Access-Control-Allow-Headers": "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,X-Amz-User-Agent'", "method.response.header.Access-Control-Allow-Origin": "'*'", "method.response.header.Access-Control-Allow-Credentials": "'false'", "method.response.header.Access-Control-Allow-Methods": "'OPTIONS,GET,PUT,POST,DELETE'", }, ) ], passthrough_behavior=aws_apigateway.PassthroughBehavior.NEVER, request_templates={"application/json": '{"statusCode": 200}'}, ), method_responses=[ aws_apigateway.MethodResponse( status_code="200", response_parameters={ "method.response.header.Access-Control-Allow-Headers": True, "method.response.header.Access-Control-Allow-Methods": True, "method.response.header.Access-Control-Allow-Credentials": True, "method.response.header.Access-Control-Allow-Origin": True, }, ) ], )
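A sketch of exercising the PUT /clicks method defined above, assuming the requests library; the endpoint URL is a placeholder, and the click payload only needs to be valid JSON, since the request mapping template base64-encodes the whole body into a Firehose record:

# Hypothetical client call against the /clicks resource defined above.
# API Gateway wraps the JSON body into a Firehose PutRecord via the request
# template, so the caller only sends the raw click event.
import requests

api_url = "https://<rest-api-id>.execute-api.<region>.amazonaws.com/prod"  # placeholder
click_event = {"mysfitId": "abc123"}  # placeholder click record

resp = requests.put(f"{api_url}/clicks", json=click_event)
print(resp.status_code, resp.json())  # 200 with {"status": "OK"} on success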
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    env_name = self.node.try_get_context('env')

    # Create the SQS queue
    queue = sqs.Queue(self, id=f"{env_name}-SQSQueue",
                      queue_name=f"{env_name}-queue")

    # Create the API GW service role with permissions to call SQS
    rest_api_role = iam.Role(
        self,
        id=f"{env_name}-RestAPISQSRole",
        assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSQSFullAccess")
        ])

    # Create an API GW Rest API
    base_api = apigw.RestApi(
        self,
        id=f'{env_name}-ApiGW',
        rest_api_name=f'{env_name}SQSTestAPI',
        api_key_source_type=apigw.ApiKeySourceType.HEADER)

    usage_api_key_value = ''.join(
        random.choice(string.ascii_uppercase + string.ascii_lowercase +
                      string.digits) for _ in range(40))
    usage_api_key = base_api.add_api_key(id=f'{env_name}-apikey',
                                         value=usage_api_key_value)
    usage_plan = base_api.add_usage_plan(id=f'{env_name}-usageplan',
                                         name=f'{env_name}-usageplan',
                                         api_key=usage_api_key,
                                         throttle=apigw.ThrottleSettings(
                                             rate_limit=10, burst_limit=2))
    usage_plan.add_api_stage(stage=base_api.deployment_stage)

    # Create a resource named "sqstest" on the base API
    api_resource = base_api.root.add_resource('sqstest')

    # Create API Integration Response object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html
    integration_response = apigw.IntegrationResponse(
        status_code="200",
        response_templates={"application/json": ""},
    )

    # Create API Integration Options object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html
    api_integration_options = apigw.IntegrationOptions(
        credentials_role=rest_api_role,
        integration_responses=[integration_response],
        request_templates={
            "application/json": "Action=SendMessage&MessageBody=$input.body"
        },
        passthrough_behavior=apigw.PassthroughBehavior.NEVER,
        request_parameters={
            "integration.request.header.Content-Type":
            "'application/x-www-form-urlencoded'"
        },
    )

    # Create AWS Integration Object for SQS: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/AwsIntegration.html
    api_resource_sqs_integration = apigw.AwsIntegration(
        service="sqs",
        integration_http_method="POST",
        # must be ACCOUNT_ID. Just the way URL to SQS is created
        path="{}/{}".format(core.Aws.ACCOUNT_ID, queue.queue_name),
        options=api_integration_options)

    # Create a Method Response Object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/MethodResponse.html
    method_response = apigw.MethodResponse(status_code="200")

    # Add the API GW Integration to the "sqstest" API GW Resource
    api_resource.add_method("POST",
                            api_resource_sqs_integration,
                            method_responses=[method_response],
                            api_key_required=True)

    # Creating Lambda function that will be triggered by the SQS Queue
    sqs_lambda = _lambda.Function(
        self,
        'SQSTriggerLambda',
        handler='sqs_lambda.handler',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.asset('pr_sqs_lambda'),
    )

    # Create an SQS event source for Lambda
    sqs_event_source = lambda_event_source.SqsEventSource(queue)

    # Add SQS event source to the Lambda function
    sqs_lambda.add_event_source(sqs_event_source)

    # https://67ixnggm81.execute-api.us-east-1.amazonaws.com/prod/sqstest
    region = core.Aws.REGION
    core.CfnOutput(self, 'api-gw-url',
                   value='https://' + base_api.rest_api_id + '.execute-api.' +
                   region + '.amazonaws.com/prod/sqstest',
                   export_name='api-sqs-gw-url')

    print(f'API Key: {usage_api_key_value}')
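The Lambda code under pr_sqs_lambda/ is referenced but not shown; a hypothetical sketch of what sqs_lambda.handler could look like, relying only on the standard SQS event shape:

# pr_sqs_lambda/sqs_lambda.py -- hypothetical sketch; the real handler is not
# shown in this stack, only referenced via handler='sqs_lambda.handler'.
# SQS event records arrive under event['Records'] with the message in 'body'.
import json


def handler(event, context):
    for record in event.get('Records', []):
        print(f"Received SQS message: {record['body']}")
    return {'statusCode': 200, 'body': json.dumps('ok')}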
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) #XXX: For createing Amazon MWAA in the existing VPC, # remove comments from the below codes and # comments out vpc = aws_ec2.Vpc(..) codes, # then pass -c vpc_name=your-existing-vpc to cdk command # for example, # cdk -c vpc_name=your-existing-vpc syth # # vpc_name = self.node.try_get_context('vpc_name') # vpc = aws_ec2.Vpc.from_lookup(self, 'ExistingVPC', # is_default=True, # vpc_name=vpc_name # ) vpc = aws_ec2.Vpc(self, "ApiGatewayDynamoDBVPC", max_azs=2, gateway_endpoints={ "S3": aws_ec2.GatewayVpcEndpointOptions( service=aws_ec2.GatewayVpcEndpointAwsService.S3 ), "DynamoDB": aws_ec2.GatewayVpcEndpointOptions( service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB ) } ) DDB_TABLE_SUFFIX = ''.join(random.sample((string.ascii_lowercase + string.digits), k=7)) DDB_TABLE_NAME = "Comments-{}".format(DDB_TABLE_SUFFIX) ddb_table = aws_dynamodb.Table(self, "DynamoDbTable", table_name=DDB_TABLE_NAME, removal_policy=cdk.RemovalPolicy.DESTROY, partition_key=aws_dynamodb.Attribute(name="commentId", type=aws_dynamodb.AttributeType.STRING), time_to_live_attribute="ttl", billing_mode=aws_dynamodb.BillingMode.PROVISIONED, read_capacity=15, write_capacity=5, ) ddb_table.add_global_secondary_index( read_capacity=15, write_capacity=5, index_name="pageId-index", partition_key=aws_dynamodb.Attribute(name='pageId', type=aws_dynamodb.AttributeType.STRING), projection_type=aws_dynamodb.ProjectionType.ALL ) user_pool = aws_cognito.UserPool(self, 'UserPool', user_pool_name='UserPoolForApiGateway', removal_policy=cdk.RemovalPolicy.DESTROY, self_sign_up_enabled=True, sign_in_aliases={'email': True}, auto_verify={'email': True}, password_policy={ 'min_length': 8, 'require_lowercase': False, 'require_digits': False, 'require_uppercase': False, 'require_symbols': False, }, account_recovery=aws_cognito.AccountRecovery.EMAIL_ONLY ) user_pool_client = aws_cognito.UserPoolClient(self, 'UserPoolClient', user_pool=user_pool, auth_flows={ 'admin_user_password': True, 'user_password': True, 'custom': True, 'user_srp': True }, supported_identity_providers=[aws_cognito.UserPoolClientIdentityProvider.COGNITO] ) auth = aws_apigateway.CognitoUserPoolsAuthorizer(self, 'AuthorizerForDynamoDBApi', cognito_user_pools=[user_pool] ) ddb_access_policy_doc = aws_iam.PolicyDocument() ddb_access_policy_doc.add_statements(aws_iam.PolicyStatement(**{ "effect": aws_iam.Effect.ALLOW, "resources": [ddb_table.table_arn], "actions": [ "dynamodb:DeleteItem", "dynamodb:PartiQLInsert", "dynamodb:UpdateTimeToLive", "dynamodb:BatchWriteItem", "dynamodb:PutItem", "dynamodb:PartiQLUpdate", "dynamodb:UpdateItem", "dynamodb:PartiQLDelete" ] })) apigw_dynamodb_role = aws_iam.Role(self, "ApiGatewayRoleForDynamoDB", role_name='APIGatewayRoleForDynamoDB', assumed_by=aws_iam.ServicePrincipal('apigateway.amazonaws.com'), inline_policies={ 'DynamoDBAccessPolicy': ddb_access_policy_doc }, managed_policies=[ aws_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonDynamoDBReadOnlyAccess'), ] ) dynamodb_api = aws_apigateway.RestApi(self, "DynamoDBProxyAPI", rest_api_name="comments-api", description="An Amazon API Gateway REST API that integrated with an Amazon DynamoDB.", endpoint_types=[aws_apigateway.EndpointType.REGIONAL], default_cors_preflight_options={ "allow_origins": aws_apigateway.Cors.ALL_ORIGINS }, deploy=True, deploy_options=aws_apigateway.StageOptions(stage_name="v1"), endpoint_export_name="DynamoDBProxyAPIEndpoint" ) all_resources 
= dynamodb_api.root.add_resource("comments") one_resource = all_resources.add_resource("{pageId}") apigw_error_responses = [ aws_apigateway.IntegrationResponse(status_code="400", selection_pattern="4\d{2}"), aws_apigateway.IntegrationResponse(status_code="500", selection_pattern="5\d{2}") ] apigw_ok_responses = [ aws_apigateway.IntegrationResponse( status_code="200" ) ] ddb_put_item_options = aws_apigateway.IntegrationOptions( credentials_role=apigw_dynamodb_role, integration_responses=[*apigw_ok_responses, *apigw_error_responses], request_templates={ 'application/json': json.dumps({ "TableName": DDB_TABLE_NAME, "Item": { "commentId": { "S": "$context.requestId" }, "pageId": { "S": "$input.path('$.pageId')" }, "userName": { "S": "$input.path('$.userName')" }, "message": { "S": "$input.path('$.message')" } } }, indent=2) }, passthrough_behavior=aws_apigateway.PassthroughBehavior.WHEN_NO_TEMPLATES ) create_integration = aws_apigateway.AwsIntegration( service='dynamodb', action='PutItem', integration_http_method='POST', options=ddb_put_item_options ) method_responses = [ aws_apigateway.MethodResponse(status_code='200'), aws_apigateway.MethodResponse(status_code='400'), aws_apigateway.MethodResponse(status_code='500') ] all_resources.add_method('POST', create_integration, method_responses=method_responses, authorization_type=aws_apigateway.AuthorizationType.COGNITO, authorizer=auth ) get_response_templates = ''' #set($inputRoot = $input.path('$')) { "comments": [ #foreach($elem in $inputRoot.Items) { "commentId": "$elem.commentId.S", "userName": "******", "message": "$elem.message.S" }#if($foreach.hasNext),#end #end ] }''' ddb_query_item_options = aws_apigateway.IntegrationOptions( credentials_role=apigw_dynamodb_role, integration_responses=[ aws_apigateway.IntegrationResponse( status_code="200", response_templates={ 'application/json': get_response_templates } ), *apigw_error_responses ], request_templates={ 'application/json': json.dumps({ "TableName": DDB_TABLE_NAME, "IndexName": "pageId-index", "KeyConditionExpression": "pageId = :v1", "ExpressionAttributeValues": { ":v1": { "S": "$input.params('pageId')" } } }, indent=2) }, passthrough_behavior=aws_apigateway.PassthroughBehavior.WHEN_NO_TEMPLATES ) get_integration = aws_apigateway.AwsIntegration( service='dynamodb', action='Query', integration_http_method='POST', options=ddb_query_item_options ) one_resource.add_method('GET', get_integration, method_responses=method_responses, authorization_type=aws_apigateway.AuthorizationType.COGNITO, authorizer=auth ) cdk.CfnOutput(self, 'DynamoDBTableName', value=ddb_table.table_name) cdk.CfnOutput(self, 'UserPoolId', value=user_pool.user_pool_id) cdk.CfnOutput(self, 'UserPoolClientId', value=user_pool_client.user_pool_client_id)
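A sketch of calling the POST /comments method defined above, assuming the requests library; the URL and the Cognito ID token are placeholders, and the body fields mirror the request mapping template (pageId, userName, message):

# Hypothetical client call for the POST /comments method defined above.
# The Authorization header must carry a Cognito ID token because the method
# uses the CognitoUserPoolsAuthorizer; the stage name "v1" matches
# deploy_options above.
import requests

api_url = "https://<rest-api-id>.execute-api.<region>.amazonaws.com/v1"  # placeholder
id_token = "<cognito-id-token>"  # placeholder, e.g. obtained via InitiateAuth

resp = requests.post(
    f"{api_url}/comments",
    json={"pageId": "home", "userName": "alice", "message": "hello"},
    headers={"Authorization": id_token},
)
print(resp.status_code)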
def __init__(self, scope: cdk.Construct, construct_id: str, user_pool=None,
             user_pool_domain=None, hello_lambda=None, web_bucket=None,
             **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    api = api_gateway.RestApi(self, "demo-api")

    auth = api_gateway.CognitoUserPoolsAuthorizer(
        self, "helloAuthorizer", cognito_user_pools=[user_pool])

    hello_resource = api.root.add_resource('hello')
    hello_resource.add_method(
        "GET",
        api_gateway.LambdaIntegration(hello_lambda),
        authorizer=auth,
        authorization_type=api_gateway.AuthorizationType.COGNITO)

    login_resource = api.root.add_resource('login')
    login_resource.add_method(
        "GET", api_gateway.HttpIntegration(user_pool_domain.base_url()))

    web_resource = api.root.add_resource('web')
    file_resource = web_resource.add_resource('{file}')

    s3_access_role = iam.Role(
        self, "S3AccessRole",
        assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonS3ReadOnlyAccess')
        ])

    file_resource.add_method(
        "GET",
        api_gateway.AwsIntegration(
            service="s3",
            integration_http_method='GET',
            path="{}/{{item}}".format(web_bucket.bucket_name),
            options={
                'credentials_role': s3_access_role,
                'integration_responses': [{
                    'status_code': "200",
                    'response_parameters': {
                        'method.response.header.Content-Type':
                        'integration.response.header.Content-Type'
                    }
                }],
                'request_parameters': {
                    'integration.request.path.item':
                    'method.request.path.file',
                    'integration.request.header.Content-Type':
                    'method.request.header.Content-Type'
                }
            }),
        request_parameters={
            'method.request.path.file': True,
            'method.request.header.Content-Type': False
        },
        method_responses=[{
            'status_code': "200",
            'response_parameters': {
                'method.response.header.Content-Type': True
            }
        }],
    )

    self.api_gateway = api
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) vpc = aws_ec2.Vpc( self, "OctemberVPC", max_azs=2, # subnet_configuration=[{ # "cidrMask": 24, # "name": "Public", # "subnetType": aws_ec2.SubnetType.PUBLIC, # }, # { # "cidrMask": 24, # "name": "Private", # "subnetType": aws_ec2.SubnetType.PRIVATE # }, # { # "cidrMask": 28, # "name": "Isolated", # "subnetType": aws_ec2.SubnetType.ISOLATED, # "reserved": True # } # ], gateway_endpoints={ "S3": aws_ec2.GatewayVpcEndpointOptions( service=aws_ec2.GatewayVpcEndpointAwsService.S3) }) dynamo_db_endpoint = vpc.add_gateway_endpoint( "DynamoDbEndpoint", service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB) s3_bucket = s3.Bucket( self, "s3bucket", bucket_name="octember-bizcard-{region}-{account}".format( region=core.Aws.REGION, account=core.Aws.ACCOUNT_ID)) api = apigw.RestApi( self, "BizcardImageUploader", rest_api_name="BizcardImageUploader", description="This service serves uploading bizcard images into s3.", endpoint_types=[apigw.EndpointType.REGIONAL], binary_media_types=["image/png", "image/jpg"], deploy=True, deploy_options=apigw.StageOptions(stage_name="v1")) rest_api_role = aws_iam.Role( self, "ApiGatewayRoleForS3", role_name="ApiGatewayRoleForS3FullAccess", assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"), managed_policies=[ aws_iam.ManagedPolicy.from_aws_managed_policy_name( "AmazonS3FullAccess") ]) list_objects_responses = [ apigw.IntegrationResponse( status_code="200", #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html#aws_cdk.aws_apigateway.IntegrationResponse.response_parameters # The response parameters from the backend response that API Gateway sends to the method response. # Use the destination as the key and the source as the value: # - The destination must be an existing response parameter in the MethodResponse property. # - The source must be an existing method request parameter or a static value. 
response_parameters={ 'method.response.header.Timestamp': 'integration.response.header.Date', 'method.response.header.Content-Length': 'integration.response.header.Content-Length', 'method.response.header.Content-Type': 'integration.response.header.Content-Type' }), apigw.IntegrationResponse(status_code="400", selection_pattern="4\d{2}"), apigw.IntegrationResponse(status_code="500", selection_pattern="5\d{2}") ] list_objects_integration_options = apigw.IntegrationOptions( credentials_role=rest_api_role, integration_responses=list_objects_responses) get_s3_integration = apigw.AwsIntegration( service="s3", integration_http_method="GET", path='/', options=list_objects_integration_options) api.root.add_method( "GET", get_s3_integration, authorization_type=apigw.AuthorizationType.IAM, api_key_required=False, method_responses=[ apigw.MethodResponse( status_code="200", response_parameters={ 'method.response.header.Timestamp': False, 'method.response.header.Content-Length': False, 'method.response.header.Content-Type': False }, response_models={'application/json': apigw.EmptyModel()}), apigw.MethodResponse(status_code="400"), apigw.MethodResponse(status_code="500") ], request_parameters={'method.request.header.Content-Type': False}) get_s3_folder_integration_options = apigw.IntegrationOptions( credentials_role=rest_api_role, integration_responses=list_objects_responses, #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html#aws_cdk.aws_apigateway.IntegrationOptions.request_parameters # Specify request parameters as key-value pairs (string-to-string mappings), with a destination as the key and a source as the value. # The source must be an existing method request parameter or a static value. request_parameters={ "integration.request.path.bucket": "method.request.path.folder" }) get_s3_folder_integration = apigw.AwsIntegration( service="s3", integration_http_method="GET", path="{bucket}", options=get_s3_folder_integration_options) s3_folder = api.root.add_resource('{folder}') s3_folder.add_method( "GET", get_s3_folder_integration, authorization_type=apigw.AuthorizationType.IAM, api_key_required=False, method_responses=[ apigw.MethodResponse( status_code="200", response_parameters={ 'method.response.header.Timestamp': False, 'method.response.header.Content-Length': False, 'method.response.header.Content-Type': False }, response_models={'application/json': apigw.EmptyModel()}), apigw.MethodResponse(status_code="400"), apigw.MethodResponse(status_code="500") ], request_parameters={ 'method.request.header.Content-Type': False, 'method.request.path.folder': True }) get_s3_item_integration_options = apigw.IntegrationOptions( credentials_role=rest_api_role, integration_responses=list_objects_responses, request_parameters={ "integration.request.path.bucket": "method.request.path.folder", "integration.request.path.object": "method.request.path.item" }) get_s3_item_integration = apigw.AwsIntegration( service="s3", integration_http_method="GET", path="{bucket}/{object}", options=get_s3_item_integration_options) s3_item = s3_folder.add_resource('{item}') s3_item.add_method( "GET", get_s3_item_integration, authorization_type=apigw.AuthorizationType.IAM, api_key_required=False, method_responses=[ apigw.MethodResponse( status_code="200", response_parameters={ 'method.response.header.Timestamp': False, 'method.response.header.Content-Length': False, 'method.response.header.Content-Type': False }, response_models={'application/json': apigw.EmptyModel()}), 
apigw.MethodResponse(status_code="400"), apigw.MethodResponse(status_code="500") ], request_parameters={ 'method.request.header.Content-Type': False, 'method.request.path.folder': True, 'method.request.path.item': True }) put_s3_item_integration_options = apigw.IntegrationOptions( credentials_role=rest_api_role, integration_responses=[ apigw.IntegrationResponse(status_code="200"), apigw.IntegrationResponse(status_code="400", selection_pattern="4\d{2}"), apigw.IntegrationResponse(status_code="500", selection_pattern="5\d{2}") ], request_parameters={ "integration.request.header.Content-Type": "method.request.header.Content-Type", "integration.request.path.bucket": "method.request.path.folder", "integration.request.path.object": "method.request.path.item" }) put_s3_item_integration = apigw.AwsIntegration( service="s3", integration_http_method="PUT", path="{bucket}/{object}", options=put_s3_item_integration_options) s3_item.add_method( "PUT", put_s3_item_integration, authorization_type=apigw.AuthorizationType.IAM, api_key_required=False, method_responses=[ apigw.MethodResponse( status_code="200", response_parameters={ 'method.response.header.Content-Type': False }, response_models={'application/json': apigw.EmptyModel()}), apigw.MethodResponse(status_code="400"), apigw.MethodResponse(status_code="500") ], request_parameters={ 'method.request.header.Content-Type': False, 'method.request.path.folder': True, 'method.request.path.item': True }) ddb_table = dynamodb.Table( self, "BizcardImageMetaInfoDdbTable", table_name="OctemberBizcardImgMeta", partition_key=dynamodb.Attribute( name="image_id", type=dynamodb.AttributeType.STRING), billing_mode=dynamodb.BillingMode.PROVISIONED, read_capacity=15, write_capacity=5) img_kinesis_stream = kinesis.Stream( self, "BizcardImagePath", stream_name="octember-bizcard-image") # create lambda function trigger_textract_lambda_fn = _lambda.Function( self, "TriggerTextExtractorFromImage", runtime=_lambda.Runtime.PYTHON_3_7, function_name="TriggerTextExtractorFromImage", handler="trigger_text_extract_from_s3_image.lambda_handler", description="Trigger to extract text from an image in S3", code=_lambda.Code.asset( "./src/main/python/TriggerTextExtractFromS3Image"), environment={ 'REGION_NAME': core.Aws.REGION, 'DDB_TABLE_NAME': ddb_table.table_name, 'KINESIS_STREAM_NAME': img_kinesis_stream.stream_name }, timeout=core.Duration.minutes(5)) ddb_table_rw_policy_statement = aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, resources=[ddb_table.table_arn], actions=[ "dynamodb:BatchGetItem", "dynamodb:Describe*", "dynamodb:List*", "dynamodb:GetItem", "dynamodb:Query", "dynamodb:Scan", "dynamodb:BatchWriteItem", "dynamodb:DeleteItem", "dynamodb:PutItem", "dynamodb:UpdateItem", "dax:Describe*", "dax:List*", "dax:GetItem", "dax:BatchGetItem", "dax:Query", "dax:Scan", "dax:BatchWriteItem", "dax:DeleteItem", "dax:PutItem", "dax:UpdateItem" ]) trigger_textract_lambda_fn.add_to_role_policy( ddb_table_rw_policy_statement) trigger_textract_lambda_fn.add_to_role_policy( aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW, resources=[img_kinesis_stream.stream_arn], actions=[ "kinesis:Get*", "kinesis:List*", "kinesis:Describe*", "kinesis:PutRecord", "kinesis:PutRecords" ])) # assign notification for the s3 event type (ex: OBJECT_CREATED) s3_event_filter = s3.NotificationKeyFilter(prefix="bizcard-raw-img/", suffix=".jpg") s3_event_source = S3EventSource(s3_bucket, events=[s3.EventType.OBJECT_CREATED], filters=[s3_event_filter]) 
trigger_textract_lambda_fn.add_event_source(s3_event_source) #XXX: https://github.com/aws/aws-cdk/issues/2240 # To avoid to create extra Lambda Functions with names like LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8a # if log_retention=aws_logs.RetentionDays.THREE_DAYS is added to the constructor props log_group = aws_logs.LogGroup( self, "TriggerTextractLogGroup", log_group_name="/aws/lambda/TriggerTextExtractorFromImage", retention=aws_logs.RetentionDays.THREE_DAYS) log_group.grant_write(trigger_textract_lambda_fn) text_kinesis_stream = kinesis.Stream( self, "BizcardTextData", stream_name="octember-bizcard-txt") textract_lambda_fn = _lambda.Function( self, "GetTextFromImage", runtime=_lambda.Runtime.PYTHON_3_7, function_name="GetTextFromImage", handler="get_text_from_s3_image.lambda_handler", description="extract text from an image in S3", code=_lambda.Code.asset("./src/main/python/GetTextFromS3Image"), environment={ 'REGION_NAME': core.Aws.REGION, 'DDB_TABLE_NAME': ddb_table.table_name, 'KINESIS_STREAM_NAME': text_kinesis_stream.stream_name }, timeout=core.Duration.minutes(5)) textract_lambda_fn.add_to_role_policy(ddb_table_rw_policy_statement) textract_lambda_fn.add_to_role_policy( aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW, resources=[text_kinesis_stream.stream_arn], actions=[ "kinesis:Get*", "kinesis:List*", "kinesis:Describe*", "kinesis:PutRecord", "kinesis:PutRecords" ])) textract_lambda_fn.add_to_role_policy( aws_iam.PolicyStatement( **{ "effect": aws_iam.Effect.ALLOW, "resources": [ s3_bucket.bucket_arn, "{}/*".format( s3_bucket.bucket_arn) ], "actions": [ "s3:AbortMultipartUpload", "s3:GetBucketLocation", "s3:GetObject", "s3:ListBucket", "s3:ListBucketMultipartUploads", "s3:PutObject" ] })) textract_lambda_fn.add_to_role_policy( aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW, resources=["*"], actions=["textract:*"])) img_kinesis_event_source = KinesisEventSource( img_kinesis_stream, batch_size=100, starting_position=_lambda.StartingPosition.LATEST) textract_lambda_fn.add_event_source(img_kinesis_event_source) log_group = aws_logs.LogGroup( self, "GetTextFromImageLogGroup", log_group_name="/aws/lambda/GetTextFromImage", retention=aws_logs.RetentionDays.THREE_DAYS) log_group.grant_write(textract_lambda_fn) sg_use_bizcard_es = aws_ec2.SecurityGroup( self, "BizcardSearchClientSG", vpc=vpc, allow_all_outbound=True, description= 'security group for octember bizcard elasticsearch client', security_group_name='use-octember-bizcard-es') core.Tags.of(sg_use_bizcard_es).add('Name', 'use-octember-bizcard-es') sg_bizcard_es = aws_ec2.SecurityGroup( self, "BizcardSearchSG", vpc=vpc, allow_all_outbound=True, description='security group for octember bizcard elasticsearch', security_group_name='octember-bizcard-es') core.Tags.of(sg_bizcard_es).add('Name', 'octember-bizcard-es') sg_bizcard_es.add_ingress_rule(peer=sg_bizcard_es, connection=aws_ec2.Port.all_tcp(), description='octember-bizcard-es') sg_bizcard_es.add_ingress_rule(peer=sg_use_bizcard_es, connection=aws_ec2.Port.all_tcp(), description='use-octember-bizcard-es') sg_ssh_access = aws_ec2.SecurityGroup( self, "BastionHostSG", vpc=vpc, allow_all_outbound=True, description='security group for bastion host', security_group_name='octember-bastion-host-sg') core.Tags.of(sg_ssh_access).add('Name', 'octember-bastion-host') sg_ssh_access.add_ingress_rule(peer=aws_ec2.Peer.any_ipv4(), connection=aws_ec2.Port.tcp(22), description='ssh access') bastion_host = aws_ec2.BastionHostLinux( self, "BastionHost", vpc=vpc, 
instance_type=aws_ec2.InstanceType('t3.nano'), security_group=sg_ssh_access, subnet_selection=aws_ec2.SubnetSelection( subnet_type=aws_ec2.SubnetType.PUBLIC)) bastion_host.instance.add_security_group(sg_use_bizcard_es) #XXX: aws cdk elastsearch example - https://github.com/aws/aws-cdk/issues/2873 es_cfn_domain = aws_elasticsearch.CfnDomain( self, 'BizcardSearch', elasticsearch_cluster_config={ "dedicatedMasterCount": 3, "dedicatedMasterEnabled": True, "dedicatedMasterType": "t2.medium.elasticsearch", "instanceCount": 2, "instanceType": "t2.medium.elasticsearch", "zoneAwarenessEnabled": True }, ebs_options={ "ebsEnabled": True, "volumeSize": 10, "volumeType": "gp2" }, domain_name="octember-bizcard", elasticsearch_version="7.9", encryption_at_rest_options={"enabled": False}, access_policies={ "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Principal": { "AWS": "*" }, "Action": ["es:Describe*", "es:List*", "es:Get*", "es:ESHttp*"], "Resource": self.format_arn(service="es", resource="domain", resource_name="octember-bizcard/*") }] }, snapshot_options={"automatedSnapshotStartHour": 17}, vpc_options={ "securityGroupIds": [sg_bizcard_es.security_group_id], "subnetIds": vpc.select_subnets( subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids }) core.Tags.of(es_cfn_domain).add('Name', 'octember-bizcard-es') s3_lib_bucket_name = self.node.try_get_context("lib_bucket_name") #XXX: https://github.com/aws/aws-cdk/issues/1342 s3_lib_bucket = s3.Bucket.from_bucket_name(self, id, s3_lib_bucket_name) es_lib_layer = _lambda.LayerVersion( self, "ESLib", layer_version_name="es-lib", compatible_runtimes=[_lambda.Runtime.PYTHON_3_7], code=_lambda.Code.from_bucket(s3_lib_bucket, "var/octember-es-lib.zip")) redis_lib_layer = _lambda.LayerVersion( self, "RedisLib", layer_version_name="redis-lib", compatible_runtimes=[_lambda.Runtime.PYTHON_3_7], code=_lambda.Code.from_bucket(s3_lib_bucket, "var/octember-redis-lib.zip")) #XXX: Deploy lambda in VPC - https://github.com/aws/aws-cdk/issues/1342 upsert_to_es_lambda_fn = _lambda.Function( self, "UpsertBizcardToES", runtime=_lambda.Runtime.PYTHON_3_7, function_name="UpsertBizcardToElasticSearch", handler="upsert_bizcard_to_es.lambda_handler", description="Upsert bizcard text into elasticsearch", code=_lambda.Code.asset("./src/main/python/UpsertBizcardToES"), environment={ 'ES_HOST': es_cfn_domain.attr_domain_endpoint, 'ES_INDEX': 'octember_bizcard', 'ES_TYPE': 'bizcard' }, timeout=core.Duration.minutes(5), layers=[es_lib_layer], security_groups=[sg_use_bizcard_es], vpc=vpc) text_kinesis_event_source = KinesisEventSource( text_kinesis_stream, batch_size=99, starting_position=_lambda.StartingPosition.LATEST) upsert_to_es_lambda_fn.add_event_source(text_kinesis_event_source) log_group = aws_logs.LogGroup( self, "UpsertBizcardToESLogGroup", log_group_name="/aws/lambda/UpsertBizcardToElasticSearch", retention=aws_logs.RetentionDays.THREE_DAYS) log_group.grant_write(upsert_to_es_lambda_fn) firehose_role_policy_doc = aws_iam.PolicyDocument() firehose_role_policy_doc.add_statements( aws_iam.PolicyStatement( **{ "effect": aws_iam.Effect.ALLOW, "resources": [ s3_bucket.bucket_arn, "{}/*".format( s3_bucket.bucket_arn) ], "actions": [ "s3:AbortMultipartUpload", "s3:GetBucketLocation", "s3:GetObject", "s3:ListBucket", "s3:ListBucketMultipartUploads", "s3:PutObject" ] })) firehose_role_policy_doc.add_statements( aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW, resources=["*"], actions=[ "glue:GetTable", "glue:GetTableVersion", "glue:GetTableVersions" ])) 
firehose_role_policy_doc.add_statements( aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW, resources=[text_kinesis_stream.stream_arn], actions=[ "kinesis:DescribeStream", "kinesis:GetShardIterator", "kinesis:GetRecords" ])) firehose_log_group_name = "/aws/kinesisfirehose/octember-bizcard-txt-to-s3" firehose_role_policy_doc.add_statements( aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, #XXX: The ARN will be formatted as follows: # arn:{partition}:{service}:{region}:{account}:{resource}{sep}}{resource-name} resources=[ self.format_arn(service="logs", resource="log-group", resource_name="{}:log-stream:*".format( firehose_log_group_name), sep=":") ], actions=["logs:PutLogEvents"])) firehose_role = aws_iam.Role( self, "FirehoseDeliveryRole", role_name="FirehoseDeliveryRole", assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"), #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221 inline_policies={"firehose_role_policy": firehose_role_policy_doc}) bizcard_text_to_s3_delivery_stream = aws_kinesisfirehose.CfnDeliveryStream( self, "BizcardTextToS3", delivery_stream_name="octember-bizcard-txt-to-s3", delivery_stream_type="KinesisStreamAsSource", kinesis_stream_source_configuration={ "kinesisStreamArn": text_kinesis_stream.stream_arn, "roleArn": firehose_role.role_arn }, extended_s3_destination_configuration={ "bucketArn": s3_bucket.bucket_arn, "bufferingHints": { "intervalInSeconds": 60, "sizeInMBs": 1 }, "cloudWatchLoggingOptions": { "enabled": True, "logGroupName": firehose_log_group_name, "logStreamName": "S3Delivery" }, "compressionFormat": "GZIP", "prefix": "bizcard-text/", "roleArn": firehose_role.role_arn }) sg_use_bizcard_es_cache = aws_ec2.SecurityGroup( self, "BizcardSearchCacheClientSG", vpc=vpc, allow_all_outbound=True, description= 'security group for octember bizcard search query cache client', security_group_name='use-octember-bizcard-es-cache') core.Tags.of(sg_use_bizcard_es_cache).add( 'Name', 'use-octember-bizcard-es-cache') sg_bizcard_es_cache = aws_ec2.SecurityGroup( self, "BizcardSearchCacheSG", vpc=vpc, allow_all_outbound=True, description= 'security group for octember bizcard search query cache', security_group_name='octember-bizcard-es-cache') core.Tags.of(sg_bizcard_es_cache).add('Name', 'octember-bizcard-es-cache') sg_bizcard_es_cache.add_ingress_rule( peer=sg_use_bizcard_es_cache, connection=aws_ec2.Port.tcp(6379), description='use-octember-bizcard-es-cache') es_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup( self, "QueryCacheSubnetGroup", description="subnet group for octember-bizcard-es-cache", subnet_ids=vpc.select_subnets( subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids, cache_subnet_group_name='octember-bizcard-es-cache') es_query_cache = aws_elasticache.CfnCacheCluster( self, "BizcardSearchQueryCache", cache_node_type="cache.t3.small", num_cache_nodes=1, engine="redis", engine_version="5.0.5", auto_minor_version_upgrade=False, cluster_name="octember-bizcard-es-cache", snapshot_retention_limit=3, snapshot_window="17:00-19:00", preferred_maintenance_window="mon:19:00-mon:20:30", #XXX: Do not use referece for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098 #cache_subnet_group_name=es_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC cache_subnet_group_name='octember-bizcard-es-cache', vpc_security_group_ids=[sg_bizcard_es_cache.security_group_id]) #XXX: If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before 
you start creating a cluster. # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-cache-cluster.html#cfn-elasticache-cachecluster-cachesubnetgroupname es_query_cache.add_depends_on(es_query_cache_subnet_group) #XXX: add more than 2 security groups # https://github.com/aws/aws-cdk/blob/ea10f0d141a48819ec0000cd7905feda993870a9/packages/%40aws-cdk/aws-lambda/lib/function.ts#L387 # https://github.com/aws/aws-cdk/issues/1555 # https://github.com/aws/aws-cdk/pull/5049 bizcard_search_lambda_fn = _lambda.Function( self, "BizcardSearchServer", runtime=_lambda.Runtime.PYTHON_3_7, function_name="BizcardSearchProxy", handler="es_search_bizcard.lambda_handler", description="Proxy server to search bizcard text", code=_lambda.Code.asset("./src/main/python/SearchBizcard"), environment={ 'ES_HOST': es_cfn_domain.attr_domain_endpoint, 'ES_INDEX': 'octember_bizcard', 'ES_TYPE': 'bizcard', 'ELASTICACHE_HOST': es_query_cache.attr_redis_endpoint_address }, timeout=core.Duration.minutes(1), layers=[es_lib_layer, redis_lib_layer], security_groups=[sg_use_bizcard_es, sg_use_bizcard_es_cache], vpc=vpc) #XXX: create API Gateway + LambdaProxy search_api = apigw.LambdaRestApi( self, "BizcardSearchAPI", handler=bizcard_search_lambda_fn, proxy=False, rest_api_name="BizcardSearch", description="This service serves searching bizcard text.", endpoint_types=[apigw.EndpointType.REGIONAL], deploy=True, deploy_options=apigw.StageOptions(stage_name="v1")) bizcard_search = search_api.root.add_resource('search') bizcard_search.add_method( "GET", method_responses=[ apigw.MethodResponse( status_code="200", response_models={'application/json': apigw.EmptyModel()}), apigw.MethodResponse(status_code="400"), apigw.MethodResponse(status_code="500") ]) sg_use_bizcard_graph_db = aws_ec2.SecurityGroup( self, "BizcardGraphDbClientSG", vpc=vpc, allow_all_outbound=True, description='security group for octember bizcard graph db client', security_group_name='use-octember-bizcard-neptune') core.Tags.of(sg_use_bizcard_graph_db).add( 'Name', 'use-octember-bizcard-neptune') sg_bizcard_graph_db = aws_ec2.SecurityGroup( self, "BizcardGraphDbSG", vpc=vpc, allow_all_outbound=True, description='security group for octember bizcard graph db', security_group_name='octember-bizcard-neptune') core.Tags.of(sg_bizcard_graph_db).add('Name', 'octember-bizcard-neptune') sg_bizcard_graph_db.add_ingress_rule( peer=sg_bizcard_graph_db, connection=aws_ec2.Port.tcp(8182), description='octember-bizcard-neptune') sg_bizcard_graph_db.add_ingress_rule( peer=sg_use_bizcard_graph_db, connection=aws_ec2.Port.tcp(8182), description='use-octember-bizcard-neptune') bizcard_graph_db_subnet_group = aws_neptune.CfnDBSubnetGroup( self, "NeptuneSubnetGroup", db_subnet_group_description= "subnet group for octember-bizcard-neptune", subnet_ids=vpc.select_subnets( subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids, db_subnet_group_name='octember-bizcard-neptune') bizcard_graph_db = aws_neptune.CfnDBCluster( self, "BizcardGraphDB", availability_zones=vpc.availability_zones, db_subnet_group_name=bizcard_graph_db_subnet_group. 
db_subnet_group_name, db_cluster_identifier="octember-bizcard", backup_retention_period=1, preferred_backup_window="08:45-09:15", preferred_maintenance_window="sun:18:00-sun:18:30", vpc_security_group_ids=[sg_bizcard_graph_db.security_group_id]) bizcard_graph_db.add_depends_on(bizcard_graph_db_subnet_group) bizcard_graph_db_instance = aws_neptune.CfnDBInstance( self, "BizcardGraphDBInstance", db_instance_class="db.r5.large", allow_major_version_upgrade=False, auto_minor_version_upgrade=False, availability_zone=vpc.availability_zones[0], db_cluster_identifier=bizcard_graph_db.db_cluster_identifier, db_instance_identifier="octember-bizcard", preferred_maintenance_window="sun:18:00-sun:18:30") bizcard_graph_db_instance.add_depends_on(bizcard_graph_db) bizcard_graph_db_replica_instance = aws_neptune.CfnDBInstance( self, "BizcardGraphDBReplicaInstance", db_instance_class="db.r5.large", allow_major_version_upgrade=False, auto_minor_version_upgrade=False, availability_zone=vpc.availability_zones[-1], db_cluster_identifier=bizcard_graph_db.db_cluster_identifier, db_instance_identifier="octember-bizcard-replica", preferred_maintenance_window="sun:18:00-sun:18:30") bizcard_graph_db_replica_instance.add_depends_on(bizcard_graph_db) bizcard_graph_db_replica_instance.add_depends_on( bizcard_graph_db_instance) gremlinpython_lib_layer = _lambda.LayerVersion( self, "GremlinPythonLib", layer_version_name="gremlinpython-lib", compatible_runtimes=[_lambda.Runtime.PYTHON_3_7], code=_lambda.Code.from_bucket( s3_lib_bucket, "var/octember-gremlinpython-lib.zip")) #XXX: https://github.com/aws/aws-cdk/issues/1342 upsert_to_neptune_lambda_fn = _lambda.Function( self, "UpsertBizcardToGraphDB", runtime=_lambda.Runtime.PYTHON_3_7, function_name="UpsertBizcardToNeptune", handler="upsert_bizcard_to_graph_db.lambda_handler", description="Upsert bizcard into neptune", code=_lambda.Code.asset( "./src/main/python/UpsertBizcardToGraphDB"), environment={ 'REGION_NAME': core.Aws.REGION, 'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_endpoint, 'NEPTUNE_PORT': bizcard_graph_db.attr_port }, timeout=core.Duration.minutes(5), layers=[gremlinpython_lib_layer], security_groups=[sg_use_bizcard_graph_db], vpc=vpc) upsert_to_neptune_lambda_fn.add_event_source(text_kinesis_event_source) log_group = aws_logs.LogGroup( self, "UpsertBizcardToGraphDBLogGroup", log_group_name="/aws/lambda/UpsertBizcardToNeptune", retention=aws_logs.RetentionDays.THREE_DAYS) log_group.grant_write(upsert_to_neptune_lambda_fn) sg_use_bizcard_neptune_cache = aws_ec2.SecurityGroup( self, "BizcardNeptuneCacheClientSG", vpc=vpc, allow_all_outbound=True, description= 'security group for octember bizcard recommendation query cache client', security_group_name='use-octember-bizcard-neptune-cache') core.Tags.of(sg_use_bizcard_neptune_cache).add( 'Name', 'use-octember-bizcard-es-cache') sg_bizcard_neptune_cache = aws_ec2.SecurityGroup( self, "BizcardNeptuneCacheSG", vpc=vpc, allow_all_outbound=True, description= 'security group for octember bizcard recommendation query cache', security_group_name='octember-bizcard-neptune-cache') core.Tags.of(sg_bizcard_neptune_cache).add( 'Name', 'octember-bizcard-neptune-cache') sg_bizcard_neptune_cache.add_ingress_rule( peer=sg_use_bizcard_neptune_cache, connection=aws_ec2.Port.tcp(6379), description='use-octember-bizcard-neptune-cache') recomm_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup( self, "RecommQueryCacheSubnetGroup", description="subnet group for octember-bizcard-neptune-cache", subnet_ids=vpc.select_subnets( 
subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids, cache_subnet_group_name='octember-bizcard-neptune-cache') recomm_query_cache = aws_elasticache.CfnCacheCluster( self, "BizcardRecommQueryCache", cache_node_type="cache.t3.small", num_cache_nodes=1, engine="redis", engine_version="5.0.5", auto_minor_version_upgrade=False, cluster_name="octember-bizcard-neptune-cache", snapshot_retention_limit=3, snapshot_window="17:00-19:00", preferred_maintenance_window="mon:19:00-mon:20:30", #XXX: Do not use referece for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098 #cache_subnet_group_name=recomm_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC cache_subnet_group_name='octember-bizcard-neptune-cache', vpc_security_group_ids=[ sg_bizcard_neptune_cache.security_group_id ]) recomm_query_cache.add_depends_on(recomm_query_cache_subnet_group) bizcard_recomm_lambda_fn = _lambda.Function( self, "BizcardRecommender", runtime=_lambda.Runtime.PYTHON_3_7, function_name="BizcardRecommender", handler="neptune_recommend_bizcard.lambda_handler", description="This service serves PYMK(People You May Know).", code=_lambda.Code.asset("./src/main/python/RecommendBizcard"), environment={ 'REGION_NAME': core.Aws.REGION, 'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_read_endpoint, 'NEPTUNE_PORT': bizcard_graph_db.attr_port, 'ELASTICACHE_HOST': recomm_query_cache.attr_redis_endpoint_address }, timeout=core.Duration.minutes(1), layers=[gremlinpython_lib_layer, redis_lib_layer], security_groups=[ sg_use_bizcard_graph_db, sg_use_bizcard_neptune_cache ], vpc=vpc) #XXX: create API Gateway + LambdaProxy recomm_api = apigw.LambdaRestApi( self, "BizcardRecommendAPI", handler=bizcard_recomm_lambda_fn, proxy=False, rest_api_name="BizcardRecommend", description="This service serves PYMK(People You May Know).", endpoint_types=[apigw.EndpointType.REGIONAL], deploy=True, deploy_options=apigw.StageOptions(stage_name="v1")) bizcard_recomm = recomm_api.root.add_resource('pymk') bizcard_recomm.add_method( "GET", method_responses=[ apigw.MethodResponse( status_code="200", response_models={'application/json': apigw.EmptyModel()}), apigw.MethodResponse(status_code="400"), apigw.MethodResponse(status_code="500") ]) sagemaker_notebook_role_policy_doc = aws_iam.PolicyDocument() sagemaker_notebook_role_policy_doc.add_statements( aws_iam.PolicyStatement( **{ "effect": aws_iam.Effect.ALLOW, "resources": [ "arn:aws:s3:::aws-neptune-notebook", "arn:aws:s3:::aws-neptune-notebook/*" ], "actions": ["s3:GetObject", "s3:ListBucket"] })) sagemaker_notebook_role_policy_doc.add_statements( aws_iam.PolicyStatement( **{ "effect": aws_iam.Effect.ALLOW, "resources": [ "arn:aws:neptune-db:{region}:{account}:{cluster_id}/*". format(region=core.Aws.REGION, account=core.Aws.ACCOUNT_ID, cluster_id=bizcard_graph_db. 
attr_cluster_resource_id)
                ],
                "actions": ["neptune-db:connect"]
            }))

    sagemaker_notebook_role = aws_iam.Role(
        self, 'SageMakerNotebookForNeptuneWorkbenchRole',
        role_name='AWSNeptuneNotebookRole-OctemberBizcard',
        assumed_by=aws_iam.ServicePrincipal('sagemaker.amazonaws.com'),
        #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
        inline_policies={
            'AWSNeptuneNotebook': sagemaker_notebook_role_policy_doc
        })

    neptune_wb_lifecycle_content = '''#!/bin/bash
sudo -u ec2-user -i <<'EOF'
echo "export GRAPH_NOTEBOOK_AUTH_MODE=DEFAULT" >> ~/.bashrc
echo "export GRAPH_NOTEBOOK_HOST={NeptuneClusterEndpoint}" >> ~/.bashrc
echo "export GRAPH_NOTEBOOK_PORT={NeptuneClusterPort}" >> ~/.bashrc
echo "export NEPTUNE_LOAD_FROM_S3_ROLE_ARN=''" >> ~/.bashrc
echo "export AWS_REGION={AWS_Region}" >> ~/.bashrc
aws s3 cp s3://aws-neptune-notebook/graph_notebook.tar.gz /tmp/graph_notebook.tar.gz
rm -rf /tmp/graph_notebook
tar -zxvf /tmp/graph_notebook.tar.gz -C /tmp
/tmp/graph_notebook/install.sh
EOF
'''.format(NeptuneClusterEndpoint=bizcard_graph_db.attr_endpoint,
           NeptuneClusterPort=bizcard_graph_db.attr_port,
           AWS_Region=core.Aws.REGION)

    neptune_wb_lifecycle_config_prop = aws_sagemaker.CfnNotebookInstanceLifecycleConfig.NotebookInstanceLifecycleHookProperty(
        content=core.Fn.base64(neptune_wb_lifecycle_content))

    neptune_wb_lifecycle_config = aws_sagemaker.CfnNotebookInstanceLifecycleConfig(
        self, 'NeptuneWorkbenchLifeCycleConfig',
        notebook_instance_lifecycle_config_name=
        'AWSNeptuneWorkbenchOctemberBizcardLCConfig',
        on_start=[neptune_wb_lifecycle_config_prop])

    neptune_workbench = aws_sagemaker.CfnNotebookInstance(
        self, 'NeptuneWorkbench',
        instance_type='ml.t2.medium',
        role_arn=sagemaker_notebook_role.role_arn,
        lifecycle_config_name=neptune_wb_lifecycle_config.
        notebook_instance_lifecycle_config_name,
        notebook_instance_name='OctemberBizcard-NeptuneWorkbench',
        root_access='Disabled',
        # security_group_ids expects security group IDs, not names
        security_group_ids=[sg_use_bizcard_graph_db.security_group_id],
        subnet_id=bizcard_graph_db_subnet_group.subnet_ids[0])
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) # The code that defines your stack goes here repository = codecommit.Repository( self, "slackops-repository", repository_name="slackops-pipeline-repo", description="Repo for the SlackOps Pipeline Demo", ) website_bucket = s3.Bucket(self, "website-bucket", removal_policy=core.RemovalPolicy.DESTROY, auto_delete_objects=True, public_read_access=True, website_index_document="index.html") manual_approval_topic = sns.Topic( self, "manual-approval-notification", ) artifact_bucket = s3.Bucket(self, "artifact-bucket", removal_policy=core.RemovalPolicy.DESTROY) source_artifact = codepipeline.Artifact(artifact_name="Source") deployment_artifact = codepipeline.Artifact(artifact_name="Deployment") pipeline = codepipeline.Pipeline( self, "slackops-pipeline", artifact_bucket=artifact_bucket, stages=[ codepipeline.StageOptions( stage_name="Source", actions=[ codepipeline_actions.CodeCommitSourceAction( repository=repository, branch="master", output=source_artifact, action_name="Source") ]), codepipeline.StageOptions( stage_name="Build", actions=[ codepipeline_actions.CodeBuildAction( input=source_artifact, action_name="Build", project=codebuild.PipelineProject( self, "build-project", build_spec=codebuild.BuildSpec. from_source_filename("buildspec.yml"), environment=codebuild.BuildEnvironment( build_image=codebuild.LinuxBuildImage. STANDARD_5_0), ), outputs=[deployment_artifact]) ]), codepipeline.StageOptions( stage_name=MANUAL_APPROVAL_STAGE_NAME, actions=[ codepipeline_actions.ManualApprovalAction( action_name=MANUAL_APPROVAL_ACTION_NAME, additional_information= "Please Approve the Deployment", notification_topic=manual_approval_topic, ) ]), codepipeline.StageOptions( stage_name="Deploy", actions=[ codepipeline_actions.S3DeployAction( bucket=website_bucket, input=deployment_artifact, access_control=s3.BucketAccessControl.PUBLIC_READ, action_name="deploy-to-s3") ]) ]) # Build the API Gateway to record the approval or rejection rest_api = apigateway.RestApi(self, "slackops-apigw", deploy_options=apigateway.StageOptions( stage_name="prod", )) root_resource = rest_api.root.add_resource("v1") approval_resource = root_resource.add_resource("approval") api_gateway_role = iam.Role(self, "slackops-apigw-role", assumed_by=iam.ServicePrincipal( service="apigateway.amazonaws.com", )) api_gateway_role.add_to_policy( iam.PolicyStatement(actions=["codepipeline:PutApprovalResult"], resources=[pipeline.pipeline_arn + "/*"])) # Double curlies to make str.format work mapping_template = """ #set($token = $input.params("token")) #set($response = $input.params("response")) {{ "actionName": "{action_name}", "pipelineName": "{pipeline_name}", "result": {{ "status": "$response", "summary": "" }}, "stageName": "{stage_name}", "token": "$token" }} """.format( action_name="approve-before-publication", pipeline_name=pipeline.pipeline_name, stage_name="Approval", ) approval_integration = apigateway.AwsIntegration( service="codepipeline", action="PutApprovalResult", integration_http_method="POST", options=apigateway.IntegrationOptions( credentials_role=api_gateway_role, request_parameters={ "integration.request.header.x-amz-target": "'CodePipeline_20150709.PutApprovalResult'", "integration.request.header.Content-Type": "'application/x-amz-json-1.1'", }, passthrough_behavior=apigateway.PassthroughBehavior.NEVER, request_templates={"application/json": mapping_template}, integration_responses=[ 
apigateway.IntegrationResponse( status_code='400', selection_pattern="4\d{2}", response_parameters={ 'method.response.header.error': 'integration.response.body' }), apigateway.IntegrationResponse( status_code='500', selection_pattern="5\d{2}", response_parameters={ 'method.response.header.error': 'integration.response.body' }), apigateway.IntegrationResponse( status_code='200', selection_pattern="2\d{2}", response_parameters={ 'method.response.header.response': 'integration.response.body' }), ])) approval_method = approval_resource.add_method( http_method="GET", request_validator=apigateway.RequestValidator( self, "request-validator", rest_api=rest_api, request_validator_name="ParamValidator", validate_request_parameters=True), request_parameters={ "method.request.querystring.token": True, "method.request.querystring.response": True, # Approved / Rejected }, method_responses=[ apigateway.MethodResponse( status_code='400', response_parameters={'method.response.header.error': True}), apigateway.MethodResponse( status_code='500', response_parameters={'method.response.header.error': True}), apigateway.MethodResponse( status_code='200', response_parameters={ 'method.response.header.response': True }), ], integration=approval_integration, ) # Notification mechanism ssm_parameter_webhook = ssm.StringParameter( self, "slackops-webhook-parameter", string_value="<replace-me>", parameter_name="/slackops/webhook-url") notification_lambda = _lambda.PythonFunction( self, "slackops-notification", entry=os.path.join(os.path.dirname(__file__), "..", "src"), index="index.py", handler="notification_handler", environment={ "WEBHOOK_URL_PARAMETER": ssm_parameter_webhook.parameter_name, "API_ENDPOINT": rest_api.url_for_path("/v1/approval"), }) notification_lambda.add_event_source( lambda_event_sources.SnsEventSource(topic=manual_approval_topic)) ssm_parameter_webhook.grant_read(notification_lambda) # Outputs core.CfnOutput(self, "repositoryHttps", value=repository.repository_clone_url_http) core.CfnOutput(self, "repositorySSH", value=repository.repository_clone_url_ssh) core.CfnOutput(self, "websiteUrl", value=website_bucket.bucket_website_url)
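The notification Lambda's source (src/index.py) is not shown here; a hypothetical sketch of how it might turn API_ENDPOINT plus the pipeline's approval token into the Approved/Rejected links that the GET method above expects:

# Hypothetical helper inside the notification handler (not shown above).
# The GET method requires 'token' and 'response' query string parameters,
# and the mapping template forwards 'response' as the approval status.
import os
from urllib.parse import urlencode


def build_links(approval_token: str) -> dict:
    endpoint = os.environ["API_ENDPOINT"]
    return {
        "approve": f"{endpoint}?{urlencode({'token': approval_token, 'response': 'Approved'})}",
        "reject": f"{endpoint}?{urlencode({'token': approval_token, 'response': 'Rejected'})}",
    }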
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) organization_id = self.node.try_get_context("organizationId") # create dynamo table allocation_table = aws_dynamodb.Table( self, "CidrBlockTable", partition_key=aws_dynamodb.Attribute( name="vpcCidrBlock", type=aws_dynamodb.AttributeType.STRING)) # create producer lambda function create_lambda = aws_lambda.Function( self, "create_lambda_function", runtime=aws_lambda.Runtime.PYTHON_3_6, handler="create.lambda_handler", code=aws_lambda.Code.asset("./src/")) create_lambda.add_environment("TABLE_NAME", allocation_table.table_name) create_lambda.add_environment("MASTER_CIDR_BLOCK", "10.0.0.0/12") create_lambda.add_environment("VPC_NETMASK", "24") create_lambda.add_environment("SUBNET_NETMASK", "26") # grant permission to lambda to write to demo table allocation_table.grant_write_data(create_lambda) allocation_table.grant_read_data(create_lambda) # API gateway ... Allow own Organizations api_policy = aws_iam.PolicyDocument() api_policy.add_statements( aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, principals=[aws_iam.AnyPrincipal()], actions=["execute-api:Invoke"], resources=[core.Fn.join('', ['execute-api:/', '*'])])) api_policy.add_statements( aws_iam.PolicyStatement( effect=aws_iam.Effect.DENY, actions=["execute-api:Invoke"], conditions={ "StringNotEquals": { "aws:PrincipalOrgID": [organization_id] } }, principals=[aws_iam.AnyPrincipal()], resources=[core.Fn.join('', ['execute-api:/', '*'])])) base_api = aws_apigateway.RestApi(self, 'ApiGateway', rest_api_name='cidr_vending_machine', policy=api_policy) vpc_api = base_api.root.add_resource('vpc') rest_api_role = aws_iam.Role( self, 'RestAPIRole', assumed_by=aws_iam.ServicePrincipal('apigateway.amazonaws.com'), ) allocation_table.grant_read_write_data(rest_api_role) patch_request_string = """ {{ "TableName": "{}", "Key": {{ "vpcCidrBlock": {{ "S": "$input.params('cidr_block')" }} }}, "UpdateExpression": "set vpcId = :v", "ConditionExpression": "accountId = :v2", "ExpressionAttributeValues" : {{ ":v": {{"S": "$input.params('vpc_id')"}}, ":v2": {{"S": "$context.identity.accountId"}} }}, "ReturnValues": "ALL_NEW" }}""" delete_request_string = """ {{ "TableName": "{}", "Key": {{ "vpcCidrBlock": {{ "S": "$input.params('cidr_block')" }} }}, "ConditionExpression": "accountId = :v2", "ExpressionAttributeValues" : {{ ":v2": {{"S": "$context.identity.accountId"}} }} }}""" network_integration = aws_apigateway.LambdaIntegration(create_lambda) update_integration = aws_apigateway.AwsIntegration( service='dynamodb', action='UpdateItem', integration_http_method='POST', options=aws_apigateway.IntegrationOptions( request_templates={ "application/json": patch_request_string.format(allocation_table.table_name) }, integration_responses=[ aws_apigateway.IntegrationResponse(status_code="200") ], credentials_role=rest_api_role)) delete_integration = aws_apigateway.AwsIntegration( service='dynamodb', action='DeleteItem', integration_http_method='POST', options=aws_apigateway.IntegrationOptions( request_templates={ "application/json": delete_request_string.format(allocation_table.table_name) }, integration_responses=[ aws_apigateway.IntegrationResponse(status_code="200") ], credentials_role=rest_api_role)) vpc_api.add_method( 'POST', network_integration, authorization_type=aws_apigateway.AuthorizationType.IAM) vpc_api.add_method( 'DELETE', delete_integration, authorization_type=aws_apigateway.AuthorizationType.IAM, method_responses=[ 
aws_apigateway.MethodResponse(status_code="200") ]) vpc_api.add_method( 'PATCH', update_integration, authorization_type=aws_apigateway.AuthorizationType.IAM, method_responses=[ aws_apigateway.MethodResponse(status_code="200") ])
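# ---------------------------------------------------------------------------
# The POST integration above proxies to create.lambda_handler in ./src/,
# which is not shown in this stack. A minimal sketch of what such a handler
# could look like, assuming it walks the subnets of MASTER_CIDR_BLOCK and
# reserves the first block that is not yet in the table; the response shape
# and any attribute other than vpcCidrBlock are illustrative assumptions.
import ipaddress
import json
import os

import boto3
from botocore.exceptions import ClientError

dynamodb = boto3.resource("dynamodb")


def lambda_handler(event, context):
    table = dynamodb.Table(os.environ["TABLE_NAME"])
    master = ipaddress.ip_network(os.environ["MASTER_CIDR_BLOCK"])
    netmask = int(os.environ["VPC_NETMASK"])
    # IAM-authorized proxy requests carry the caller's account id.
    account_id = event["requestContext"]["identity"]["accountId"]

    for candidate in master.subnets(new_prefix=netmask):
        try:
            # Conditional put: only succeeds if this CIDR block is still free.
            table.put_item(
                Item={"vpcCidrBlock": str(candidate), "accountId": account_id},
                ConditionExpression="attribute_not_exists(vpcCidrBlock)")
            return {"statusCode": 200,
                    "body": json.dumps({"cidr_block": str(candidate)})}
        except ClientError as error:
            if error.response["Error"]["Code"] != "ConditionalCheckFailedException":
                raise

    return {"statusCode": 409,
            "body": json.dumps({"error": "no free CIDR blocks left"})}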
def _add_get_integration(self, rest_api: apigw.Resource, asset_bucket, s3_integration_role) -> apigw.Resource: """ Add integration for /ui/ """ s3_integration = apigw.AwsIntegration( service='s3', path=f'{asset_bucket.bucket_name}/index.html', integration_http_method='GET', options=apigw.IntegrationOptions( credentials_role=s3_integration_role, integration_responses=[ apigw.IntegrationResponse( status_code='200', response_parameters={ 'method.response.header.Content-Type': 'integration.response.header.Content-Type', 'method.response.header.Content-Length': 'integration.response.header.Content-Length', 'method.response.header.Timestamp': 'integration.response.header.Date' }), apigw.IntegrationResponse( status_code='400', selection_pattern=r'4\d{2}', response_parameters={ 'method.response.header.Content-Type': 'integration.response.header.Content-Type', 'method.response.header.Content-Length': 'integration.response.header.Content-Length' }), apigw.IntegrationResponse( status_code='500', selection_pattern=r'5\d{2}', response_parameters={ 'method.response.header.Content-Type': 'integration.response.header.Content-Type', 'method.response.header.Content-Length': 'integration.response.header.Content-Length' }) ])) ui = rest_api.add_resource('ui') ui.add_method('GET', integration=s3_integration, method_responses=[ apigw.MethodResponse( status_code='200', response_parameters={ 'method.response.header.Content-Type': True, 'method.response.header.Content-Length': True, 'method.response.header.Timestamp': True }), apigw.MethodResponse( status_code='400', response_parameters={ 'method.response.header.Content-Type': True, 'method.response.header.Content-Length': True }), apigw.MethodResponse( status_code='500', response_parameters={ 'method.response.header.Content-Type': True, 'method.response.header.Content-Length': True }) ]) return ui
def _add_item_integration(self, ui_resource: apigw.Resource, asset_bucket, s3_integration_role) -> apigw.Resource: """ Add integration for /ui/{object} """ s3_integration = apigw.AwsIntegration( service='s3', path=f'{asset_bucket.bucket_name}/{{object}}', integration_http_method='GET', options=apigw.IntegrationOptions( credentials_role=s3_integration_role, integration_responses=[ apigw.IntegrationResponse( status_code='200', response_parameters={ 'method.response.header.Content-Type': 'integration.response.header.Content-Type', 'method.response.header.Content-Length': 'integration.response.header.Content-Length', 'method.response.header.Timestamp': 'integration.response.header.Date' }), apigw.IntegrationResponse( status_code='400', selection_pattern=r'4\d{2}', response_parameters={ 'method.response.header.Content-Type': 'integration.response.header.Content-Type', 'method.response.header.Content-Length': 'integration.response.header.Content-Length' }), apigw.IntegrationResponse( status_code='500', selection_pattern=r'5\d{2}', response_parameters={ 'method.response.header.Content-Type': 'integration.response.header.Content-Type', 'method.response.header.Content-Length': 'integration.response.header.Content-Length' }) ], request_parameters={ 'integration.request.path.object': 'method.request.path.item' })) ui_item = ui_resource.add_resource('{item}') ui_item.add_method( 'GET', integration=s3_integration, method_responses=[ apigw.MethodResponse( status_code='200', response_parameters={ 'method.response.header.Content-Type': True, 'method.response.header.Content-Length': True, 'method.response.header.Timestamp': True }), apigw.MethodResponse( status_code='400', response_parameters={ 'method.response.header.Content-Type': True, 'method.response.header.Content-Length': True }), apigw.MethodResponse( status_code='500', response_parameters={ 'method.response.header.Content-Type': True, 'method.response.header.Content-Length': True }) ], request_parameters={'method.request.path.item': True}) return ui_item
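# ---------------------------------------------------------------------------
# A minimal sketch of how the two helpers above could be wired together from
# the owning construct; the bucket, role, and RestApi created here are
# illustrative, since that part of the construct is not shown.
from aws_cdk import aws_apigateway as apigw
from aws_cdk import aws_iam as iam
from aws_cdk import aws_s3 as s3


def _build_ui(self) -> None:
    asset_bucket = s3.Bucket(self, 'UiAssetBucket')

    s3_integration_role = iam.Role(
        self, 'UiS3IntegrationRole',
        assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
    # API Gateway only needs to read the static assets it proxies.
    asset_bucket.grant_read(s3_integration_role)

    rest_api = apigw.RestApi(self, 'UiApi')

    # /ui serves index.html; /ui/{item} proxies any other object in the bucket.
    ui = self._add_get_integration(rest_api.root, asset_bucket, s3_integration_role)
    self._add_item_integration(ui, asset_bucket, s3_integration_role)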
def __init__(self, app: App, id: str, **kwargs) -> None: super().__init__(app, id) bucket: s3.Bucket = s3.Bucket(self, "WidgetStore") api: apigw.RestApi = apigw.RestApi( self, "widgets-api", rest_api_name="Widget Service", description="This service serves widgets.") rest_api_role: iam.Role = iam.Role( self, "RestAPIRole", assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"), managed_policies=[ iam.ManagedPolicy.from_aws_managed_policy_name( "AmazonS3FullAccess") ]) list_objects_response: apigw.IntegrationResponse = apigw.IntegrationResponse( status_code="200") list_objects_integration_options: apigw.IntegrationOptions = apigw.IntegrationOptions( credentials_role=rest_api_role, integration_responses=[list_objects_response], ) get_widget_integration_options: apigw.IntegrationOptions = apigw.IntegrationOptions( credentials_role=rest_api_role, integration_responses=[list_objects_response], request_templates={ "application/json": "#set($context.requestOverride.path.object = $input.params('id'))" }) put_widget_integration_options: apigw.IntegrationOptions = apigw.IntegrationOptions( credentials_role=rest_api_role, integration_responses=[list_objects_response], passthrough_behavior=apigw.PassthroughBehavior.NEVER, request_parameters={ "integration.request.path.object": "method.request.path.id" }, request_templates={ "application/json": "#set($now=$context.requestTimeEpoch)\n" "#set($body=\"$input.params('id') created $now\")" "\n$util.base64Encode($body)" }) get_widgets_integration: apigw.AwsIntegration = apigw.AwsIntegration( service="s3", integration_http_method="GET", path=bucket.bucket_name, options=list_objects_integration_options) get_widget_integration: apigw.AwsIntegration = apigw.AwsIntegration( service="s3", integration_http_method="GET", path="{}/{{object}}".format(bucket.bucket_name), options=get_widget_integration_options) put_widget_integration: apigw.AwsIntegration = apigw.AwsIntegration( service="s3", integration_http_method="PUT", path="{}/{{object}}".format(bucket.bucket_name), options=put_widget_integration_options) delete_widget_integration: apigw.AwsIntegration = apigw.AwsIntegration( service="s3", integration_http_method="DELETE", path="{}/{{object}}".format(bucket.bucket_name), options=get_widget_integration_options) method_response: apigw.MethodResponse = apigw.MethodResponse( status_code="200") api.root.add_method("GET", get_widgets_integration, method_responses=[method_response]) widget = api.root.add_resource('{id}') widget.add_method("GET", get_widget_integration, method_responses=[method_response]) widget.add_method("POST", put_widget_integration, method_responses=[method_response], request_parameters={"method.request.path.id": True}) widget.add_method("DELETE", delete_widget_integration, method_responses=[method_response])
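# ---------------------------------------------------------------------------
# A quick smoke test of the widget service once it is deployed; the endpoint
# URL is a placeholder for the value CDK prints for the RestApi, and the
# widget id is arbitrary.
import requests

api_url = "https://abc123.execute-api.us-east-1.amazonaws.com/prod"  # placeholder

# POST /{id} writes "<id> created <epoch>" to the bucket via the PUT integration.
requests.post(f"{api_url}/my-widget")

# GET / lists the bucket; GET /{id} returns the stored object body.
print(requests.get(f"{api_url}/").text)
print(requests.get(f"{api_url}/my-widget").text)

# DELETE /{id} removes the object again.
requests.delete(f"{api_url}/my-widget")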
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    ## Create S3 Bucket. We'll populate it separately.
    bucket_name = "{}-s3-ecs-apigw-test".format(core.Aws.ACCOUNT_ID)
    bucket = s3.Bucket(self, "s3-ecs-apigw-test",
        bucket_name=bucket_name,
        versioned=True,
        public_read_access=False
    )

    ## Create ECS Cluster, Tasks and Service
    ### Create the VPC for the demo
    vpc = ec2.Vpc(self, "MyVpc", max_azs=3)

    ### Create the ECS Cluster
    cluster = ecs.Cluster(self, "ecs-apigw-test-cluster",
        cluster_name="ecs-apigw-test-cluster",
        container_insights=True,
        vpc=vpc)

    ### Using the Network Load Balanced Fargate pattern, this will create the container definition, the task definition, the service and the Network Load Balancer for it.
    ecs_deploy = ecsp.NetworkLoadBalancedFargateService(self, "ecs-apigw-test",
        cluster=cluster,
        cpu=512,
        desired_count=2,
        public_load_balancer=False,
        memory_limit_mib=2048,
        task_image_options=ecsp.NetworkLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_registry("strm/helloworld-http")
        ),
        health_check_grace_period=core.Duration.minutes(5)
    )

    ### Have to add an ingress rule to allow traffic through. This allows the CIDR range of the VPC.
    ecs_deploy.service.connections.security_groups[0].add_ingress_rule(
        ec2.Peer.ipv4(vpc.vpc_cidr_block),
        ec2.Port(
            protocol=ec2.Protocol.TCP,
            string_representation="",
            from_port=80,
            to_port=80
        )
    )

    ## Create API Gateway resources
    ### Create the VPC Link to the Network Load Balancer
    vpc_link = apigw.VpcLink(self, "ecs-test-vpc-link",
        targets=[ecs_deploy.load_balancer])

    ### Create the API
    api = apigw.RestApi(self, "ecs-s3-test-api",
        rest_api_name="ECS S3 Test API",
        description="Test API for distributing traffic to S3 and ECS",
        binary_media_types=["image/png"])

    ### Create the execution role for the API methods.
rest_api_role = iam.Role( self, "RestAPIRole", assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"), managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3ReadOnlyAccess")]) ### Generic Method Response that can be used with each API method method_response = apigw.MethodResponse(status_code="200", response_parameters={"method.response.header.Content-Type": True}) ### Root URI root_integration_response = apigw.IntegrationResponse( status_code="200", response_templates={"text/html": "$input.path('$')"}, response_parameters={"method.response.header.Content-Type": "'text/html'"}) root_integration_options = apigw.IntegrationOptions( credentials_role=rest_api_role, integration_responses=[root_integration_response], request_templates={"application/json": "Action=SendMessage&MessageBody=$input.body"}, passthrough_behavior=apigw.PassthroughBehavior.NEVER, request_parameters={ "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'"}) root_resource_s3_integration = apigw.AwsIntegration( service="s3", integration_http_method="GET", subdomain=bucket_name, path="index.html", options=root_integration_options) root_method = api.root.add_method("GET", root_resource_s3_integration, method_responses=[method_response]) ### API URI api_integration = apigw.Integration( type=apigw.IntegrationType.HTTP_PROXY, uri="http://{}".format(ecs_deploy.load_balancer.load_balancer_dns_name), integration_http_method="GET", options={ "connection_type": apigw.ConnectionType.VPC_LINK, "vpc_link": vpc_link } ) apis = api.root.add_resource("apis") apii = apis.add_resource("{api}") # apis = api.root.add_resource("apis") apii_get = apii.add_method("GET", api_integration, method_responses=[method_response], request_parameters={ "method.request.path.api": True,}) ## Add Images URI image_integration_response = apigw.IntegrationResponse( status_code="200", content_handling=apigw.ContentHandling.CONVERT_TO_BINARY, # response_templates={"text/html": "$input.path('$')"}, response_parameters={"method.response.header.Content-Type": "'image/png'"}) image_integration_options = apigw.IntegrationOptions( credentials_role=rest_api_role, integration_responses=[image_integration_response], request_templates={"application/json": "Action=SendMessage&MessageBody=$input.body"}, passthrough_behavior=apigw.PassthroughBehavior.NEVER, request_parameters={ "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'", "integration.request.path.image": "method.request.path.image"}) images_resource_s3_integration = apigw.AwsIntegration( service="s3", integration_http_method="GET", subdomain=bucket_name, path="images/{image}", options=image_integration_options) images_resource = api.root.add_resource("images") image_resource = images_resource.add_resource("{image}") images_get = image_resource.add_method("GET", images_resource_s3_integration, method_responses=[method_response], request_parameters={ "method.request.path.image": True,}) ## Fall Through folder = api.root.add_resource("{folder}") item = folder.add_resource("{item}") integration_response = apigw.IntegrationResponse( status_code="200", response_templates={"text/html": "$input.path('$')"}, response_parameters={"method.response.header.Content-Type": "'text/html'"}) s3_proxy_integration_options = apigw.IntegrationOptions( credentials_role=rest_api_role, integration_responses=[integration_response], request_templates={"application/json": "Action=SendMessage&MessageBody=$input.body"}, 
passthrough_behavior=apigw.PassthroughBehavior.NEVER, request_parameters={ "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'", "integration.request.path.item": "method.request.path.item", "integration.request.path.folder": "method.request.path.folder"}) s3_proxy_resource_s3_integration = apigw.AwsIntegration( service="s3", integration_http_method="GET", subdomain=bucket_name, path="{folder}/{item}", options=s3_proxy_integration_options) item_get = item.add_method("GET", s3_proxy_resource_s3_integration, method_responses=[method_response], request_parameters={ "method.request.path.item": True, "method.request.path.folder": True } )
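# ---------------------------------------------------------------------------
# The bucket above is populated separately; a minimal sketch of doing that
# with boto3, assuming a local site/ directory containing index.html, an
# images/ folder of PNGs, and any other folders the {folder}/{item}
# fall-through route should serve. File names and paths are illustrative.
import mimetypes
import pathlib

import boto3

s3_client = boto3.client("s3")
bucket_name = "<account-id>-s3-ecs-apigw-test"  # matches the CDK naming scheme
site_root = pathlib.Path("site")

for path in site_root.rglob("*"):
    if path.is_file():
        content_type, _ = mimetypes.guess_type(path.name)
        s3_client.upload_file(
            str(path),
            bucket_name,
            str(path.relative_to(site_root)),  # keep folder structure, e.g. images/logo.png
            ExtraArgs={"ContentType": content_type or "binary/octet-stream"})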
def __init__(self, scope: Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) #Create the SQS queue queue = sqs.Queue(self, "SQSQueue") #Create the API GW service role with permissions to call SQS rest_api_role = iam.Role( self, "RestAPIRole", assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"), managed_policies=[ iam.ManagedPolicy.from_aws_managed_policy_name( "AmazonSQSFullAccess") ]) #Create an API GW Rest API base_api = apigw.RestApi(self, 'ApiGW', rest_api_name='TestAPI') base_api.root.add_method("ANY") #Create a resource named "example" on the base API api_resource = base_api.root.add_resource('example') #Create API Integration Response object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html integration_response = apigw.IntegrationResponse( status_code="200", response_templates={"application/json": ""}, ) #Create API Integration Options object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html api_integration_options = apigw.IntegrationOptions( credentials_role=rest_api_role, integration_responses=[integration_response], request_templates={ "application/json": "Action=SendMessage&MessageBody=$input.body" }, passthrough_behavior=apigw.PassthroughBehavior.NEVER, request_parameters={ "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'" }, ) #Create AWS Integration Object for SQS: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/AwsIntegration.html api_resource_sqs_integration = apigw.AwsIntegration( service="sqs", integration_http_method="POST", path="{}/{}".format(Aws.ACCOUNT_ID, queue.queue_name), options=api_integration_options) #Create a Method Response Object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/MethodResponse.html method_response = apigw.MethodResponse(status_code="200") #Add the API GW Integration to the "example" API GW Resource api_resource.add_method("POST", api_resource_sqs_integration, method_responses=[method_response]) #Creating Lambda function that will be triggered by the SQS Queue sqs_lambda = _lambda.Function( self, 'SQSTriggerLambda', handler='lambda-handler.handler', runtime=_lambda.Runtime.PYTHON_3_7, code=_lambda.Code.from_asset('lambda'), ) #Create an SQS event source for Lambda sqs_event_source = lambda_event_source.SqsEventSource(queue) #Add SQS event source to the Lambda function sqs_lambda.add_event_source(sqs_event_source)
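# ---------------------------------------------------------------------------
# The function body in lambda/lambda-handler.py is not shown above; a
# minimal sketch of a handler for it, assuming the POST body sent to
# /example is JSON. Each SQS record's body is the raw request body that the
# Action=SendMessage mapping template forwarded.
import json


def handler(event, context):
    for record in event["Records"]:
        payload = json.loads(record["body"])
        # Replace with real processing; the demo just logs the message.
        print(f"Received message {record['messageId']}: {payload}")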
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    self.current_dir = os.path.dirname(__file__)
    self.bucket = s3.Bucket(
        self, "qs-migration-bucket",
        bucket_name=f'quicksight-migration-{core.Aws.ACCOUNT_ID}',
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
    )
    self.quicksight_migration_lambda_role = iam.Role(
        self, 'quicksight-migration-lambda-role',
        description='Role for the Quicksight dashboard migration Lambdas',
        role_name='quicksight-migration-lambda-role',
        max_session_duration=core.Duration.seconds(3600),
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        inline_policies={
            'AllowAccess': iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        'logs:CreateLogGroup', 'logs:CreateLogStream',
                        'logs:PutLogEvents'
                    ],
                    resources=[
                        f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:*'
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["sts:AssumeRole", "iam:ListRoles"],
                    resources=[
                        "arn:aws:iam::*:role/quicksight-migration-*-assume-role"
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["s3:PutObject", "s3:ListBucket"],
                    resources=[
                        self.bucket.bucket_arn,
                        f"{self.bucket.bucket_arn}/*"
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["secretsmanager:GetSecretValue"],
                    resources=[
                        f"arn:aws:secretsmanager:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:secret:*"
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "quicksight:Create*", "quicksight:Delete*",
                        "quicksight:Describe*", "quicksight:List*",
                        "quicksight:Search*", "quicksight:Update*"
                    ],
                    resources=["*"])
            ])
        })
    self.quicksight_migration_target_assume_role = iam.Role(
        self, 'quicksight-migration-target-assume-role',
        description='Role for the Quicksight dashboard migration Lambdas to assume',
        role_name='quicksight-migration-target-assume-role',
        max_session_duration=core.Duration.seconds(3600),
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        inline_policies={
            'AllowAccess': iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "quicksight:Create*", "quicksight:Delete*",
                        "quicksight:Describe*", "quicksight:List*",
                        "quicksight:Search*", "quicksight:Update*"
                    ],
                    resources=["*"]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "ssm:GetParameter",
                    ],
                    resources=["arn:aws:ssm:*:*:parameter/infra/config"])
            ])
        })
    self.quicksight_migration_target_assume_role.assume_role_policy.add_statements(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=['sts:AssumeRole'],
            principals=[iam.AccountPrincipal(core.Aws.ACCOUNT_ID)]))
    # API Gateway to SQS
    self.rest_api_role = iam.Role(
        self, "RestAPIRole",
        assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSQSFullAccess")
        ])
    self.queue = sqs.Queue(self, "quicksight-migration-sqs-queue",
                           queue_name="quicksight-migration-sqs",
                           visibility_timeout=core.Duration.minutes(15))
    self.integration_response = apigw.IntegrationResponse(
        status_code="200",
        response_templates={"application/json": ""},
        response_parameters={
            "method.response.header.Access-Control-Allow-Headers":
            "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
            "method.response.header.Access-Control-Allow-Origin": "'*'",
            "method.response.header.Access-Control-Allow-Methods": "'POST,OPTIONS'"
        })
    self.api_integration_options = apigw.IntegrationOptions(
        credentials_role=self.rest_api_role,
        integration_responses=[self.integration_response],
        request_templates={
            "application/json":
'Action=SendMessage&MessageBody=$util.urlEncode("$input.body")' }, passthrough_behavior=apigw.PassthroughBehavior.NEVER, request_parameters={ "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'" }) self.api_resource_sqs_integration = apigw.AwsIntegration( service="sqs", integration_http_method="POST", path="{}/{}".format(core.Aws.ACCOUNT_ID, self.queue.queue_name), options=self.api_integration_options) self.base_api = apigw.RestApi( self, 'quicksight-migration-sqs', rest_api_name='quicksight-migration-sqs', default_cors_preflight_options=apigw.CorsOptions( allow_origins=apigw.Cors.ALL_ORIGINS, allow_methods=["POST", "OPTIONS"], allow_headers=[ 'Access-Control-Allow-Origin', 'Access-Control-Allow-Headers', 'Content-Type' ])) self.base_api.root.add_method( "POST", self.api_resource_sqs_integration, method_responses=[{ 'statusCode': '200', 'responseParameters': { 'method.response.header.Access-Control-Allow-Headers': True, 'method.response.header.Access-Control-Allow-Methods': True, 'method.response.header.Access-Control-Allow-Origin': True } }]) self.quicksight_migration_lambda = _lambda.Function( self, 'quicksight-migration-lambda', handler='quicksight_migration.lambda_function.lambda_handler', runtime=_lambda.Runtime.PYTHON_3_8, code=_lambda.Code.from_asset( os.path.join(self.current_dir, '../lambda/quicksight_migration/')), function_name='quicksight_migration_lambda', role=self.quicksight_migration_lambda_role, timeout=core.Duration.minutes(15), memory_size=1024, environment={ 'BUCKET_NAME': self.bucket.bucket_name, 'S3_KEY': 'None', 'INFRA_CONFIG_PARAM': '/infra/config', 'SQS_URL': self.queue.queue_url }) self.sqs_event_source = event_sources.SqsEventSource(self.queue) self.quicksight_migration_lambda.add_event_source( self.sqs_event_source) core.CfnOutput(self, "MigrationAPIGatewayURL", value=self.base_api.url, description="Migration API GW URL")
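# ---------------------------------------------------------------------------
# Kicking off a migration once the stack is up: POST any JSON body to the
# API root and the mapping template URL-encodes it into the SQS message that
# quicksight_migration_lambda consumes. The endpoint comes from the
# MigrationAPIGatewayURL output; the payload fields shown are hypothetical,
# since the lambda's expected schema lives in its own package.
import requests

api_url = "https://abc123.execute-api.us-east-1.amazonaws.com/prod/"  # from the stack output

response = requests.post(
    api_url,
    json={"source_account": "111111111111", "dashboard_id": "sales-dashboard"},
    headers={"Content-Type": "application/json"})
print(response.status_code)  # 200 means the message reached SQS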
def __init__(self, scope: cdk.Construct, construct_id: str, table: dynamo_db.Table, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) # Bucket for the processed stream events # -------------------------------------- clicks_destination_bucket = _s3.Bucket( self, 'Bucket', versioned=False # True ) # Lambda function for processing the stream # ----------------------------------------- # Policy statement for accessing the DynamoDB table lambda_function_policy_stm = _iam.PolicyStatement() lambda_function_policy_stm.add_actions('dynamodb:GetItem') lambda_function_policy_stm.add_resources(table.table_arn) # Lambda processor function mysfits_click_processor = _lambda.Function( self, 'Function', handler="streamProcessor.processRecord", runtime=_lambda.Runtime.PYTHON_3_6, description= 'An Amazon Kinesis Firehose stream processor that enriches click records to not just ' 'include a mysfitId, but also other attributes that can be analyzed later.', memory_size=128, code=_lambda.Code.asset('./lambda_streaming_processor'), timeout=cdk.Duration.seconds(60), initial_policy=[lambda_function_policy_stm], environment={'MYSFITS_API_URL': mysfits_api_url}) # Firehose delivery stream # ------------------------ # Initialize role firehose_delivery_role = _iam.Role( self, "FirehoseDeliveryRole", role_name='FirehoseDeliveryRole', assumed_by=_iam.ServicePrincipal('firehose.amazonaws.com'), ) # Statement with access to S3 bucket firehose_delivery_policy_s3_stm = _iam.PolicyStatement() firehose_delivery_policy_s3_stm.add_actions( "s3:AbortMultipartUpload", "s3:GetBucketLocation", "s3:GetObject", "s3:ListBucket", "s3:ListBucketMultipartUploads", "s3:PutObject") firehose_delivery_policy_s3_stm.add_resources( clicks_destination_bucket.bucket_arn) firehose_delivery_policy_s3_stm.add_resources( clicks_destination_bucket.arn_for_objects('*')) firehose_delivery_policy_s3_stm.effect = _iam.Effect.ALLOW # Statement with access to Lambda function firehose_delivery_policy_lambda_stm = _iam.PolicyStatement() firehose_delivery_policy_lambda_stm.add_actions( "lambda:InvokeFunction") firehose_delivery_policy_lambda_stm.add_actions( "lambda:GetFunctionConfiguration") firehose_delivery_policy_lambda_stm.add_resources( mysfits_click_processor.function_arn) firehose_delivery_policy_lambda_stm.effect = _iam.Effect.ALLOW # Add policies to role firehose_delivery_role.add_to_policy(firehose_delivery_policy_s3_stm) firehose_delivery_role.add_to_policy( firehose_delivery_policy_lambda_stm) # Create delivery stream mysfits_firehose_to_s3 = kinfire.CfnDeliveryStream( self, "DeliveryStream", delivery_stream_name="DeliveryStream", delivery_stream_type="DirectPut", extended_s3_destination_configuration=kinfire.CfnDeliveryStream. ExtendedS3DestinationConfigurationProperty( bucket_arn=clicks_destination_bucket.bucket_arn, buffering_hints=kinfire.CfnDeliveryStream. BufferingHintsProperty(interval_in_seconds=60, size_in_m_bs=1), compression_format="UNCOMPRESSED", error_output_prefix="errors/", prefix="firehose/", processing_configuration=kinfire.CfnDeliveryStream. ProcessingConfigurationProperty( enabled=True, processors=[ kinfire.CfnDeliveryStream.ProcessorProperty( type="Lambda", parameters=[ kinfire.CfnDeliveryStream. ProcessorParameterProperty( parameter_name="LambdaArn", parameter_value=mysfits_click_processor. 
function_arn) ]) ]), role_arn=firehose_delivery_role.role_arn, )) # API Gateway as proxy to the Firehose stream # ------------------------------------------- # Initialize role click_processing_api_role = _iam.Role( self, "ClickProcessingApiRole", role_name="ClickProcessingApiRole", assumed_by=_iam.ServicePrincipal("apigateway.amazonaws.com")) api_policy = _iam.PolicyStatement() api_policy.add_actions("firehose:PutRecord") api_policy.add_resources(mysfits_firehose_to_s3.attr_arn) api_policy.effect = _iam.Effect.ALLOW # Associate policy to role _iam.Policy(self, "ClickProcessingApiPolicy", policy_name="api_gateway_firehose_proxy_role", statements=[api_policy], roles=[click_processing_api_role]) # Create API gateway api = apigw.RestApi(self, "APIEndpoint", rest_api_name="ClickProcessingApi", endpoint_types=[apigw.EndpointType.REGIONAL]) # Add the resource endpoint and the method used to send clicks to Firehose clicks = api.root.add_resource('clicks') clicks.add_method( 'PUT', integration=apigw.AwsIntegration( service='firehose', integration_http_method='POST', action='PutRecord', options=apigw.IntegrationOptions( connection_type=apigw.ConnectionType.INTERNET, credentials_role=click_processing_api_role, integration_responses=[ apigw.IntegrationResponse( status_code='200', response_templates={ "application/json": '{"status":"OK"}' }, response_parameters={ "method.response.header.Access-Control-Allow-Headers": "'Content-Type'", "method.response.header.Access-Control-Allow-Methods": "'OPTIONS,PUT'", "method.response.header.Access-Control-Allow-Origin": "'*'" }) ], request_parameters={ "integration.request.header.Content-Type": "'application/x-amz-json-1.1'" }, request_templates={ "application/json": "{ \"DeliveryStreamName\": \"" + mysfits_firehose_to_s3.ref + "\", \"Record\": { \"Data\": \"$util.base64Encode($input.json('$'))\" } }" }, )), method_responses=[ apigw.MethodResponse( status_code='200', response_parameters={ "method.response.header.Access-Control-Allow-Headers": True, "method.response.header.Access-Control-Allow-Methods": True, "method.response.header.Access-Control-Allow-Origin": True }) ]) clicks.add_method( 'OPTIONS', integration=apigw.MockIntegration(integration_responses=[ apigw.IntegrationResponse( status_code='200', response_parameters={ "method.response.header.Access-Control-Allow-Headers": "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,X-Amz-User-Agent'", "method.response.header.Access-Control-Allow-Origin": "'*'", "method.response.header.Access-Control-Allow-Credentials": "'false'", "method.response.header.Access-Control-Allow-Methods": "'OPTIONS,GET,PUT,POST,DELETE'" }) ], passthrough_behavior=apigw. PassthroughBehavior.NEVER, request_templates={ "application/json": '{"statusCode": 200}' }), method_responses=[ apigw.MethodResponse( status_code='200', response_parameters={ "method.response.header.Access-Control-Allow-Headers": True, "method.response.header.Access-Control-Allow-Methods": True, "method.response.header.Access-Control-Allow-Credentials": True, "method.response.header.Access-Control-Allow-Origin": True }) ])
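# ---------------------------------------------------------------------------
# A quick way to exercise the click-ingestion path end to end: PUT a click
# record to /clicks and the request template above wraps it into a Firehose
# PutRecord call. The endpoint URL is a placeholder, and the extra userId
# field is only an example of attributes the stream processor could enrich
# later.
import requests

api_url = "https://abc123.execute-api.us-east-1.amazonaws.com/prod"  # placeholder

response = requests.put(
    f"{api_url}/clicks",
    json={"mysfitId": "some-mysfit-id", "userId": "example-user"},
    headers={"Content-Type": "application/json"})
print(response.json())  # {"status": "OK"} on success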