def __configure_gateway(self) -> None:
        self.gateway = a.RestApi(self, 'PortfolioMgmt')

        # Create kinesis integration
        integration_role = iam.Role(
            self,
            'KinesisIntegrationRole',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))

        self.updates_stream.grant_write(integration_role)

        updates = self.gateway.root.add_resource('updates')
        updates.add_method(http_method='POST',
                           authorization_type=a.AuthorizationType.IAM,
                           integration=a.AwsIntegration(
                               service='kinesis',
                               action='PutRecord',
                               subdomain=self.updates_stream.stream_name,
                               options=a.IntegrationOptions(
                                   credentials_role=integration_role)))

        pmapi = self.gateway.root.add_resource('pmapi')
        pmapi.add_proxy(
            any_method=True,
            default_integration=a.LambdaIntegration(handler=PythonLambda(
                self,
                'PortfolioMgmtAPI',
                build_prefix='artifacts/FinSurf-PortfolioMgmt-API',
                handler='handler.app',
                subnet_group_name='PortfolioMgmt',
                context=self.context,
                securityGroups=[self.security_group]).function))
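
Note that this Kinesis PutRecord integration passes the raw request body straight through; API Gateway's Kinesis action API expects a JSON document with StreamName, Data (base64-encoded) and PartitionKey, so a request template is normally added to the IntegrationOptions. A minimal sketch of that mapping, assuming json is imported and using an illustrative partition-key choice:

        # Hedged sketch: map the incoming body onto the shape the Kinesis PutRecord
        # action expects. $util and $context are API Gateway VTL expressions.
        kinesis_options = a.IntegrationOptions(
            credentials_role=integration_role,
            request_parameters={
                'integration.request.header.Content-Type':
                "'application/x-amz-json-1.1'"
            },
            request_templates={
                'application/json': json.dumps({
                    'StreamName': self.updates_stream.stream_name,
                    'Data': '$util.base64Encode($input.body)',
                    'PartitionKey': '$context.requestId'
                })
            },
            integration_responses=[a.IntegrationResponse(status_code='200')])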
Example #2
    def rest_api(self, event_streams, event_replayer):
        rest_api = _api_gtw.RestApi(self, "{}RestApi".format(self.stack_id))
        rest_api.add_usage_plan("RestApiUsagePlan",
                                api_key=_api_gtw.ApiKey(self, "TestApiKey"),
                                api_stages=[
                                    _api_gtw.UsagePlanPerApiStage(
                                        api=rest_api,
                                        stage=rest_api.deployment_stage)
                                ])
        api_role = _iam.Role(
            self,
            "RestApiRole",
            assumed_by=_iam.ServicePrincipal('apigateway.amazonaws.com'))
        api_role.add_to_policy(
            _iam.PolicyStatement(
                actions=['firehose:PutRecord'],
                resources=[stream.attr_arn for stream in event_streams]))
        for stream in event_streams:
            stream_resource = rest_api.root.add_resource(
                path_part=stream.delivery_stream_name.lower())
            stream_resource.add_method(
                'POST',
                api_key_required=True,
                integration=_api_gtw.Integration(
                    type=_api_gtw.IntegrationType.AWS,
                    uri="arn:aws:apigateway:eu-west-1:firehose:action/PutRecord",
                    integration_http_method='POST',
                    options=_api_gtw.IntegrationOptions(
                        credentials_role=api_role,
                        passthrough_behavior=_api_gtw.PassthroughBehavior.NEVER,
                        request_parameters={
                            'integration.request.header.Content-Type':
                            "'application/x-amz-json-1.1'"
                        },
                        request_templates={
                            'application/json':
                            json.dumps({
                                "DeliveryStreamName": stream.delivery_stream_name,
                                "Record": {
                                    "Data": "$util.base64Encode($input.body)"
                                }
                            })
                        },
                        integration_responses=[
                            _api_gtw.IntegrationResponse(status_code="200")
                        ])),
                method_responses=[_api_gtw.MethodResponse(status_code="200")])
            replay = stream_resource.add_resource(path_part='replay')
            replay.add_method(
                http_method='POST',
                integration=_api_gtw.LambdaIntegration(event_replayer),
                method_responses=[
                    _api_gtw.MethodResponse(status_code="202"),
                    _api_gtw.MethodResponse(status_code="400")
                ])
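
One thing to watch in this example is the hard-coded eu-west-1 region inside the integration uri. A minimal alternative sketch, assuming the same _api_gtw alias and api_role as above, lets AwsIntegration assemble the region-aware URI instead:

        # Hedged sketch: AwsIntegration builds the arn:aws:apigateway:<region>:firehose:action/PutRecord
        # URI from the stack's own region, so nothing is hard-coded. The request_parameters,
        # request_templates and integration_responses shown above would be passed unchanged.
        firehose_integration = _api_gtw.AwsIntegration(
            service='firehose',
            action='PutRecord',
            integration_http_method='POST',
            options=_api_gtw.IntegrationOptions(credentials_role=api_role))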
Example #3
    def create_and_integrate_apigw(self, queue: sqs.Queue,
                                   dashboard_name_prefix: str) -> str:
        """Creates API Gateway and integrates with SQS queue

        :param queue: the SQS queue to integrate with
        :type queue: aws_cdk.aws_sqs.Queue
        :param dashboard_name_prefix: the dashboard name to use as the API Gateway resource name
        :type dashboard_name_prefix: str
        :returns: the url that the webhooks will post to
        :rtype: str
        """
        webhook_apigw_role = iam.Role(
            self,
            'WebhookAPIRole',
            role_name='WebhookAPIRole',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
        webhook_apigw_role.add_to_policy(
            iam.PolicyStatement(resources=['*'], actions=['sqs:SendMessage']))

        webhook_apigw = apigw.RestApi(
            self,
            'RepositoryStatusMonitorAPI',
            rest_api_name='RepositoryStatusMonitorAPI')
        webhook_apigw_resource = webhook_apigw.root.add_resource(
            dashboard_name_prefix)

        apigw_integration_response = apigw.IntegrationResponse(
            status_code='200', response_templates={'application/json': ""})
        apigw_integration_options = apigw.IntegrationOptions(
            credentials_role=webhook_apigw_role,
            integration_responses=[apigw_integration_response],
            request_templates={
                'application/json':
                'Action=SendMessage&MessageBody=$input.body'
            },
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                'integration.request.header.Content-Type':
                "'application/x-www-form-urlencoded'"
            })
        webhook_apigw_resource_sqs_integration = apigw.AwsIntegration(
            service='sqs',
            integration_http_method='POST',
            path='{}/{}'.format(core.Aws.ACCOUNT_ID, queue.queue_name),
            options=apigw_integration_options)

        webhook_apigw_resource.add_method(
            'POST',
            webhook_apigw_resource_sqs_integration,
            method_responses=[apigw.MethodResponse(status_code='200')])

        path = '/' + dashboard_name_prefix
        return webhook_apigw.url_for_path(path)
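
The role above is granted sqs:SendMessage on resources=['*']; the queue construct can grant the same permission scoped to just that queue. A one-line sketch using the grant helper exposed by sqs.Queue:

        # Hedged sketch: least-privilege alternative to the wildcard policy above.
        queue.grant_send_messages(webhook_apigw_role)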
Example #4
    def __init__(self, scope: core.Construct, construct_id: str,
                 apigw_role: _iam.Role, eventBus: _events.EventBus, **kwargs):
        super().__init__(scope, construct_id, **kwargs)

        integrationOptions = \
            _apigw.IntegrationOptions(
                credentials_role=apigw_role,
                request_parameters={
                    "integration.request.header.X-Amz-Target": "'AWSEvents.PutEvents'",
                    "integration.request.header.Content-Type": "'application/x-amz-json-1.1'",
                },
                request_templates={
                    "application/json":'#set($language=$input.params(\'language\'))\n{"Entries": [{"Source": "com.amazon.alexa.$language", "Detail": ' + \
                                       '"$util.escapeJavaScript($input.body)",' + \
                                       ' "Resources": ["resource1", "resource2"], "DetailType": "myDetailType", "EventBusName": "' + eventBus.event_bus_name + '"}]}'
                },
                integration_responses=[_apigw.IntegrationResponse(
                    status_code="200",
                    response_templates={"application/json": ""},
                )]
            )

        # Integration API Gateway with EventBridge
        integrationEventBridge = _apigw.Integration(
            type=_apigw.IntegrationType("AWS"),
            integration_http_method="POST",
            options=integrationOptions,
            uri=
            f"arn:aws:apigateway:{os.environ['CDK_DEFAULT_REGION']}:events:path//"
        )

        myApi = _apigw.RestApi(self, construct_id)
        # myApi.root.add_method("POST",
        #     integrationEventBridge,
        #     method_responses=[
        #         _apigw.MethodResponse(
        #             status_code="200"
        #         )
        #     ]
        # )

        languageResource = myApi.root.add_resource("{language}")
        languageResource.add_method(
            "POST",
            integrationEventBridge,
            method_responses=[_apigw.MethodResponse(status_code="200")],
            request_models={"application/json": self.getModel(myApi)},
            request_validator=_apigw.RequestValidator(
                self,
                "myValidator",
                rest_api=myApi,
                validate_request_body=True))
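
The integration uri here is built from the CDK_DEFAULT_REGION environment variable, which is only set when the CLI runs with a resolved environment. A sketch that derives the region from CloudFormation's pseudo parameter instead, assuming aws_cdk.core is imported as core as the type hints above suggest:

        # Hedged sketch: same EventBridge integration, but the region comes from
        # the deploying stack rather than from an environment variable.
        integrationEventBridge = _apigw.Integration(
            type=_apigw.IntegrationType.AWS,
            integration_http_method="POST",
            options=integrationOptions,
            uri=f"arn:aws:apigateway:{core.Aws.REGION}:events:path//")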
Example #5
    def __init__(self, scope: core.Construct, id: str, stream: ks.Stream,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.gateway = api.RestApi(self, 'EmailServices')
        resource = self.gateway.root.add_resource('send-mail')

        role = iam.Role(
            self,
            'Apig-to-Kinesis',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonKinesisFullAccess')
            ])

        kinesisIntegration = api.AwsIntegration(
            service='kinesis',
            action='PutRecord',
            subdomain=stream.stream_name,
            options=api.IntegrationOptions(credentials_role=role))

        self.post_method = resource.add_method('POST', kinesisIntegration)
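
AmazonKinesisFullAccess gives the integration role far more than it needs; the stream construct can grant write access directly. A sketch of the narrower setup (the construct ID is illustrative):

        # Hedged sketch: scope the role to this stream instead of attaching
        # the AmazonKinesisFullAccess managed policy.
        scoped_role = iam.Role(
            self,
            'ApigToKinesisWriteOnly',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
        stream.grant_write(scoped_role)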
Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        api_name='voncv'
        cert_arn='arn:aws:acm:us-east-1:921279086507:certificate/1cdc5fa6-0978-4c3c-96fa-ced4188b4fd0'
        # Example automatically generated. See https://github.com/aws/jsii/issues/826
        cert = acm.Certificate.from_certificate_arn(self,"cert",cert_arn)
        domain_name='voncv.sema4.com'
        domain_obj=api.DomainNameOptions(certificate=cert, domain_name=domain_name)
        #domain_name_obj=api.DefaultDomainMappingOptions( domain_name=domain_obj1, mapping_key=None)
        integration_uri='http://voncwesprod.sema4.com:3000'


        #classaws_cdk.aws_apigateway.HttpIntegration(url, *, http_method=None, options=None, proxy=None)

        #base_api=api.HttpApi(self, 'rootapi', api_name=api_name, cors_preflight=None, create_default_stage=None, default_domain_mapping=domain_name_obj)
        #                     , default_integration=None)


        base_api = api.RestApi(self, "RestApi",
                               rest_api_name=api_name,
                               domain_name=domain_obj,
                               deploy=False)

        integration_response_200 = api.IntegrationResponse(status_code='200')
        integration_response_400 = api.IntegrationResponse(status_code='400', selection_pattern='400')
        integration_response_401 = api.IntegrationResponse(status_code='401', selection_pattern='401')
        integration_response_404 = api.IntegrationResponse(status_code='404', selection_pattern='404')
        integration_response_405 = api.IntegrationResponse(status_code='405', selection_pattern='405')
        integration_response_500 = api.IntegrationResponse(status_code='500', selection_pattern='500')

        integration_response_all=[integration_response_200,
                                  integration_response_400,
                                  integration_response_401,
                                  integration_response_404,
                                  integration_response_405,
                                  integration_response_500
                                  ]
        integration_options=api.IntegrationOptions(integration_responses=integration_response_all)
        #method_reponse=api.IntegrationOptions(integration_responses=integration_response_all)
        mr = [
            api.MethodResponse(status_code='200'),
            api.MethodResponse(status_code='400'),
            api.MethodResponse(status_code='401'),
            api.MethodResponse(status_code='404'),
            api.MethodResponse(status_code='405'),
            api.MethodResponse(status_code='500')
        ]

        integration = api.Integration(type=api.IntegrationType.HTTP,
                                      integration_http_method='POST',
                                      uri=integration_uri,
                                      options=integration_options)

        control = base_api.root.add_resource('control')
        control_post = control.add_method('POST', integration)
        control_controlid = control.add_resource('{controlid}')
        control_controlid_post = control_controlid.add_method('POST', integration)
        control_controlid_sample = control_controlid.add_resource('sample')
        control_controlid_sample_sampleid = control_controlid_sample.add_resource('sampleid')
        control_controlid_sample_sampleid_get = control_controlid_sample_sampleid.add_method('GET', integration, method_responses=mr)
        control_controlid_sample_sampleid_pipelineoutput = control_controlid_sample_sampleid.add_resource('pipeline_output')
        control_controlid_sample_sampleid_pipelineoutput_post = control_controlid_sample_sampleid_pipelineoutput.add_method('POST', integration)


        getallpatients = base_api.root.add_resource('getallpatients')
        getallpatients_get = getallpatients.add_method('GET', integration)


        patient = base_api.root.add_resource('patient')
        patient_get = patient.add_method('GET', integration)
        patient_post = patient.add_method('POST', integration)

        patientid = patient.add_resource('patientid')
        patientid_put = patientid.add_method('PUT', integration)
        patientid_get = patientid.add_method('GET', integration)

        patient_case = patientid.add_resource('case')
        patient_case_post = patient_case.add_method('POST', integration)

        patient_case_caseid = patient_case.add_resource('caseid')
        patient_case_caseid_get = patient_case_caseid.add_method('GET', integration)
        patient_case_caseid_put = patient_case_caseid.add_method('PUT', integration)
        patient_case_caseid_close = patient_case_caseid.add_resource('close')
        patient_case_caseid_close_post = patient_case_caseid_close.add_method('POST', integration)
        patient_case_caseid_curate = patient_case_caseid.add_resource('curate')
        patient_case_caseid_curate_post = patient_case_caseid_curate.add_method('POST', integration)
        patient_case_caseid_fail = patient_case_caseid.add_resource('fail')
        patient_case_caseid_fail_post = patient_case_caseid_fail.add_method('POST', integration)
        patient_case_caseid_reopen = patient_case_caseid.add_resource('reopen')
        patient_case_caseid_reopen_post = patient_case_caseid_reopen.add_method('POST', integration)
        patient_case_caseid_sample = patient_case_caseid.add_resource('sample')
        patient_case_caseid_sample_sampleid = patient_case_caseid_sample.add_resource('sampleid')
        patient_case_caseid_sample_sampleid_get = patient_case_caseid_sample_sampleid.add_method('GET', integration)
        patient_case_caseid_sample_sampleid_fail = patient_case_caseid_sample_sampleid.add_resource('fail')
        patient_case_caseid_sample_sampleid_fail_post = patient_case_caseid_sample_sampleid_fail.add_method('POST', integration)
        patient_case_caseid_sample_sampleid_pipelineoutput = patient_case_caseid_sample_sampleid.add_resource('pipeline-output')
        patient_case_caseid_sample_sampleid_pipelineoutput_post = patient_case_caseid_sample_sampleid_pipelineoutput.add_method('POST', integration)
        patient_case_caseid_variant = patient_case_caseid.add_resource('variant')
        patient_case_caseid_variant_post = patient_case_caseid_variant.add_method('POST', integration)
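
Because the RestApi is created with deploy=False, nothing in this example produces a deployment or stage, so the custom domain has no stage to map to until one is added explicitly. A sketch of what that might look like (construct IDs and stage name are illustrative):

        # Hedged sketch: create an explicit deployment and stage for the API,
        # then point the RestApi's default stage at it.
        deployment = api.Deployment(self, 'Deployment', api=base_api)
        stage = api.Stage(self, 'ProdStage', deployment=deployment, stage_name='prod')
        base_api.deployment_stage = stage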
Example #7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ###
        # SNS Topic Creation
        # Our API Gateway posts messages directly to this
        ###
        topic = sns.Topic(self,
                          'theBigFanTopic',
                          display_name='The Big Fan CDK Pattern Topic')

        ###
        # SQS Subscribers creation for our SNS Topic
        # 2 subscribers: one for messages with a status of 'created', one for any other message
        ###

        # Status:created SNS Subscriber Queue
        created_status_queue = sqs.Queue(
            self,
            'BigFanTopicStatusCreatedSubscriberQueue',
            visibility_timeout=core.Duration.seconds(300),
            queue_name='BigFanTopicStatusCreatedSubscriberQueue')

        # Only send messages to our created_status_queue with a status of created
        created_filter = sns.SubscriptionFilter.string_filter(
            whitelist=['created'])
        topic.add_subscription(
            subscriptions.SqsSubscription(
                created_status_queue,
                raw_message_delivery=True,
                filter_policy={'status': created_filter}))

        # Any other status SNS Subscriber Queue
        other_status_queue = sqs.Queue(
            self,
            'BigFanTopicAnyOtherStatusSubscriberQueue',
            visibility_timeout=core.Duration.seconds(300),
            queue_name='BigFanTopicAnyOtherStatusSubscriberQueue')

        # Only send messages to our other_status_queue that do not have a status of created
        other_filter = sns.SubscriptionFilter.string_filter(
            blacklist=['created'])
        topic.add_subscription(
            subscriptions.SqsSubscription(
                other_status_queue,
                raw_message_delivery=True,
                filter_policy={'status': other_filter}))

        ###
        # Creation of Lambdas that subscribe to above SQS queues
        ###

        # Created status queue lambda
        sqs_created_status_subscriber = _lambda.Function(
            self,
            "SQSCreatedStatusSubscribeLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="createdStatus.handler",
            code=_lambda.Code.from_asset("lambda_fns/subscribe"))
        created_status_queue.grant_consume_messages(
            sqs_created_status_subscriber)
        sqs_created_status_subscriber.add_event_source(
            _event.SqsEventSource(created_status_queue))

        # Any other status queue lambda
        sqs_other_status_subscriber = _lambda.Function(
            self,
            "SQSAnyOtherStatusSubscribeLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="anyOtherStatus.handler",
            code=_lambda.Code.from_asset("lambda_fns/subscribe"))
        other_status_queue.grant_consume_messages(sqs_other_status_subscriber)
        sqs_other_status_subscriber.add_event_source(
            _event.SqsEventSource(other_status_queue))

        ###
        # API Gateway Creation
        # This is more involved because it transforms the incoming JSON payload into a query string URL,
        # which is used to post the payload to SNS without a Lambda in between
        ###

        gateway = api_gw.RestApi(
            self,
            'theBigFanAPI',
            deploy_options=api_gw.StageOptions(
                metrics_enabled=True,
                logging_level=api_gw.MethodLoggingLevel.INFO,
                data_trace_enabled=True,
                stage_name='prod'))

        # Give our gateway permissions to interact with SNS
        api_gw_sns_role = iam.Role(
            self,
            'DefaultLambdaHanderRole',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
        topic.grant_publish(api_gw_sns_role)

        # shortening the lines of later code
        schema = api_gw.JsonSchema
        schema_type = api_gw.JsonSchemaType

        # Because this isn't a proxy integration, we need to define our response model
        response_model = gateway.add_model(
            'ResponseModel',
            content_type='application/json',
            model_name='ResponseModel',
            schema=schema(
                schema=api_gw.JsonSchemaVersion.DRAFT4,
                title='pollResponse',
                type=schema_type.OBJECT,
                properties={'message': schema(type=schema_type.STRING)}))

        error_response_model = gateway.add_model(
            'ErrorResponseModel',
            content_type='application/json',
            model_name='ErrorResponseModel',
            schema=schema(schema=api_gw.JsonSchemaVersion.DRAFT4,
                          title='errorResponse',
                          type=schema_type.OBJECT,
                          properties={
                              'state': schema(type=schema_type.STRING),
                              'message': schema(type=schema_type.STRING)
                          }))

        request_template = "Action=Publish&" + \
                           "TargetArn=$util.urlEncode('" + topic.topic_arn + "')&" + \
                           "Message=$util.urlEncode($input.path('$.message'))&" + \
                           "Version=2010-03-31&" + \
                           "MessageAttributes.entry.1.Name=status&" + \
                           "MessageAttributes.entry.1.Value.DataType=String&" + \
                           "MessageAttributes.entry.1.Value.StringValue=$util.urlEncode($input.path('$.status'))"

        # This is the VTL to transform the error response
        error_template = {
            "state": 'error',
            "message": "$util.escapeJavaScript($input.path('$.errorMessage'))"
        }
        error_template_string = json.dumps(error_template,
                                           separators=(',', ':'))

        # This is how our gateway chooses what response to send based on selection_pattern
        integration_options = api_gw.IntegrationOptions(
            credentials_role=api_gw_sns_role,
            request_parameters={
                'integration.request.header.Content-Type':
                "'application/x-www-form-urlencoded'"
            },
            request_templates={"application/json": request_template},
            passthrough_behavior=api_gw.PassthroughBehavior.NEVER,
            integration_responses=[
                api_gw.IntegrationResponse(
                    status_code='200',
                    response_templates={
                        "application/json":
                        json.dumps({"message": 'message added to topic'})
                    }),
                api_gw.IntegrationResponse(
                    selection_pattern=r"^\[Error\].*",
                    status_code='400',
                    response_templates={
                        "application/json": error_template_string
                    },
                    response_parameters={
                        'method.response.header.Content-Type':
                        "'application/json'",
                        'method.response.header.Access-Control-Allow-Origin':
                        "'*'",
                        'method.response.header.Access-Control-Allow-Credentials':
                        "'true'"
                    })
            ])

        # Add an SendEvent endpoint onto the gateway
        gateway.root.add_resource('SendEvent') \
            .add_method('POST', api_gw.Integration(type=api_gw.IntegrationType.AWS,
                                                   integration_http_method='POST',
                                                   uri='arn:aws:apigateway:us-east-1:sns:path//',
                                                   options=integration_options
                                                   ),
                        method_responses=[
                            api_gw.MethodResponse(status_code='200',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': response_model
                                                  }),
                            api_gw.MethodResponse(status_code='400',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': error_response_model
                                                  }),
                        ]
                        )
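
The request template above pulls $.message and $.status out of the request body, so callers are expected to POST JSON of that shape to the SendEvent resource. A purely illustrative sketch:

        # Hedged usage sketch: the VTL template reads $input.path('$.message') and
        # $input.path('$.status'), so a request body would look like this, posted to
        # <gateway.url>SendEvent with Content-Type: application/json.
        example_payload = {"message": "message for the fan-out", "status": "created"}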
Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = aws_ec2.Vpc(
            self,
            "OctemberVPC",
            max_azs=2,
            #      subnet_configuration=[{
            #          "cidrMask": 24,
            #          "name": "Public",
            #          "subnetType": aws_ec2.SubnetType.PUBLIC,
            #        },
            #        {
            #          "cidrMask": 24,
            #          "name": "Private",
            #          "subnetType": aws_ec2.SubnetType.PRIVATE
            #        },
            #        {
            #          "cidrMask": 28,
            #          "name": "Isolated",
            #          "subnetType": aws_ec2.SubnetType.ISOLATED,
            #          "reserved": True
            #        }
            #      ],
            gateway_endpoints={
                "S3":
                aws_ec2.GatewayVpcEndpointOptions(
                    service=aws_ec2.GatewayVpcEndpointAwsService.S3)
            })

        dynamo_db_endpoint = vpc.add_gateway_endpoint(
            "DynamoDbEndpoint",
            service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB)

        s3_bucket = s3.Bucket(
            self,
            "s3bucket",
            bucket_name="octember-bizcard-{region}-{account}".format(
                region=core.Aws.REGION, account=core.Aws.ACCOUNT_ID))

        api = apigw.RestApi(
            self,
            "BizcardImageUploader",
            rest_api_name="BizcardImageUploader",
            description="This service serves uploading bizcard images into s3.",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            binary_media_types=["image/png", "image/jpg"],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        rest_api_role = aws_iam.Role(
            self,
            "ApiGatewayRoleForS3",
            role_name="ApiGatewayRoleForS3FullAccess",
            assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonS3FullAccess")
            ])

        list_objects_responses = [
            apigw.IntegrationResponse(
                status_code="200",
                #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html#aws_cdk.aws_apigateway.IntegrationResponse.response_parameters
                # The response parameters from the backend response that API Gateway sends to the method response.
                # Use the destination as the key and the source as the value:
                #  - The destination must be an existing response parameter in the MethodResponse property.
                #  - The source must be an existing method request parameter or a static value.
                response_parameters={
                    'method.response.header.Timestamp':
                    'integration.response.header.Date',
                    'method.response.header.Content-Length':
                    'integration.response.header.Content-Length',
                    'method.response.header.Content-Type':
                    'integration.response.header.Content-Type'
                }),
            apigw.IntegrationResponse(status_code="400",
                                      selection_pattern=r"4\d{2}"),
            apigw.IntegrationResponse(status_code="500",
                                      selection_pattern=r"5\d{2}")
        ]

        list_objects_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses)

        get_s3_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path='/',
            options=list_objects_integration_options)

        api.root.add_method(
            "GET",
            get_s3_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={'method.request.header.Content-Type': False})
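        # Note: AuthorizationType.IAM means callers must SigV4-sign their requests
        # (for example with a signed SDK client or a tool such as awscurl).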

        get_s3_folder_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses,
            #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html#aws_cdk.aws_apigateway.IntegrationOptions.request_parameters
            # Specify request parameters as key-value pairs (string-to-string mappings), with a destination as the key and a source as the value.
            # The source must be an existing method request parameter or a static value.
            request_parameters={
                "integration.request.path.bucket": "method.request.path.folder"
            })

        get_s3_folder_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path="{bucket}",
            options=get_s3_folder_integration_options)

        s3_folder = api.root.add_resource('{folder}')
        s3_folder.add_method(
            "GET",
            get_s3_folder_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True
            })

        get_s3_item_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses,
            request_parameters={
                "integration.request.path.bucket":
                "method.request.path.folder",
                "integration.request.path.object": "method.request.path.item"
            })

        get_s3_item_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path="{bucket}/{object}",
            options=get_s3_item_integration_options)

        s3_item = s3_folder.add_resource('{item}')
        s3_item.add_method(
            "GET",
            get_s3_item_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True,
                'method.request.path.item': True
            })

        put_s3_item_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[
                apigw.IntegrationResponse(status_code="200"),
                apigw.IntegrationResponse(status_code="400",
                                          selection_pattern=r"4\d{2}"),
                apigw.IntegrationResponse(status_code="500",
                                          selection_pattern=r"5\d{2}")
            ],
            request_parameters={
                "integration.request.header.Content-Type":
                "method.request.header.Content-Type",
                "integration.request.path.bucket":
                "method.request.path.folder",
                "integration.request.path.object": "method.request.path.item"
            })

        put_s3_item_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="PUT",
            path="{bucket}/{object}",
            options=put_s3_item_integration_options)

        s3_item.add_method(
            "PUT",
            put_s3_item_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True,
                'method.request.path.item': True
            })

        ddb_table = dynamodb.Table(
            self,
            "BizcardImageMetaInfoDdbTable",
            table_name="OctemberBizcardImgMeta",
            partition_key=dynamodb.Attribute(
                name="image_id", type=dynamodb.AttributeType.STRING),
            billing_mode=dynamodb.BillingMode.PROVISIONED,
            read_capacity=15,
            write_capacity=5)

        img_kinesis_stream = kinesis.Stream(
            self, "BizcardImagePath", stream_name="octember-bizcard-image")

        # create lambda function
        trigger_textract_lambda_fn = _lambda.Function(
            self,
            "TriggerTextExtractorFromImage",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="TriggerTextExtractorFromImage",
            handler="trigger_text_extract_from_s3_image.lambda_handler",
            description="Trigger to extract text from an image in S3",
            code=_lambda.Code.asset(
                "./src/main/python/TriggerTextExtractFromS3Image"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'DDB_TABLE_NAME': ddb_table.table_name,
                'KINESIS_STREAM_NAME': img_kinesis_stream.stream_name
            },
            timeout=core.Duration.minutes(5))

        ddb_table_rw_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            resources=[ddb_table.table_arn],
            actions=[
                "dynamodb:BatchGetItem", "dynamodb:Describe*",
                "dynamodb:List*", "dynamodb:GetItem", "dynamodb:Query",
                "dynamodb:Scan", "dynamodb:BatchWriteItem",
                "dynamodb:DeleteItem", "dynamodb:PutItem",
                "dynamodb:UpdateItem", "dax:Describe*", "dax:List*",
                "dax:GetItem", "dax:BatchGetItem", "dax:Query", "dax:Scan",
                "dax:BatchWriteItem", "dax:DeleteItem", "dax:PutItem",
                "dax:UpdateItem"
            ])

        trigger_textract_lambda_fn.add_to_role_policy(
            ddb_table_rw_policy_statement)
        trigger_textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[img_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:Get*", "kinesis:List*",
                                        "kinesis:Describe*",
                                        "kinesis:PutRecord",
                                        "kinesis:PutRecords"
                                    ]))

        # assign notification for the s3 event type (ex: OBJECT_CREATED)
        s3_event_filter = s3.NotificationKeyFilter(prefix="bizcard-raw-img/",
                                                   suffix=".jpg")
        s3_event_source = S3EventSource(s3_bucket,
                                        events=[s3.EventType.OBJECT_CREATED],
                                        filters=[s3_event_filter])
        trigger_textract_lambda_fn.add_event_source(s3_event_source)

        #XXX: https://github.com/aws/aws-cdk/issues/2240
        # To avoid creating extra Lambda Functions with names like LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8a
        # if log_retention=aws_logs.RetentionDays.THREE_DAYS is added to the constructor props
        log_group = aws_logs.LogGroup(
            self,
            "TriggerTextractLogGroup",
            log_group_name="/aws/lambda/TriggerTextExtractorFromImage",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(trigger_textract_lambda_fn)

        text_kinesis_stream = kinesis.Stream(
            self, "BizcardTextData", stream_name="octember-bizcard-txt")

        textract_lambda_fn = _lambda.Function(
            self,
            "GetTextFromImage",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="GetTextFromImage",
            handler="get_text_from_s3_image.lambda_handler",
            description="extract text from an image in S3",
            code=_lambda.Code.asset("./src/main/python/GetTextFromS3Image"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'DDB_TABLE_NAME': ddb_table.table_name,
                'KINESIS_STREAM_NAME': text_kinesis_stream.stream_name
            },
            timeout=core.Duration.minutes(5))

        textract_lambda_fn.add_to_role_policy(ddb_table_rw_policy_statement)
        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[text_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:Get*", "kinesis:List*",
                                        "kinesis:Describe*",
                                        "kinesis:PutRecord",
                                        "kinesis:PutRecords"
                                    ]))

        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        s3_bucket.bucket_arn, "{}/*".format(
                            s3_bucket.bucket_arn)
                    ],
                    "actions": [
                        "s3:AbortMultipartUpload", "s3:GetBucketLocation",
                        "s3:GetObject", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads", "s3:PutObject"
                    ]
                }))

        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=["textract:*"]))

        img_kinesis_event_source = KinesisEventSource(
            img_kinesis_stream,
            batch_size=100,
            starting_position=_lambda.StartingPosition.LATEST)
        textract_lambda_fn.add_event_source(img_kinesis_event_source)

        log_group = aws_logs.LogGroup(
            self,
            "GetTextFromImageLogGroup",
            log_group_name="/aws/lambda/GetTextFromImage",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(textract_lambda_fn)

        sg_use_bizcard_es = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard elasticsearch client',
            security_group_name='use-octember-bizcard-es')
        core.Tags.of(sg_use_bizcard_es).add('Name', 'use-octember-bizcard-es')

        sg_bizcard_es = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard elasticsearch',
            security_group_name='octember-bizcard-es')
        core.Tags.of(sg_bizcard_es).add('Name', 'octember-bizcard-es')

        sg_bizcard_es.add_ingress_rule(peer=sg_bizcard_es,
                                       connection=aws_ec2.Port.all_tcp(),
                                       description='octember-bizcard-es')
        sg_bizcard_es.add_ingress_rule(peer=sg_use_bizcard_es,
                                       connection=aws_ec2.Port.all_tcp(),
                                       description='use-octember-bizcard-es')

        sg_ssh_access = aws_ec2.SecurityGroup(
            self,
            "BastionHostSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for bastion host',
            security_group_name='octember-bastion-host-sg')
        core.Tags.of(sg_ssh_access).add('Name', 'octember-bastion-host')
        sg_ssh_access.add_ingress_rule(peer=aws_ec2.Peer.any_ipv4(),
                                       connection=aws_ec2.Port.tcp(22),
                                       description='ssh access')

        bastion_host = aws_ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc,
            instance_type=aws_ec2.InstanceType('t3.nano'),
            security_group=sg_ssh_access,
            subnet_selection=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC))
        bastion_host.instance.add_security_group(sg_use_bizcard_es)

        #XXX: aws cdk elasticsearch example - https://github.com/aws/aws-cdk/issues/2873
        es_cfn_domain = aws_elasticsearch.CfnDomain(
            self,
            'BizcardSearch',
            elasticsearch_cluster_config={
                "dedicatedMasterCount": 3,
                "dedicatedMasterEnabled": True,
                "dedicatedMasterType": "t2.medium.elasticsearch",
                "instanceCount": 2,
                "instanceType": "t2.medium.elasticsearch",
                "zoneAwarenessEnabled": True
            },
            ebs_options={
                "ebsEnabled": True,
                "volumeSize": 10,
                "volumeType": "gp2"
            },
            domain_name="octember-bizcard",
            elasticsearch_version="7.9",
            encryption_at_rest_options={"enabled": False},
            access_policies={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Effect":
                    "Allow",
                    "Principal": {
                        "AWS": "*"
                    },
                    "Action":
                    ["es:Describe*", "es:List*", "es:Get*", "es:ESHttp*"],
                    "Resource":
                    self.format_arn(service="es",
                                    resource="domain",
                                    resource_name="octember-bizcard/*")
                }]
            },
            snapshot_options={"automatedSnapshotStartHour": 17},
            vpc_options={
                "securityGroupIds": [sg_bizcard_es.security_group_id],
                "subnetIds":
                vpc.select_subnets(
                    subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids
            })
        core.Tags.of(es_cfn_domain).add('Name', 'octember-bizcard-es')

        s3_lib_bucket_name = self.node.try_get_context("lib_bucket_name")

        #XXX: https://github.com/aws/aws-cdk/issues/1342
        s3_lib_bucket = s3.Bucket.from_bucket_name(self, id,
                                                   s3_lib_bucket_name)
        es_lib_layer = _lambda.LayerVersion(
            self,
            "ESLib",
            layer_version_name="es-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(s3_lib_bucket,
                                          "var/octember-es-lib.zip"))

        redis_lib_layer = _lambda.LayerVersion(
            self,
            "RedisLib",
            layer_version_name="redis-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(s3_lib_bucket,
                                          "var/octember-redis-lib.zip"))

        #XXX: Deploy lambda in VPC - https://github.com/aws/aws-cdk/issues/1342
        upsert_to_es_lambda_fn = _lambda.Function(
            self,
            "UpsertBizcardToES",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="UpsertBizcardToElasticSearch",
            handler="upsert_bizcard_to_es.lambda_handler",
            description="Upsert bizcard text into elasticsearch",
            code=_lambda.Code.asset("./src/main/python/UpsertBizcardToES"),
            environment={
                'ES_HOST': es_cfn_domain.attr_domain_endpoint,
                'ES_INDEX': 'octember_bizcard',
                'ES_TYPE': 'bizcard'
            },
            timeout=core.Duration.minutes(5),
            layers=[es_lib_layer],
            security_groups=[sg_use_bizcard_es],
            vpc=vpc)

        text_kinesis_event_source = KinesisEventSource(
            text_kinesis_stream,
            batch_size=99,
            starting_position=_lambda.StartingPosition.LATEST)
        upsert_to_es_lambda_fn.add_event_source(text_kinesis_event_source)

        log_group = aws_logs.LogGroup(
            self,
            "UpsertBizcardToESLogGroup",
            log_group_name="/aws/lambda/UpsertBizcardToElasticSearch",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(upsert_to_es_lambda_fn)

        firehose_role_policy_doc = aws_iam.PolicyDocument()
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        s3_bucket.bucket_arn, "{}/*".format(
                            s3_bucket.bucket_arn)
                    ],
                    "actions": [
                        "s3:AbortMultipartUpload", "s3:GetBucketLocation",
                        "s3:GetObject", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads", "s3:PutObject"
                    ]
                }))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=[
                                        "glue:GetTable",
                                        "glue:GetTableVersion",
                                        "glue:GetTableVersions"
                                    ]))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[text_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:DescribeStream",
                                        "kinesis:GetShardIterator",
                                        "kinesis:GetRecords"
                                    ]))

        firehose_log_group_name = "/aws/kinesisfirehose/octember-bizcard-txt-to-s3"
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                #XXX: The ARN will be formatted as follows:
                # arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
                resources=[
                    self.format_arn(service="logs",
                                    resource="log-group",
                                    resource_name="{}:log-stream:*".format(
                                        firehose_log_group_name),
                                    sep=":")
                ],
                actions=["logs:PutLogEvents"]))

        firehose_role = aws_iam.Role(
            self,
            "FirehoseDeliveryRole",
            role_name="FirehoseDeliveryRole",
            assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"),
            #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
            inline_policies={"firehose_role_policy": firehose_role_policy_doc})

        bizcard_text_to_s3_delivery_stream = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "BizcardTextToS3",
            delivery_stream_name="octember-bizcard-txt-to-s3",
            delivery_stream_type="KinesisStreamAsSource",
            kinesis_stream_source_configuration={
                "kinesisStreamArn": text_kinesis_stream.stream_arn,
                "roleArn": firehose_role.role_arn
            },
            extended_s3_destination_configuration={
                "bucketArn": s3_bucket.bucket_arn,
                "bufferingHints": {
                    "intervalInSeconds": 60,
                    "sizeInMBs": 1
                },
                "cloudWatchLoggingOptions": {
                    "enabled": True,
                    "logGroupName": firehose_log_group_name,
                    "logStreamName": "S3Delivery"
                },
                "compressionFormat": "GZIP",
                "prefix": "bizcard-text/",
                "roleArn": firehose_role.role_arn
            })

        sg_use_bizcard_es_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchCacheClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard search query cache client',
            security_group_name='use-octember-bizcard-es-cache')
        core.Tags.of(sg_use_bizcard_es_cache).add(
            'Name', 'use-octember-bizcard-es-cache')

        sg_bizcard_es_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchCacheSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard search query cache',
            security_group_name='octember-bizcard-es-cache')
        core.Tags.of(sg_bizcard_es_cache).add('Name',
                                              'octember-bizcard-es-cache')

        sg_bizcard_es_cache.add_ingress_rule(
            peer=sg_use_bizcard_es_cache,
            connection=aws_ec2.Port.tcp(6379),
            description='use-octember-bizcard-es-cache')

        es_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup(
            self,
            "QueryCacheSubnetGroup",
            description="subnet group for octember-bizcard-es-cache",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            cache_subnet_group_name='octember-bizcard-es-cache')

        es_query_cache = aws_elasticache.CfnCacheCluster(
            self,
            "BizcardSearchQueryCache",
            cache_node_type="cache.t3.small",
            num_cache_nodes=1,
            engine="redis",
            engine_version="5.0.5",
            auto_minor_version_upgrade=False,
            cluster_name="octember-bizcard-es-cache",
            snapshot_retention_limit=3,
            snapshot_window="17:00-19:00",
            preferred_maintenance_window="mon:19:00-mon:20:30",
            #XXX: Do not use a reference for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098
            #cache_subnet_group_name=es_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC
            cache_subnet_group_name='octember-bizcard-es-cache',
            vpc_security_group_ids=[sg_bizcard_es_cache.security_group_id])

        #XXX: If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster.
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-cache-cluster.html#cfn-elasticache-cachecluster-cachesubnetgroupname
        es_query_cache.add_depends_on(es_query_cache_subnet_group)

        #XXX: add more than 2 security groups
        # https://github.com/aws/aws-cdk/blob/ea10f0d141a48819ec0000cd7905feda993870a9/packages/%40aws-cdk/aws-lambda/lib/function.ts#L387
        # https://github.com/aws/aws-cdk/issues/1555
        # https://github.com/aws/aws-cdk/pull/5049
        bizcard_search_lambda_fn = _lambda.Function(
            self,
            "BizcardSearchServer",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="BizcardSearchProxy",
            handler="es_search_bizcard.lambda_handler",
            description="Proxy server to search bizcard text",
            code=_lambda.Code.asset("./src/main/python/SearchBizcard"),
            environment={
                'ES_HOST': es_cfn_domain.attr_domain_endpoint,
                'ES_INDEX': 'octember_bizcard',
                'ES_TYPE': 'bizcard',
                'ELASTICACHE_HOST': es_query_cache.attr_redis_endpoint_address
            },
            timeout=core.Duration.minutes(1),
            layers=[es_lib_layer, redis_lib_layer],
            security_groups=[sg_use_bizcard_es, sg_use_bizcard_es_cache],
            vpc=vpc)

        #XXX: create API Gateway + LambdaProxy
        search_api = apigw.LambdaRestApi(
            self,
            "BizcardSearchAPI",
            handler=bizcard_search_lambda_fn,
            proxy=False,
            rest_api_name="BizcardSearch",
            description="This service serves searching bizcard text.",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        bizcard_search = search_api.root.add_resource('search')
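        # Because search_api is a LambdaRestApi created with proxy=False, methods
        # added without an explicit integration default to invoking
        # bizcard_search_lambda_fn.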
        bizcard_search.add_method(
            "GET",
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ])

        sg_use_bizcard_graph_db = aws_ec2.SecurityGroup(
            self,
            "BizcardGraphDbClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard graph db client',
            security_group_name='use-octember-bizcard-neptune')
        core.Tags.of(sg_use_bizcard_graph_db).add(
            'Name', 'use-octember-bizcard-neptune')

        sg_bizcard_graph_db = aws_ec2.SecurityGroup(
            self,
            "BizcardGraphDbSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard graph db',
            security_group_name='octember-bizcard-neptune')
        core.Tags.of(sg_bizcard_graph_db).add('Name',
                                              'octember-bizcard-neptune')

        sg_bizcard_graph_db.add_ingress_rule(
            peer=sg_bizcard_graph_db,
            connection=aws_ec2.Port.tcp(8182),
            description='octember-bizcard-neptune')
        sg_bizcard_graph_db.add_ingress_rule(
            peer=sg_use_bizcard_graph_db,
            connection=aws_ec2.Port.tcp(8182),
            description='use-octember-bizcard-neptune')

        bizcard_graph_db_subnet_group = aws_neptune.CfnDBSubnetGroup(
            self,
            "NeptuneSubnetGroup",
            db_subnet_group_description=
            "subnet group for octember-bizcard-neptune",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            db_subnet_group_name='octember-bizcard-neptune')

        bizcard_graph_db = aws_neptune.CfnDBCluster(
            self,
            "BizcardGraphDB",
            availability_zones=vpc.availability_zones,
            db_subnet_group_name=bizcard_graph_db_subnet_group.
            db_subnet_group_name,
            db_cluster_identifier="octember-bizcard",
            backup_retention_period=1,
            preferred_backup_window="08:45-09:15",
            preferred_maintenance_window="sun:18:00-sun:18:30",
            vpc_security_group_ids=[sg_bizcard_graph_db.security_group_id])
        bizcard_graph_db.add_depends_on(bizcard_graph_db_subnet_group)

        bizcard_graph_db_instance = aws_neptune.CfnDBInstance(
            self,
            "BizcardGraphDBInstance",
            db_instance_class="db.r5.large",
            allow_major_version_upgrade=False,
            auto_minor_version_upgrade=False,
            availability_zone=vpc.availability_zones[0],
            db_cluster_identifier=bizcard_graph_db.db_cluster_identifier,
            db_instance_identifier="octember-bizcard",
            preferred_maintenance_window="sun:18:00-sun:18:30")
        bizcard_graph_db_instance.add_depends_on(bizcard_graph_db)

        bizcard_graph_db_replica_instance = aws_neptune.CfnDBInstance(
            self,
            "BizcardGraphDBReplicaInstance",
            db_instance_class="db.r5.large",
            allow_major_version_upgrade=False,
            auto_minor_version_upgrade=False,
            availability_zone=vpc.availability_zones[-1],
            db_cluster_identifier=bizcard_graph_db.db_cluster_identifier,
            db_instance_identifier="octember-bizcard-replica",
            preferred_maintenance_window="sun:18:00-sun:18:30")
        bizcard_graph_db_replica_instance.add_depends_on(bizcard_graph_db)
        bizcard_graph_db_replica_instance.add_depends_on(
            bizcard_graph_db_instance)

        gremlinpython_lib_layer = _lambda.LayerVersion(
            self,
            "GremlinPythonLib",
            layer_version_name="gremlinpython-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(
                s3_lib_bucket, "var/octember-gremlinpython-lib.zip"))

        #XXX: https://github.com/aws/aws-cdk/issues/1342
        upsert_to_neptune_lambda_fn = _lambda.Function(
            self,
            "UpsertBizcardToGraphDB",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="UpsertBizcardToNeptune",
            handler="upsert_bizcard_to_graph_db.lambda_handler",
            description="Upsert bizcard into neptune",
            code=_lambda.Code.asset(
                "./src/main/python/UpsertBizcardToGraphDB"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_endpoint,
                'NEPTUNE_PORT': bizcard_graph_db.attr_port
            },
            timeout=core.Duration.minutes(5),
            layers=[gremlinpython_lib_layer],
            security_groups=[sg_use_bizcard_graph_db],
            vpc=vpc)

        upsert_to_neptune_lambda_fn.add_event_source(text_kinesis_event_source)

        log_group = aws_logs.LogGroup(
            self,
            "UpsertBizcardToGraphDBLogGroup",
            log_group_name="/aws/lambda/UpsertBizcardToNeptune",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(upsert_to_neptune_lambda_fn)

        sg_use_bizcard_neptune_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardNeptuneCacheClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard recommendation query cache client',
            security_group_name='use-octember-bizcard-neptune-cache')
        core.Tags.of(sg_use_bizcard_neptune_cache).add(
            'Name', 'use-octember-bizcard-neptune-cache')

        sg_bizcard_neptune_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardNeptuneCacheSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard recommendation query cache',
            security_group_name='octember-bizcard-neptune-cache')
        core.Tags.of(sg_bizcard_neptune_cache).add(
            'Name', 'octember-bizcard-neptune-cache')

        sg_bizcard_neptune_cache.add_ingress_rule(
            peer=sg_use_bizcard_neptune_cache,
            connection=aws_ec2.Port.tcp(6379),
            description='use-octember-bizcard-neptune-cache')

        recomm_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup(
            self,
            "RecommQueryCacheSubnetGroup",
            description="subnet group for octember-bizcard-neptune-cache",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            cache_subnet_group_name='octember-bizcard-neptune-cache')

        recomm_query_cache = aws_elasticache.CfnCacheCluster(
            self,
            "BizcardRecommQueryCache",
            cache_node_type="cache.t3.small",
            num_cache_nodes=1,
            engine="redis",
            engine_version="5.0.5",
            auto_minor_version_upgrade=False,
            cluster_name="octember-bizcard-neptune-cache",
            snapshot_retention_limit=3,
            snapshot_window="17:00-19:00",
            preferred_maintenance_window="mon:19:00-mon:20:30",
            #XXX: Do not use a reference for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098
            #cache_subnet_group_name=recomm_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC
            cache_subnet_group_name='octember-bizcard-neptune-cache',
            vpc_security_group_ids=[
                sg_bizcard_neptune_cache.security_group_id
            ])

        recomm_query_cache.add_depends_on(recomm_query_cache_subnet_group)

        bizcard_recomm_lambda_fn = _lambda.Function(
            self,
            "BizcardRecommender",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="BizcardRecommender",
            handler="neptune_recommend_bizcard.lambda_handler",
            description="This service serves PYMK(People You May Know).",
            code=_lambda.Code.asset("./src/main/python/RecommendBizcard"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_read_endpoint,
                'NEPTUNE_PORT': bizcard_graph_db.attr_port,
                'ELASTICACHE_HOST':
                recomm_query_cache.attr_redis_endpoint_address
            },
            timeout=core.Duration.minutes(1),
            layers=[gremlinpython_lib_layer, redis_lib_layer],
            security_groups=[
                sg_use_bizcard_graph_db, sg_use_bizcard_neptune_cache
            ],
            vpc=vpc)

        #XXX: create API Gateway + LambdaProxy
        recomm_api = apigw.LambdaRestApi(
            self,
            "BizcardRecommendAPI",
            handler=bizcard_recomm_lambda_fn,
            proxy=False,
            rest_api_name="BizcardRecommend",
            description="This service serves PYMK(People You May Know).",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        bizcard_recomm = recomm_api.root.add_resource('pymk')
        bizcard_recomm.add_method(
            "GET",
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ])

        sagemaker_notebook_role_policy_doc = aws_iam.PolicyDocument()
        sagemaker_notebook_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        "arn:aws:s3:::aws-neptune-notebook",
                        "arn:aws:s3:::aws-neptune-notebook/*"
                    ],
                    "actions": ["s3:GetObject", "s3:ListBucket"]
                }))

        sagemaker_notebook_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        "arn:aws:neptune-db:{region}:{account}:{cluster_id}/*".
                        format(region=core.Aws.REGION,
                               account=core.Aws.ACCOUNT_ID,
                               cluster_id=bizcard_graph_db.
                               attr_cluster_resource_id)
                    ],
                    "actions": ["neptune-db:connect"]
                }))

        sagemaker_notebook_role = aws_iam.Role(
            self,
            'SageMakerNotebookForNeptuneWorkbenchRole',
            role_name='AWSNeptuneNotebookRole-OctemberBizcard',
            assumed_by=aws_iam.ServicePrincipal('sagemaker.amazonaws.com'),
            #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
            inline_policies={
                'AWSNeptuneNotebook': sagemaker_notebook_role_policy_doc
            })

        neptune_wb_lifecycle_content = '''#!/bin/bash
sudo -u ec2-user -i <<'EOF'
echo "export GRAPH_NOTEBOOK_AUTH_MODE=DEFAULT" >> ~/.bashrc
echo "export GRAPH_NOTEBOOK_HOST={NeptuneClusterEndpoint}" >> ~/.bashrc
echo "export GRAPH_NOTEBOOK_PORT={NeptuneClusterPort}" >> ~/.bashrc
echo "export NEPTUNE_LOAD_FROM_S3_ROLE_ARN=''" >> ~/.bashrc
echo "export AWS_REGION={AWS_Region}" >> ~/.bashrc
aws s3 cp s3://aws-neptune-notebook/graph_notebook.tar.gz /tmp/graph_notebook.tar.gz
rm -rf /tmp/graph_notebook
tar -zxvf /tmp/graph_notebook.tar.gz -C /tmp
/tmp/graph_notebook/install.sh
EOF
'''.format(NeptuneClusterEndpoint=bizcard_graph_db.attr_endpoint,
           NeptuneClusterPort=bizcard_graph_db.attr_port,
           AWS_Region=core.Aws.REGION)

        neptune_wb_lifecycle_config_prop = aws_sagemaker.CfnNotebookInstanceLifecycleConfig.NotebookInstanceLifecycleHookProperty(
            content=core.Fn.base64(neptune_wb_lifecycle_content))

        neptune_wb_lifecycle_config = aws_sagemaker.CfnNotebookInstanceLifecycleConfig(
            self,
            'NeptuneWorkbenchLifeCycleConfig',
            notebook_instance_lifecycle_config_name=
            'AWSNeptuneWorkbenchOctemberBizcardLCConfig',
            on_start=[neptune_wb_lifecycle_config_prop])

        neptune_workbench = aws_sagemaker.CfnNotebookInstance(
            self,
            'NeptuneWorkbench',
            instance_type='ml.t2.medium',
            role_arn=sagemaker_notebook_role.role_arn,
            lifecycle_config_name=neptune_wb_lifecycle_config.
            notebook_instance_lifecycle_config_name,
            notebook_instance_name='OctemberBizcard-NeptuneWorkbench',
            root_access='Disabled',
            security_group_ids=[sg_use_bizcard_graph_db.security_group_id],
            subnet_id=bizcard_graph_db_subnet_group.subnet_ids[0])
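
A quick way to smoke-test the two REST endpoints defined above (GET /search on the search API and GET /pymk on the recommendation API) is a plain HTTP client. The sketch below assumes the stack is deployed; the invoke URLs are placeholders and the query-string parameter names are guesses, since es_search_bizcard.lambda_handler and neptune_recommend_bizcard.lambda_handler are not shown here.

import requests

# Invoke URLs printed by `cdk deploy` / shown in the API Gateway console (placeholders).
SEARCH_URL = "https://<search-api-id>.execute-api.<region>.amazonaws.com/v1/search"
PYMK_URL = "https://<recomm-api-id>.execute-api.<region>.amazonaws.com/v1/pymk"

# Hypothetical parameter names; adjust to whatever the handlers actually read.
print(requests.get(SEARCH_URL, params={"query": "john"}).json())
print(requests.get(PYMK_URL, params={"name": "john"}).json())
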
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # DynamoDB Table
        # Streaming is enabled to send the whole new object down the pipe
        table = dynamo_db.Table(self,
                                "TheDynamoStreamer",
                                partition_key=dynamo_db.Attribute(
                                    name="message",
                                    type=dynamo_db.AttributeType.STRING),
                                stream=dynamo_db.StreamViewType.NEW_IMAGE)

        # defines an AWS Lambda resource
        subscriber_lambda = _lambda.Function(
            self,
            "DynamoLambdaHandler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="lambda.handler",
            code=_lambda.Code.from_asset("lambda_fns/subscribe"))

        subscriber_lambda.add_event_source(
            _event.DynamoEventSource(
                table=table,
                starting_position=_lambda.StartingPosition.LATEST))

        # API Gateway Creation
        gateway = api_gw.RestApi(
            self,
            'DynamoStreamerAPI',
            deploy_options=api_gw.StageOptions(
                metrics_enabled=True,
                logging_level=api_gw.MethodLoggingLevel.INFO,
                data_trace_enabled=True,
                stage_name='prod'))

        # Give our gateway permissions to interact with dynamodb
        api_gw_dynamo_role = iam.Role(
            self,
            'DefaultLambdaHandlerRole',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
        table.grant_read_write_data(api_gw_dynamo_role)

        # shortening the lines of later code
        schema = api_gw.JsonSchema
        schema_type = api_gw.JsonSchemaType

        # Because this isn't a proxy integration, we need to define our response model
        response_model = gateway.add_model(
            'ResponseModel',
            content_type='application/json',
            model_name='ResponseModel',
            schema=schema(
                schema=api_gw.JsonSchemaVersion.DRAFT4,
                title='pollResponse',
                type=schema_type.OBJECT,
                properties={'message': schema(type=schema_type.STRING)}))

        error_response_model = gateway.add_model(
            'ErrorResponseModel',
            content_type='application/json',
            model_name='ErrorResponseModel',
            schema=schema(schema=api_gw.JsonSchemaVersion.DRAFT4,
                          title='errorResponse',
                          type=schema_type.OBJECT,
                          properties={
                              'state': schema(type=schema_type.STRING),
                              'message': schema(type=schema_type.STRING)
                          }))

        # This is the VTL to transform our incoming JSON to a Dynamo Insert Query
        request_template = {
            "TableName": table.table_name,
            "Item": {
                "message": {
                    "S": "$input.path('$.message')"
                }
            }
        }
        request_template_string = json.dumps(request_template,
                                             separators=(',', ':'))
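
        # Worked example (an assumption, for illustration): a POST body of
        # {"message": "hello"} is rendered by the template above into
        #   {"TableName": "<resolved table name>", "Item": {"message": {"S": "hello"}}}
        # which is exactly the shape DynamoDB's PutItem action expects.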

        # This is the VTL to transform the error response
        error_template = {
            "state": 'error',
            "message": "$util.escapeJavaScript($input.path('$.errorMessage'))"
        }
        error_template_string = json.dumps(error_template,
                                           separators=(',', ':'))

        # This is how our gateway chooses what response to send based on selection_pattern
        integration_options = api_gw.IntegrationOptions(
            credentials_role=api_gw_dynamo_role,
            request_templates={"application/json": request_template_string},
            passthrough_behavior=api_gw.PassthroughBehavior.NEVER,
            integration_responses=[
                api_gw.IntegrationResponse(
                    status_code='200',
                    response_templates={
                        "application/json":
                        json.dumps({"message": 'item added to db'})
                    }),
                api_gw.IntegrationResponse(
                    selection_pattern=r"^\[BadRequest\].*",
                    status_code='400',
                    response_templates={
                        "application/json": error_template_string
                    },
                    response_parameters={
                        'method.response.header.Content-Type':
                        "'application/json'",
                        'method.response.header.Access-Control-Allow-Origin':
                        "'*'",
                        'method.response.header.Access-Control-Allow-Credentials':
                        "'true'"
                    })
            ])

        # Add an InsertItem endpoint onto the gateway
        gateway.root.add_resource('InsertItem') \
            .add_method('POST', api_gw.Integration(type=api_gw.IntegrationType.AWS,
                                                   integration_http_method='POST',
                                                   uri='arn:aws:apigateway:us-east-1:dynamodb:action/PutItem',
                                                   options=integration_options
                                                   ),
                        method_responses=[
                            api_gw.MethodResponse(status_code='200',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': response_model
                                                  }),
                            api_gw.MethodResponse(status_code='400',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': error_response_model
                                                  }),
                        ]
                        )
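
The subscriber Lambda's source (lambda_fns/subscribe) is not part of this snippet. A minimal sketch of what the handler might look like is below; the DynamoDB Streams record layout is standard, but the actual processing logic is an assumption.

# lambda_fns/subscribe/lambda.py (sketch)
def handler(event, context):
    for record in event.get("Records", []):
        if record.get("eventName") == "INSERT":
            # NEW_IMAGE stream view: the full new item is available here.
            message = record["dynamodb"]["NewImage"]["message"]["S"]
            print(f"New item streamed from DynamoDB: {message}")
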
Example #10
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

## Create S3 Bucket. We'll populate it separately.
        bucket_name="{}-s3-ecs-apigw-test".format(core.Aws.ACCOUNT_ID)
        bucket = s3.Bucket(self, "s3-ecs-apigw-test",
            bucket_name=bucket_name,
            versioned=True,
            public_read_access=False
            )

## Create ECS Cluster, Task and Service
### Create the VPC for the demo
        vpc = ec2.Vpc(self, "MyVpc", max_azs=3)

### Create the ECS Cluster
        cluster = ecs.Cluster(self,
            "ecs-apigw-test-cluster",
            cluster_name="ecs-apigw-test-cluster",
            container_insights=True,
            vpc=vpc)
### Using the Network Load Balanced Fargate pattern, this will create the container definition, the task definition, the service and the Network Load Balancer for it.
        ecs_deploy = ecsp.NetworkLoadBalancedFargateService(self, 
            "ecs-apigw-test",
            cluster=cluster,
            cpu=512,
            desired_count=2,
            public_load_balancer=False,
            memory_limit_mib=2048,
            task_image_options=ecsp.NetworkLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_registry("strm/helloworld-http")
            ),
            health_check_grace_period=core.Duration.minutes(5)
            )
### Add an ingress rule to allow traffic through from the VPC's CIDR range.
        ecs_deploy.service.connections.security_groups[0].add_ingress_rule(
            ec2.Peer.ipv4(vpc.vpc_cidr_block),
            ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="",
                from_port=80,
                to_port=80
            )
        )
## Create API Gateway resources

### Create the VPC Link to the Network Load Balancer
        vpc_link = apigw.VpcLink(self,
            "ecs-test-vpc-link",
            targets=[ecs_deploy.load_balancer])
### Create the API
        api = apigw.RestApi(self, "ecs-s3-test-api",
                  rest_api_name="ECS S3 Test API",
                  description="Test API for distributing traffic to S3 and ECS",
                  binary_media_types=["image/png"])
### Create the execution role for the API methods. 
        rest_api_role = iam.Role(
            self,
            "RestAPIRole",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3ReadOnlyAccess")])

### Generic Method Response that can be used with each API method
        method_response = apigw.MethodResponse(status_code="200",
            response_parameters={"method.response.header.Content-Type": True})

### Root URI
        root_integration_response = apigw.IntegrationResponse(
            status_code="200",
            response_templates={"text/html": "$input.path('$')"},
            response_parameters={"method.response.header.Content-Type": "'text/html'"})
        root_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[root_integration_response],
            request_templates={"application/json": "Action=SendMessage&MessageBody=$input.body"},
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'"})
        root_resource_s3_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            subdomain=bucket_name,
            path="index.html",
            options=root_integration_options)

 
        root_method = api.root.add_method("GET",
            root_resource_s3_integration,
            method_responses=[method_response])


### API URI
        api_integration = apigw.Integration(
            type=apigw.IntegrationType.HTTP_PROXY,
            uri="http://{}".format(ecs_deploy.load_balancer.load_balancer_dns_name),
            integration_http_method="GET",
            options={
                "connection_type": apigw.ConnectionType.VPC_LINK,
                "vpc_link": vpc_link
            }
        )
        apis = api.root.add_resource("apis")
        apii = apis.add_resource("{api}")
        # apis = api.root.add_resource("apis")
        apii_get = apii.add_method("GET",
            api_integration,
            method_responses=[method_response],
            request_parameters={
                "method.request.path.api": True,})

## Add Images URI
        image_integration_response = apigw.IntegrationResponse(
            status_code="200",
            content_handling=apigw.ContentHandling.CONVERT_TO_BINARY,
            # response_templates={"text/html": "$input.path('$')"},
            response_parameters={"method.response.header.Content-Type": "'image/png'"})
        image_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[image_integration_response],
            request_templates={"application/json": "Action=SendMessage&MessageBody=$input.body"},
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'",
                "integration.request.path.image": "method.request.path.image"})
        images_resource_s3_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            subdomain=bucket_name,
            path="images/{image}",
            options=image_integration_options)
        images_resource = api.root.add_resource("images")
        image_resource = images_resource.add_resource("{image}")
        images_get = image_resource.add_method("GET",
            images_resource_s3_integration,
            method_responses=[method_response],
            request_parameters={
                "method.request.path.image": True,})

## Fall Through
        folder = api.root.add_resource("{folder}")
        item = folder.add_resource("{item}")
        integration_response = apigw.IntegrationResponse(
            status_code="200",
            response_templates={"text/html": "$input.path('$')"},
            response_parameters={"method.response.header.Content-Type": "'text/html'"})
        s3_proxy_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[integration_response],
            request_templates={"application/json": "Action=SendMessage&MessageBody=$input.body"},
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                "integration.request.header.Content-Type": "'application/x-www-form-urlencoded'",
                "integration.request.path.item": "method.request.path.item",
                "integration.request.path.folder": "method.request.path.folder"})
        s3_proxy_resource_s3_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            subdomain=bucket_name,
            path="{folder}/{item}",
            options=s3_proxy_integration_options)
        item_get = item.add_method("GET",
            s3_proxy_resource_s3_integration,
            method_responses=[method_response],
            request_parameters={
                "method.request.path.item": True,
                "method.request.path.folder": True
            }
            )
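
Once deployed, the API above serves three kinds of routes: the S3-hosted index.html at the root, binary objects under /images/{image}, and the ECS service (via the VPC link) under /apis/{api}. A minimal client sketch, with the invoke URL as a placeholder:

import requests

BASE = "https://<rest-api-id>.execute-api.<region>.amazonaws.com/prod"

print(requests.get(f"{BASE}/").text)            # index.html served from the S3 bucket
print(requests.get(f"{BASE}/apis/hello").text)  # proxied to the ECS service through the VPC link

# For the image route, the Accept header must match the registered binary media type
# so API Gateway returns the converted binary payload.
png = requests.get(f"{BASE}/images/logo.png", headers={"Accept": "image/png"}).content
with open("logo.png", "wb") as f:
    f.write(png)
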
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        env_name = self.node.try_get_context('env')

        #Create the SQS queue
        queue = sqs.Queue(self,
                          id=f"{env_name}-SQSQueue",
                          queue_name=f"{env_name}-queue")

        #Create the API GW service role with permissions to call SQS
        rest_api_role = iam.Role(
            self,
            id=f"{env_name}-RestAPISQSRole",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSQSFullAccess")
            ])

        #Create an API GW Rest API
        base_api = apigw.RestApi(
            self,
            id=f'{env_name}-ApiGW',
            rest_api_name=f'{env_name}SQSTestAPI',
            api_key_source_type=apigw.ApiKeySourceType.HEADER)

        usage_api_key_value = ''.join(
            random.choice(string.ascii_uppercase + string.ascii_lowercase +
                          string.digits) for _ in range(40))

        usage_api_key = base_api.add_api_key(id=f'{env_name}-apikey',
                                             value=usage_api_key_value)
        usage_plan = base_api.add_usage_plan(id=f'{env_name}-usageplan',
                                             name=f'{env_name}-usageplan',
                                             api_key=usage_api_key,
                                             throttle=apigw.ThrottleSettings(
                                                 rate_limit=10, burst_limit=2))
        usage_plan.add_api_stage(stage=base_api.deployment_stage)

        #Create a resource named "example" on the base API
        api_resource = base_api.root.add_resource('sqstest')

        #Create API Integration Response object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html
        integration_response = apigw.IntegrationResponse(
            status_code="200",
            response_templates={"application/json": ""},
        )

        #Create API Integration Options object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html
        api_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[integration_response],
            request_templates={
                "application/json":
                "Action=SendMessage&MessageBody=$input.body"
            },
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                "integration.request.header.Content-Type":
                "'application/x-www-form-urlencoded'"
            },
        )

        #Create AWS Integration Object for SQS: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/AwsIntegration.html
        api_resource_sqs_integration = apigw.AwsIntegration(
            service="sqs",
            integration_http_method="POST",
            # The path must be "{account-id}/{queue-name}", because that is how the SQS queue URL is formed
            path="{}/{}".format(core.Aws.ACCOUNT_ID, queue.queue_name),
            options=api_integration_options)

        #Create a Method Response Object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/MethodResponse.html
        method_response = apigw.MethodResponse(status_code="200")

        #Add the API GW Integration to the "example" API GW Resource
        api_resource.add_method("POST",
                                api_resource_sqs_integration,
                                method_responses=[method_response],
                                api_key_required=True)

        #Creating Lambda function that will be triggered by the SQS Queue
        sqs_lambda = _lambda.Function(
            self,
            'SQSTriggerLambda',
            handler='sqs_lambda.handler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset('pr_sqs_lambda'),
        )

        #Create an SQS event source for Lambda
        sqs_event_source = lambda_event_source.SqsEventSource(queue)

        #Add SQS event source to the Lambda function
        sqs_lambda.add_event_source(sqs_event_source)

        # https://67ixnggm81.execute-api.us-east-1.amazonaws.com/prod/sqstest
        region = core.Aws.REGION
        core.CfnOutput(self,
                       'api-gw-url',
                       value='https://' + base_api.rest_api_id +
                       '.execute-api.' + region +
                       '.amazonaws.com/prod/sqstest',
                       export_name='api-sqs-gw-url')
        print(f'API Key: {usage_api_key_value}')
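
Because the POST method sets api_key_required=True, callers must send the generated key in the x-api-key header. A minimal sketch of pushing a message through the gateway into SQS (the URL is a placeholder):

import requests

url = "https://<rest-api-id>.execute-api.<region>.amazonaws.com/prod/sqstest"
resp = requests.post(url,
                     json={"hello": "world"},  # forwarded verbatim as the SQS message body
                     headers={"x-api-key": "<api key printed above>"})
print(resp.status_code, resp.text)
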
        """
Example #12
0
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 stack_name: str,
                 task_definition_cpu: int,
                 task_definition_memory_limit_mib: int,
                 docker_image_name: str,
                 container_port: int,
                 desired_container_count: int,
                 private_subnets: Sequence[aws_ec2.Subnet] = None,
                 public_subnets: Sequence[aws_ec2.Subnet] = None,
                 private_security_group: aws_ec2.SecurityGroup = None,
                 public_security_group: aws_ec2.SecurityGroup = None,
                 vpc: aws_ec2.Vpc = None,
                 fargate_cluster: aws_ecs.Cluster = None,
                 authorizer_lambda_arn: str = None,
                 authorizer_lambda_role_arn: str = None,
                 **kwargs):
        super().__init__(scope, id, **kwargs)

        # Role
        self.role = aws_iam.Role(
            self,
            'Role',
            assumed_by=aws_iam.ServicePrincipal(service='ecs.amazonaws.com'),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    managed_policy_name=
                    'service-role/AmazonECSTaskExecutionRolePolicy')
            ],
            inline_policies={
                id:
                aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(
                        effect=aws_iam.Effect.ALLOW,
                        actions=[
                            'kms:Encrypt',
                            'kms:Decrypt',
                            'kms:ReEncrypt*',
                            'kms:GenerateDataKey*',
                            'kms:DescribeKey',
                            'ec2:CreateNetworkInterface',
                            'ec2:DescribeNetworkInterfaces',
                            'ec2:DeleteNetworkInterface',
                            # Remaining actions from https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/quickref-ecs.html
                            'elasticloadbalancing:DeregisterInstancesFromLoadBalancer',
                            'elasticloadbalancing:DeregisterTargets',
                            'elasticloadbalancing:Describe*',
                            'elasticloadbalancing:RegisterInstancesWithLoadBalancer',
                            'elasticloadbalancing:RegisterTargets',
                            'ec2:Describe*',
                            'ec2:AuthorizeSecurityGroupIngress'
                        ],
                        resources=['*'])
                ])
            })
        self.role.assume_role_policy.add_statements(
            aws_iam.PolicyStatement(
                actions=['sts:AssumeRole'],
                principals=[
                    aws_iam.ServicePrincipal(service='ecs-tasks.amazonaws.com')
                ]))

        # Set Defaults if parameters are None
        if vpc is None:
            vpc = aws_ec2.Vpc(self, 'Vpc')

        if private_subnets is None:
            private_subnets = vpc.private_subnets

        if public_subnets is None:
            public_subnets = vpc.public_subnets

        if public_security_group is None:
            public_security_group = aws_ec2.SecurityGroup(
                self, 'PublicSecurityGroup', vpc=vpc, allow_all_outbound=True)
            # Allow inbound HTTP traffic
            public_security_group.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(cidr_ip='0.0.0.0/0'),
                connection=aws_ec2.Port.tcp(port=80))
            # Allow inbound HTTPS traffic
            public_security_group.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(cidr_ip='0.0.0.0/0'),
                connection=aws_ec2.Port.tcp(port=443))

        if private_security_group is None:
            private_security_group = aws_ec2.SecurityGroup(
                self, 'PrivateSecurityGroup', vpc=vpc, allow_all_outbound=True)

            public_subnet_cidr_blocks = Utils.get_subnet_cidr_blocks(
                public_subnets)

            # Create an ingress rule for each of the NLB's subnet's CIDR ranges and add the rules to the ECS service's
            # security group.  This will allow requests from the NLB to go into the ECS service.  This allow inbound
            # traffic from public subnets.
            for cidr_block in public_subnet_cidr_blocks:
                private_security_group.add_ingress_rule(
                    peer=aws_ec2.Peer.ipv4(cidr_ip=cidr_block),
                    connection=aws_ec2.Port.tcp(port=container_port))

        if fargate_cluster is None:
            fargate_cluster = aws_ecs.Cluster(
                self,
                'FargateCluster',
            )

        task_def = aws_ecs.FargateTaskDefinition(
            self,
            'TaskDefinition',
            cpu=task_definition_cpu,
            memory_limit_mib=task_definition_memory_limit_mib,
            task_role=self.role,
            execution_role=self.role)

        container = aws_ecs.ContainerDefinition(
            self,
            'Container',
            image=aws_ecs.ContainerImage.from_registry(name=docker_image_name),
            task_definition=task_def,
            logging=aws_ecs.AwsLogDriver(stream_prefix='/ecs'))
        container.add_port_mappings(
            aws_ecs.PortMapping(container_port=container_port,
                                protocol=aws_ecs.Protocol.TCP))

        ecs_service = aws_ecs.FargateService(
            self,
            'FargateService',
            cluster=fargate_cluster,
            task_definition=task_def,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=private_subnets),
            security_group=private_security_group,
            desired_count=desired_container_count)

        target_group = aws_elasticloadbalancingv2.NetworkTargetGroup(
            self,
            'TargetGroup',
            port=80,  # Traffic port on the targets; health checks use TCP (below)
            health_check=aws_elasticloadbalancingv2.HealthCheck(
                protocol=aws_elasticloadbalancingv2.Protocol.TCP),
            targets=[ecs_service],
            vpc=vpc)

        nlb = aws_elasticloadbalancingv2.NetworkLoadBalancer(
            self,
            'NetworkLoadBalancer',
            vpc=vpc,
            internet_facing=False,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=public_subnets),
        )
        nlb.add_listener(
            id='Listener',
            port=80,  # TCP listener on port 80 (carries HTTP traffic)
            default_target_groups=[target_group])

        # nlb.log_access_logs(  # todo:  add this later when you have time to research the correct bucket policy.
        #     bucket=aws_s3.Bucket(
        #         self, 'LoadBalancerLogBucket',
        #         bucket_name='load-balancer-logs',
        #         public_read_access=False,
        #         block_public_access=aws_s3.BlockPublicAccess(
        #             block_public_policy=True,
        #             restrict_public_buckets=True
        #         )
        #     )
        # )

        # Dependencies
        ecs_service.node.add_dependency(nlb)

        # API Gateway
        rest_api = aws_apigateway.RestApi(self, stack_name)
        resource = rest_api.root.add_resource(
            path_part='{proxy+}',
            default_method_options=aws_apigateway.MethodOptions(
                request_parameters={'method.request.path.proxy': True}))

        token_authorizer = None
        if authorizer_lambda_arn and authorizer_lambda_role_arn:
            token_authorizer = aws_apigateway.TokenAuthorizer(  #todo: make this a parameter?
                self,
                'JwtTokenAuthorizer',
                results_cache_ttl=core.Duration.minutes(5),
                identity_source='method.request.header.Authorization',
                assume_role=aws_iam.Role.from_role_arn(
                    self,
                    'AuthorizerLambdaInvokationRole',
                    role_arn=authorizer_lambda_role_arn),
                handler=aws_lambda.Function.from_function_arn(
                    self,
                    'AuthorizerLambda',
                    function_arn=authorizer_lambda_arn))

        resource.add_method(
            http_method='ANY',
            authorization_type=aws_apigateway.AuthorizationType.CUSTOM,
            authorizer=token_authorizer,
            integration=aws_apigateway.HttpIntegration(
                url=f'http://{nlb.load_balancer_dns_name}/{{proxy}}',
                http_method='ANY',
                proxy=True,
                options=aws_apigateway.IntegrationOptions(
                    request_parameters={
                        'integration.request.path.proxy':
                        'method.request.path.proxy'
                    },
                    connection_type=aws_apigateway.ConnectionType.VPC_LINK,
                    vpc_link=aws_apigateway.VpcLink(
                        self,
                        'VpcLink',
                        description=
                        f'API Gateway VPC Link to internal NLB for {stack_name}',
                        vpc_link_name=stack_name,
                        targets=[nlb]))))
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        organization_id = self.node.try_get_context("organizationId")

        # create dynamo table
        allocation_table = aws_dynamodb.Table(
            self,
            "CidrBlockTable",
            partition_key=aws_dynamodb.Attribute(
                name="vpcCidrBlock", type=aws_dynamodb.AttributeType.STRING))

        # create producer lambda function
        create_lambda = aws_lambda.Function(
            self,
            "create_lambda_function",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="create.lambda_handler",
            code=aws_lambda.Code.asset("./src/"))

        create_lambda.add_environment("TABLE_NAME",
                                      allocation_table.table_name)
        create_lambda.add_environment("MASTER_CIDR_BLOCK", "10.0.0.0/12")
        create_lambda.add_environment("VPC_NETMASK", "24")
        create_lambda.add_environment("SUBNET_NETMASK", "26")

        # grant permission to lambda to write to demo table
        allocation_table.grant_write_data(create_lambda)
        allocation_table.grant_read_data(create_lambda)

        # API gateway ... Allow own Organizations
        api_policy = aws_iam.PolicyDocument()

        api_policy.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                principals=[aws_iam.AnyPrincipal()],
                actions=["execute-api:Invoke"],
                resources=[core.Fn.join('', ['execute-api:/', '*'])]))

        api_policy.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.DENY,
                actions=["execute-api:Invoke"],
                conditions={
                    "StringNotEquals": {
                        "aws:PrincipalOrgID": [organization_id]
                    }
                },
                principals=[aws_iam.AnyPrincipal()],
                resources=[core.Fn.join('', ['execute-api:/', '*'])]))

        base_api = aws_apigateway.RestApi(self,
                                          'ApiGateway',
                                          rest_api_name='cidr_vending_machine',
                                          policy=api_policy)

        vpc_api = base_api.root.add_resource('vpc')

        rest_api_role = aws_iam.Role(
            self,
            'RestAPIRole',
            assumed_by=aws_iam.ServicePrincipal('apigateway.amazonaws.com'),
        )
        allocation_table.grant_read_write_data(rest_api_role)

        patch_request_string = """
            {{
            "TableName": "{}",
                "Key": {{
                        "vpcCidrBlock": {{
                            "S": "$input.params('cidr_block')"
                        }}
                    }},
                    "UpdateExpression": "set vpcId = :v",
                    "ConditionExpression": "accountId = :v2",
                    "ExpressionAttributeValues" : {{
                        ":v": {{"S": "$input.params('vpc_id')"}},
                        ":v2": {{"S": "$context.identity.accountId"}}
                    }},
                    "ReturnValues": "ALL_NEW"

            }}"""

        delete_request_string = """
            {{
            "TableName": "{}",
                "Key": {{
                        "vpcCidrBlock": {{
                            "S": "$input.params('cidr_block')"
                        }}
                    }},
                "ConditionExpression": "accountId = :v2",
                "ExpressionAttributeValues" : {{
                    ":v2": {{"S": "$context.identity.accountId"}}
                }}
            }}"""

        network_integration = aws_apigateway.LambdaIntegration(create_lambda)
        update_integration = aws_apigateway.AwsIntegration(
            service='dynamodb',
            action='UpdateItem',
            integration_http_method='POST',
            options=aws_apigateway.IntegrationOptions(
                request_templates={
                    "application/json":
                    patch_request_string.format(allocation_table.table_name)
                },
                integration_responses=[
                    aws_apigateway.IntegrationResponse(status_code="200")
                ],
                credentials_role=rest_api_role))

        delete_integration = aws_apigateway.AwsIntegration(
            service='dynamodb',
            action='DeleteItem',
            integration_http_method='POST',
            options=aws_apigateway.IntegrationOptions(
                request_templates={
                    "application/json":
                    delete_request_string.format(allocation_table.table_name)
                },
                integration_responses=[
                    aws_apigateway.IntegrationResponse(status_code="200")
                ],
                credentials_role=rest_api_role))

        vpc_api.add_method(
            'POST',
            network_integration,
            authorization_type=aws_apigateway.AuthorizationType.IAM)
        vpc_api.add_method(
            'DELETE',
            delete_integration,
            authorization_type=aws_apigateway.AuthorizationType.IAM,
            method_responses=[
                aws_apigateway.MethodResponse(status_code="200")
            ])
        vpc_api.add_method(
            'PATCH',
            update_integration,
            authorization_type=aws_apigateway.AuthorizationType.IAM,
            method_responses=[
                aws_apigateway.MethodResponse(status_code="200")
            ])
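
All three methods use IAM authorization, so callers must SigV4-sign their requests with credentials from an account inside the allowed organization. A minimal signing sketch with botocore; the URL is a placeholder and the empty JSON body is an assumption, since create.lambda_handler's expected payload is not shown:

import json
import boto3
import requests
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest

url = "https://<rest-api-id>.execute-api.<region>.amazonaws.com/prod/vpc"
session = boto3.Session()
request = AWSRequest(method="POST", url=url, data=json.dumps({}),
                     headers={"Content-Type": "application/json"})
SigV4Auth(session.get_credentials(), "execute-api", session.region_name).add_auth(request)
resp = requests.post(url, data=request.body, headers=dict(request.headers))
print(resp.status_code, resp.text)
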
Example #14
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        my_api = apigw.RestApi(
            self,
            "SageMakerAPI",
            default_cors_preflight_options=apigw.CorsOptions(
                allow_origins=["*"], allow_headers=["*"], allow_methods=["*"]))
        endpoint_name = self.node.try_get_context("endpoint_name")

        options = apigw.IntegrationOptions(
            credentials_role=iam.Role(
                self,
                "SMInvoke",
                assumed_by=iam.ServicePrincipal("apigateway"),
                inline_policies={
                    "FullSageMaker":
                    iam.PolicyDocument(statements=[
                        iam.PolicyStatement(actions=["*"], resources=["*"])
                    ])
                }),
            integration_responses=[
                apigw.IntegrationResponse(
                    status_code="200",
                    response_templates={"application/json": ""},
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Headers":
                        "'*'",
                        "method.response.header.Access-Control-Allow-Methods":
                        "'*'",
                        "method.response.header.Access-Control-Allow-Origin":
                        "'*'"
                    })
            ])

        integration = apigw.Integration(
            type=apigw.IntegrationType.AWS,
            uri=
            "arn:aws:apigateway:us-east-1:runtime.sagemaker:path/endpoints/{}/invocations"
            .format(self.node.try_get_context("endpoint_name")),
            integration_http_method="POST",
            options=options)

        apigw.Method(
            self,
            "PostRoot",
            http_method="POST",
            resource=my_api.root,
            integration=integration,
            options=apigw.MethodOptions(method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Methods":
                        True,
                        "method.response.header.Access-Control-Allow-Headers":
                        True,
                        "method.response.header.Access-Control-Allow-Origin":
                        True
                    })
            ]))
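
With the CORS preflight options and the 200 responses wired up above, any HTTP client (including a browser) can POST inference payloads to the API root, which forwards them to the SageMaker endpoint. The body format depends entirely on the model behind endpoint_name, so the JSON below is only a placeholder:

import requests

url = "https://<rest-api-id>.execute-api.us-east-1.amazonaws.com/prod/"
resp = requests.post(url,
                     json={"instances": [[1.0, 2.0, 3.0]]},  # placeholder payload
                     headers={"Content-Type": "application/json"})
print(resp.status_code, resp.text)
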
Example #15
0
 def _add_get_integration(self, rest_api: apigw.Resource, asset_bucket,
                          s3_integration_role) -> apigw.Resource:
     """
     Add integration for /ui/
     """
     s3_integration = apigw.AwsIntegration(
         service='s3',
         path=f'{asset_bucket.bucket_name}/index.html',
         integration_http_method='GET',
         options=apigw.IntegrationOptions(
             credentials_role=s3_integration_role,
             integration_responses=[
                 apigw.IntegrationResponse(
                     status_code='200',
                     response_parameters={
                         'method.response.header.Content-Type':
                         'integration.response.header.Content-Type',
                         'method.response.header.Content-Length':
                         'integration.response.header.Content-Length',
                         'method.response.header.Timestamp':
                         'integration.response.header.Date'
                     }),
                 apigw.IntegrationResponse(
                     status_code='400',
                     selection_pattern=r'4\d{2}',
                     response_parameters={
                         'method.response.header.Content-Type':
                         'integration.response.header.Content-Type',
                         'method.response.header.Content-Length':
                         'integration.response.header.Content-Length'
                     }),
                 apigw.IntegrationResponse(
                     status_code='500',
                     selection_pattern=r'5\d{2}',
                     response_parameters={
                         'method.response.header.Content-Type':
                         'integration.response.header.Content-Type',
                         'method.response.header.Content-Length':
                         'integration.response.header.Content-Length'
                     })
             ]))
     ui = rest_api.add_resource('ui')
     ui.add_method('GET',
                   integration=s3_integration,
                   method_responses=[
                       apigw.MethodResponse(
                           status_code='200',
                           response_parameters={
                               'method.response.header.Content-Type': True,
                               'method.response.header.Content-Length':
                               True,
                               'method.response.header.Timestamp': True
                           }),
                       apigw.MethodResponse(
                           status_code='400',
                           response_parameters={
                               'method.response.header.Content-Type': True,
                               'method.response.header.Content-Length': True
                           }),
                       apigw.MethodResponse(
                           status_code='500',
                           response_parameters={
                               'method.response.header.Content-Type': True,
                               'method.response.header.Content-Length': True
                           })
                   ])
     return ui
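
The s3_integration_role passed into this helper is created elsewhere in the stack; a minimal sketch of what it needs to provide (assumable by API Gateway, read access to the asset bucket) is below. The construct ID is hypothetical, and self / asset_bucket refer to the surrounding stack.

import aws_cdk.aws_iam as iam

# Sketch only; the real role may carry additional statements.
s3_integration_role = iam.Role(
    self, 'UiS3IntegrationRole',
    assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
s3_integration_role.add_to_policy(
    iam.PolicyStatement(
        actions=['s3:GetObject'],
        resources=[asset_bucket.arn_for_objects('*')]))
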
Example #16
0
 def _add_item_integration(self, ui_resource: apigw.Resource, asset_bucket,
                           s3_integration_role) -> apigw.Resource:
     """
     Add integration for /ui/{object}
     """
     s3_integration = apigw.AwsIntegration(
         service='s3',
         path=f'{asset_bucket.bucket_name}/{{object}}',
         integration_http_method='GET',
         options=apigw.IntegrationOptions(
             credentials_role=s3_integration_role,
             integration_responses=[
                 apigw.IntegrationResponse(
                     status_code='200',
                     response_parameters={
                         'method.response.header.Content-Type':
                         'integration.response.header.Content-Type',
                         'method.response.header.Content-Length':
                         'integration.response.header.Content-Length',
                         'method.response.header.Timestamp':
                         'integration.response.header.Date'
                     }),
                 apigw.IntegrationResponse(
                     status_code='400',
                     selection_pattern=r'4\d{2}',
                     response_parameters={
                         'method.response.header.Content-Type':
                         'integration.response.header.Content-Type',
                         'method.response.header.Content-Length':
                         'integration.response.header.Content-Length'
                     }),
                 apigw.IntegrationResponse(
                     status_code='500',
                     selection_pattern=r'5\d{2}',
                     response_parameters={
                         'method.response.header.Content-Type':
                         'integration.response.header.Content-Type',
                         'method.response.header.Content-Length':
                         'integration.response.header.Content-Length'
                     })
             ],
             request_parameters={
                 'integration.request.path.object':
                 'method.request.path.item'
             }))
     ui_item = ui_resource.add_resource('{item}')
     ui_item.add_method(
         'GET',
         integration=s3_integration,
         method_responses=[
             apigw.MethodResponse(
                 status_code='200',
                 response_parameters={
                     'method.response.header.Content-Type': True,
                     'method.response.header.Content-Length': True,
                     'method.response.header.Timestamp': True
                 }),
             apigw.MethodResponse(
                 status_code='400',
                 response_parameters={
                     'method.response.header.Content-Type': True,
                     'method.response.header.Content-Length': True
                 }),
             apigw.MethodResponse(
                 status_code='500',
                 response_parameters={
                     'method.response.header.Content-Type': True,
                     'method.response.header.Content-Length': True
                 })
         ],
         request_parameters={'method.request.path.item': True})
     return ui_item
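
A short usage note: the two helpers above are meant to be chained, with the /ui resource returned by the first passed into the second (the surrounding stack and argument names are assumed):

# Inside the stack's __init__ (sketch):
ui = self._add_get_integration(api.root, asset_bucket, s3_integration_role)
self._add_item_integration(ui, asset_bucket, s3_integration_role)
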
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create two VPCs - one to host our private website, the other to act as a client
        website_vpc = Vpc(
            self,
            "WEBSITEVPC",
            cidr="10.0.0.0/16",
        )
        client_vpc = Vpc(
            self,
            "ClientVPC",
            cidr="10.1.0.0/16",
        )

        # Create a bastion host in the client API which will act like our client workstation
        bastion = BastionHostLinux(
            self,
            "WEBClient",
            vpc=client_vpc,
            instance_name='my-bastion',
            instance_type=InstanceType('t3.micro'),
            machine_image=AmazonLinuxImage(),
            subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE),
            security_group=SecurityGroup(
                scope=self,
                id='bastion-sg',
                security_group_name='bastion-sg',
                description=
                'Security group for the bastion; no inbound rules are open because we'
                ' access the bastion via AWS SSM',
                vpc=client_vpc,
                allow_all_outbound=True))

        # Set up a VPC peering connection between client and API VPCs, and adjust
        # the routing table to allow connections back and forth
        VpcPeeringHelper(self, 'Peering', website_vpc, client_vpc)

        # Create a VPC endpoint for API Gateway
        vpc_endpoint = InterfaceVpcEndpoint(
            self,
            'APIGWVpcEndpoint',
            vpc=website_vpc,
            service=InterfaceVpcEndpointAwsService.APIGATEWAY,
            private_dns_enabled=True,
        )
        vpc_endpoint.connections.allow_from(bastion, Port.tcp(443))
        endpoint_id = vpc_endpoint.vpc_endpoint_id

        api_policy = iam.PolicyDocument(statements=[
            iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                                actions=['execute-api:Invoke'],
                                resources=['execute-api:/*'],
                                effect=iam.Effect.DENY,
                                conditions={
                                    "StringNotEquals": {
                                        "aws:SourceVpce": endpoint_id
                                    }
                                }),
            iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                                actions=['execute-api:Invoke'],
                                resources=['execute-api:/*'],
                                effect=iam.Effect.ALLOW)
        ])

        # Create an s3 bucket to hold the content
        content_bucket = s3.Bucket(self,
                                   "ContentBucket",
                                   removal_policy=core.RemovalPolicy.DESTROY)

        # Upload our static content to the bucket
        s3dep.BucketDeployment(self,
                               "DeployWithInvalidation",
                               sources=[s3dep.Source.asset('website')],
                               destination_bucket=content_bucket)

        # Create a private API GW in the API VPC
        api = apigw.RestApi(self,
                            'PrivateS3Api',
                            endpoint_configuration=apigw.EndpointConfiguration(
                                types=[apigw.EndpointType.PRIVATE],
                                vpc_endpoints=[vpc_endpoint]),
                            policy=api_policy)

        # Create a role to allow API GW to access our S3 bucket contents
        role = iam.Role(
            self,
            "Role",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"))
        role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[
                                    content_bucket.bucket_arn,
                                    content_bucket.bucket_arn + '/*'
                                ],
                                actions=["s3:Get*"]))

        # Create a proxy resource that captures all non-root resource requests
        resource = api.root.add_resource("{proxy+}")
        # Create an integration with S3
        resource_integration = apigw.Integration(
            type=apigw.IntegrationType.AWS,
            integration_http_method='GET',
            options=apigw.IntegrationOptions(
                request_parameters=
                {  # map the proxy parameter so we can pass the request path
                    "integration.request.path.proxy":
                    "method.request.path.proxy"
                },
                integration_responses=[
                    apigw.IntegrationResponse(
                        status_code='200',
                        response_parameters=
                        {  # map the content type of the S3 object back to the HTTP response
                            "method.response.header.Content-Type":
                            "integration.response.header.Content-Type"
                        })
                ],
                credentials_role=role),
            # reference the bucket content we want to retrieve
            uri='arn:aws:apigateway:eu-west-1:s3:path/%s/{proxy}' %
            (content_bucket.bucket_name))
        # handle the GET request and map it to our new integration
        resource.add_method(
            "GET",
            resource_integration,
            method_responses=[
                apigw.MethodResponse(status_code='200',
                                     response_parameters={
                                         "method.response.header.Content-Type":
                                         False
                                     })
            ],
            request_parameters={"method.request.path.proxy": True})
        # Handle requests to the root of our site
        # Create another integration with S3 - this time with no proxy parameter
        resource_integration = apigw.Integration(
            type=apigw.IntegrationType.AWS,
            integration_http_method='GET',
            options=apigw.IntegrationOptions(
                integration_responses=[
                    apigw.IntegrationResponse(
                        status_code='200',
                        response_parameters=
                        {  # map the content type of the S3 object back to the HTTP response
                            "method.response.header.Content-Type":
                            "integration.response.header.Content-Type"
                        })
                ],
                credentials_role=role),
            # reference the bucket content we want to retrieve
            uri='arn:aws:apigateway:eu-west-1:s3:path/%s/index.html' %
            (content_bucket.bucket_name))
        # handle the GET request and map it to our new integration
        api.root.add_method("GET",
                            resource_integration,
                            method_responses=[
                                apigw.MethodResponse(
                                    status_code='200',
                                    response_parameters={
                                        "method.response.header.Content-Type":
                                        False
                                    })
                            ])
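The VpcPeeringHelper construct referenced above is not shown in this snippet; a minimal sketch of what such a helper might look like (an assumption, not the author's implementation) is:

from aws_cdk import aws_ec2 as ec2, core


class VpcPeeringHelper(core.Construct):
    def __init__(self, scope: core.Construct, id: str,
                 vpc_a: ec2.Vpc, vpc_b: ec2.Vpc) -> None:
        super().__init__(scope, id)

        # Peer the two VPCs.
        peering = ec2.CfnVPCPeeringConnection(
            self, "VpcPeering",
            vpc_id=vpc_a.vpc_id,
            peer_vpc_id=vpc_b.vpc_id)

        # Route traffic from every subnet in each VPC to the CIDR of the other.
        for i, subnet in enumerate(vpc_a.private_subnets + vpc_a.public_subnets):
            ec2.CfnRoute(self, f"RouteAtoB{i}",
                         route_table_id=subnet.route_table.route_table_id,
                         destination_cidr_block=vpc_b.vpc_cidr_block,
                         vpc_peering_connection_id=peering.ref)
        for i, subnet in enumerate(vpc_b.private_subnets + vpc_b.public_subnets):
            ec2.CfnRoute(self, f"RouteBtoA{i}",
                         route_table_id=subnet.route_table.route_table_id,
                         destination_cidr_block=vpc_a.vpc_cidr_block,
                         vpc_peering_connection_id=peering.ref)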
    def __init__(self, app: App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        bucket: s3.Bucket = s3.Bucket(self, "WidgetStore")

        api: apigw.RestApi = apigw.RestApi(
            self,
            "widgets-api",
            rest_api_name="Widget Service",
            description="This service serves widgets.")

        rest_api_role: iam.Role = iam.Role(
            self,
            "RestAPIRole",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonS3FullAccess")
            ])

        list_objects_response: apigw.IntegrationResponse = apigw.IntegrationResponse(
            status_code="200")

        list_objects_integration_options: apigw.IntegrationOptions = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[list_objects_response],
        )

        get_widget_integration_options: apigw.IntegrationOptions = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[list_objects_response],
            request_templates={
                "application/json":
                "#set($context.requestOverride.path.object = $input.params('id'))"
            })

        put_widget_integration_options: apigw.IntegrationOptions = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[list_objects_response],
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                "integration.request.path.object": "method.request.path.id"
            },
            request_templates={
                "application/json":
                "#set($now=$context.requestTimeEpoch)\n"
                "#set($body=\"$input.params('id') created $now\")"
                "\n$util.base64Encode($body)"
            })

        get_widgets_integration: apigw.AwsIntegration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path=bucket.bucket_name,
            options=list_objects_integration_options)

        get_widget_integration: apigw.AwsIntegration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path="{}/{{object}}".format(bucket.bucket_name),
            options=get_widget_integration_options)

        put_widget_integration: apigw.AwsIntegration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="PUT",
            path="{}/{{object}}".format(bucket.bucket_name),
            options=put_widget_integration_options)

        delete_widget_integration: apigw.AwsIntegration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="DELETE",
            path="{}/{{object}}".format(bucket.bucket_name),
            options=get_widget_integration_options)

        method_response: apigw.MethodResponse = apigw.MethodResponse(
            status_code="200")

        api.root.add_method("GET",
                            get_widgets_integration,
                            method_responses=[method_response])

        widget = api.root.add_resource('{id}')
        widget.add_method("GET",
                          get_widget_integration,
                          method_responses=[method_response])

        widget.add_method("POST",
                          put_widget_integration,
                          method_responses=[method_response],
                          request_parameters={"method.request.path.id": True})

        widget.add_method("DELETE",
                          delete_widget_integration,
                          method_responses=[method_response])
Example #19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ###
        # Let's create our own Event Bus for this rather than using default
        ###
        bus = events.EventBus(self,
                              'DestinedEventBus',
                              event_bus_name='the-destined-lambda')

        ###
        # Destinations only trigger on asynchronous invocations, so let's use SNS
        ###
        topic = sns.Topic(self,
                          'theDestinedLambdaTopic',
                          display_name='The Destined Lambda CDK Pattern Topic')

        ###
        # Lambda configured with success and failure destinations
        # Note the actual lambda has no EventBridge code inside it
        ###
        destined_lambda = _lambda.Function(
            self,
            "destinedLambda",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="destinedLambda.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
            retry_attempts=0,
            on_success=destinations.EventBridgeDestination(event_bus=bus),
            on_failure=destinations.EventBridgeDestination(event_bus=bus))
        topic.add_subscription(
            subscriptions.LambdaSubscription(destined_lambda))

        ###
        # This is a lambda that will be called by onSuccess for destinedLambda
        # It simply prints the event it receives to the cloudwatch logs
        ###
        success_lambda = _lambda.Function(
            self,
            "successLambda",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="success.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
            timeout=core.Duration.seconds(3))
        ###
        # EventBridge Rule to send events to our success lambda
        # Notice how we can still do event filtering based on the json payload returned by the destined lambda
        ###
        success_rule = events.Rule(
            self,
            'successRule',
            event_bus=bus,
            description=
            'all success events are caught here and logged centrally',
            event_pattern=events.EventPattern(
                detail={
                    "requestContext": {
                        "condition": ["Success"]
                    },
                    "responsePayload": {
                        "source": ["cdkpatterns.the-destined-lambda"],
                        "action": ["message"]
                    }
                }))
        success_rule.add_target(targets.LambdaFunction(success_lambda))

        ###
        # This is a lambda that will be called by onFailure for destinedLambda
        # It simply prints the event it receives to the cloudwatch logs.
        # Notice how it includes the message that caused the destined lambda to fail, so you have
        # everything you need to retry or investigate manually
        ###
        failure_lambda = _lambda.Function(
            self,
            "failureLambda",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="failure.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
            timeout=core.Duration.seconds(3))

        ###
        # EventBridge Rule to send events to our failure lambda
        ###
        failure_rule = events.Rule(
            self,
            'failureRule',
            event_bus=bus,
            description=
            'all failure events are caught here and logged centrally',
            event_pattern=events.EventPattern(
                detail={"responsePayload": {
                    "errorType": ["Error"]
                }}))
        failure_rule.add_target(targets.LambdaFunction(failure_lambda))

        ###
        # API Gateway Creation
        # This is involved because it transforms the incoming JSON payload into a query string,
        # which is used to publish the payload to SNS without a Lambda in between
        ###

        gateway = api_gw.RestApi(
            self,
            'theDestinedLambdaAPI',
            deploy_options=api_gw.StageOptions(
                metrics_enabled=True,
                logging_level=api_gw.MethodLoggingLevel.INFO,
                data_trace_enabled=True,
                stage_name='prod'))
        # Give our gateway permissions to interact with SNS
        api_gw_sns_role = iam.Role(
            self,
            'ApiGatewaySNSRole',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
        topic.grant_publish(api_gw_sns_role)

        # shortening the lines of later code
        schema = api_gw.JsonSchema
        schema_type = api_gw.JsonSchemaType

        # Because this isn't a proxy integration, we need to define our response model
        response_model = gateway.add_model(
            'ResponseModel',
            content_type='application/json',
            model_name='ResponseModel',
            schema=schema(
                schema=api_gw.JsonSchemaVersion.DRAFT4,
                title='pollResponse',
                type=schema_type.OBJECT,
                properties={'message': schema(type=schema_type.STRING)}))

        error_response_model = gateway.add_model(
            'ErrorResponseModel',
            content_type='application/json',
            model_name='ErrorResponseModel',
            schema=schema(schema=api_gw.JsonSchemaVersion.DRAFT4,
                          title='errorResponse',
                          type=schema_type.OBJECT,
                          properties={
                              'state': schema(type=schema_type.STRING),
                              'message': schema(type=schema_type.STRING)
                          }))

        request_template = "Action=Publish&" + \
                           "TargetArn=$util.urlEncode('" + topic.topic_arn + "')&" + \
                           "Message=please $input.params().querystring.get('mode')&" + \
                           "Version=2010-03-31"

        # This is the VTL to transform the error response
        error_template = {
            "state": 'error',
            "message": "$util.escapeJavaScript($input.path('$.errorMessage'))"
        }
        error_template_string = json.dumps(error_template,
                                           separators=(',', ':'))

        # This is how our gateway chooses what response to send based on selection_pattern
        integration_options = api_gw.IntegrationOptions(
            credentials_role=api_gw_sns_role,
            request_parameters={
                'integration.request.header.Content-Type':
                "'application/x-www-form-urlencoded'"
            },
            request_templates={"application/json": request_template},
            passthrough_behavior=api_gw.PassthroughBehavior.NEVER,
            integration_responses=[
                api_gw.IntegrationResponse(
                    status_code='200',
                    response_templates={
                        "application/json":
                        json.dumps({"message": 'Message added to SNS topic'})
                    }),
                api_gw.IntegrationResponse(
                    selection_pattern="^\[Error\].*",
                    status_code='400',
                    response_templates={
                        "application/json": error_template_string
                    },
                    response_parameters={
                        'method.response.header.Content-Type':
                        "'application/json'",
                        'method.response.header.Access-Control-Allow-Origin':
                        "'*'",
                        'method.response.header.Access-Control-Allow-Credentials':
                        "'true'"
                    })
            ])

        # Add a SendEvent endpoint onto the gateway
        gateway.root.add_resource('SendEvent') \
            .add_method('GET', api_gw.Integration(type=api_gw.IntegrationType.AWS,
                                                  integration_http_method='POST',
                                                  uri='arn:aws:apigateway:us-east-1:sns:path//',
                                                  options=integration_options
                                                  ),
                        method_responses=[
                            api_gw.MethodResponse(status_code='200',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': response_model
                                                  }),
                            api_gw.MethodResponse(status_code='400',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': error_response_model
                                                  }),
                        ]
                        )
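As an illustration of the request template above, this is roughly the x-www-form-urlencoded body API Gateway renders for a call like GET /SendEvent?mode=fail (the topic ARN is a placeholder):

from urllib.parse import quote

def render_sns_publish(topic_arn: str, mode: str) -> str:
    # Mirrors the VTL: Action=Publish&TargetArn=$util.urlEncode(...)&Message=please <mode>&Version=...
    return ("Action=Publish"
            f"&TargetArn={quote(topic_arn, safe='')}"
            f"&Message=please {mode}"
            "&Version=2010-03-31")

print(render_sns_publish("arn:aws:sns:us-east-1:123456789012:the-destined-lambda", "fail"))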
Example #20
  def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    #XXX: To create this stack in an existing VPC,
    # uncomment the lines below, comment out the aws_ec2.Vpc(..) construct,
    # and then pass -c vpc_name=your-existing-vpc to the cdk command,
    # for example:
    # cdk -c vpc_name=your-existing-vpc synth
    #
    # vpc_name = self.node.try_get_context('vpc_name')
    # vpc = aws_ec2.Vpc.from_lookup(self, 'ExistingVPC',
    #   is_default=True,
    #   vpc_name=vpc_name
    # )

    vpc = aws_ec2.Vpc(self, "ApiGatewayDynamoDBVPC",
      max_azs=2,
      gateway_endpoints={
        "S3": aws_ec2.GatewayVpcEndpointOptions(
          service=aws_ec2.GatewayVpcEndpointAwsService.S3
        ),
        "DynamoDB": aws_ec2.GatewayVpcEndpointOptions(
          service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB
        )
      }
    )

    DDB_TABLE_SUFFIX = ''.join(random.sample((string.ascii_lowercase + string.digits), k=7))
    DDB_TABLE_NAME = "Comments-{}".format(DDB_TABLE_SUFFIX)

    ddb_table = aws_dynamodb.Table(self, "DynamoDbTable",
      table_name=DDB_TABLE_NAME,
      removal_policy=cdk.RemovalPolicy.DESTROY,
      partition_key=aws_dynamodb.Attribute(name="commentId",
        type=aws_dynamodb.AttributeType.STRING),
      time_to_live_attribute="ttl",
      billing_mode=aws_dynamodb.BillingMode.PROVISIONED,
      read_capacity=15,
      write_capacity=5,
    )

    ddb_table.add_global_secondary_index(
      read_capacity=15,
      write_capacity=5,
      index_name="pageId-index",
      partition_key=aws_dynamodb.Attribute(name='pageId', type=aws_dynamodb.AttributeType.STRING),
      projection_type=aws_dynamodb.ProjectionType.ALL
    )

    user_pool = aws_cognito.UserPool(self, 'UserPool',
      user_pool_name='UserPoolForApiGateway',
      removal_policy=cdk.RemovalPolicy.DESTROY,
      self_sign_up_enabled=True,
      sign_in_aliases={'email': True},
      auto_verify={'email': True},
      password_policy={
        'min_length': 8,
        'require_lowercase': False,
        'require_digits': False,
        'require_uppercase': False,
        'require_symbols': False,
      },
      account_recovery=aws_cognito.AccountRecovery.EMAIL_ONLY
    )

    user_pool_client = aws_cognito.UserPoolClient(self, 'UserPoolClient',
      user_pool=user_pool,
      auth_flows={
        'admin_user_password': True,
        'user_password': True,
        'custom': True,
        'user_srp': True
      },
      supported_identity_providers=[aws_cognito.UserPoolClientIdentityProvider.COGNITO]
    )

    auth = aws_apigateway.CognitoUserPoolsAuthorizer(self, 'AuthorizerForDynamoDBApi',
      cognito_user_pools=[user_pool]
    )

    ddb_access_policy_doc = aws_iam.PolicyDocument()
    ddb_access_policy_doc.add_statements(aws_iam.PolicyStatement(**{
      "effect": aws_iam.Effect.ALLOW,
      "resources": [ddb_table.table_arn],
      "actions": [
        "dynamodb:DeleteItem",
        "dynamodb:PartiQLInsert",
        "dynamodb:UpdateTimeToLive",
        "dynamodb:BatchWriteItem",
        "dynamodb:PutItem",
        "dynamodb:PartiQLUpdate",
        "dynamodb:UpdateItem",
        "dynamodb:PartiQLDelete"
      ]
    }))

    apigw_dynamodb_role = aws_iam.Role(self, "ApiGatewayRoleForDynamoDB",
      role_name='APIGatewayRoleForDynamoDB',
      assumed_by=aws_iam.ServicePrincipal('apigateway.amazonaws.com'),
      inline_policies={
        'DynamoDBAccessPolicy': ddb_access_policy_doc
      },
      managed_policies=[
        aws_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonDynamoDBReadOnlyAccess'),
      ]
    )

    dynamodb_api = aws_apigateway.RestApi(self, "DynamoDBProxyAPI",
      rest_api_name="comments-api",
      description="An Amazon API Gateway REST API that integrated with an Amazon DynamoDB.",
      endpoint_types=[aws_apigateway.EndpointType.REGIONAL],
      default_cors_preflight_options={
        "allow_origins": aws_apigateway.Cors.ALL_ORIGINS
      },
      deploy=True,
      deploy_options=aws_apigateway.StageOptions(stage_name="v1"),
      endpoint_export_name="DynamoDBProxyAPIEndpoint"
    )

    all_resources = dynamodb_api.root.add_resource("comments")
    one_resource = all_resources.add_resource("{pageId}")

    apigw_error_responses = [
      aws_apigateway.IntegrationResponse(status_code="400", selection_pattern="4\d{2}"),
      aws_apigateway.IntegrationResponse(status_code="500", selection_pattern="5\d{2}")
    ]

    apigw_ok_responses = [
      aws_apigateway.IntegrationResponse(
        status_code="200"
      )
    ]

    ddb_put_item_options = aws_apigateway.IntegrationOptions(
      credentials_role=apigw_dynamodb_role,
      integration_responses=[*apigw_ok_responses, *apigw_error_responses],
      request_templates={
        'application/json': json.dumps({
          "TableName": DDB_TABLE_NAME,
          "Item": {
            "commentId": {
              "S": "$context.requestId"
            },
            "pageId": {
              "S": "$input.path('$.pageId')"
            },
            "userName": {
              "S": "$input.path('$.userName')"
            },
            "message": {
              "S": "$input.path('$.message')"
            }
          }
        }, indent=2)
      },
      passthrough_behavior=aws_apigateway.PassthroughBehavior.WHEN_NO_TEMPLATES
    )

    create_integration = aws_apigateway.AwsIntegration(
      service='dynamodb',
      action='PutItem',
      integration_http_method='POST',
      options=ddb_put_item_options
    )

    method_responses = [
      aws_apigateway.MethodResponse(status_code='200'),
      aws_apigateway.MethodResponse(status_code='400'),
      aws_apigateway.MethodResponse(status_code='500')
    ]

    all_resources.add_method('POST', create_integration,
      method_responses=method_responses,
      authorization_type=aws_apigateway.AuthorizationType.COGNITO,
      authorizer=auth
    )

    get_response_templates = '''
#set($inputRoot = $input.path('$'))
{
  "comments": [
    #foreach($elem in $inputRoot.Items) {
       "commentId": "$elem.commentId.S",
       "userName": "******",
       "message": "$elem.message.S"
     }#if($foreach.hasNext),#end
    #end
  ]
}'''

    ddb_query_item_options = aws_apigateway.IntegrationOptions(
      credentials_role=apigw_dynamodb_role,
      integration_responses=[
        aws_apigateway.IntegrationResponse(
          status_code="200",
          response_templates={
            'application/json': get_response_templates
          }
        ),
        *apigw_error_responses
      ],
      request_templates={
        'application/json': json.dumps({
          "TableName": DDB_TABLE_NAME,
          "IndexName": "pageId-index",
          "KeyConditionExpression": "pageId = :v1",
          "ExpressionAttributeValues": {
            ":v1": {
              "S": "$input.params('pageId')"
            }
          }
        }, indent=2)
      },
      passthrough_behavior=aws_apigateway.PassthroughBehavior.WHEN_NO_TEMPLATES
    )

    get_integration = aws_apigateway.AwsIntegration(
      service='dynamodb',
      action='Query',
      integration_http_method='POST',
      options=ddb_query_item_options
    )

    one_resource.add_method('GET', get_integration,
      method_responses=method_responses,
      authorization_type=aws_apigateway.AuthorizationType.COGNITO,
      authorizer=auth
    )

    cdk.CfnOutput(self, 'DynamoDBTableName', value=ddb_table.table_name)
    cdk.CfnOutput(self, 'UserPoolId', value=user_pool.user_pool_id)
    cdk.CfnOutput(self, 'UserPoolClientId', value=user_pool_client.user_pool_client_id)
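Once deployed, the comments API above could be exercised like this (the endpoint and Cognito ID token are placeholders; the v1 stage name comes from the stack's StageOptions):

import json
import urllib.request

API_URL = "https://abc123.execute-api.us-east-1.amazonaws.com/v1"  # placeholder endpoint
ID_TOKEN = "<cognito-id-token>"  # placeholder: an ID token issued by the user pool above

# POST /comments -- the request template maps pageId/userName/message into a PutItem call.
create = urllib.request.Request(
    f"{API_URL}/comments",
    method="POST",
    data=json.dumps({"pageId": "page-1", "userName": "alice", "message": "hello"}).encode(),
    headers={"Content-Type": "application/json", "Authorization": ID_TOKEN})
with urllib.request.urlopen(create) as resp:
    print(resp.status)

# GET /comments/{pageId} -- served by the Query integration against the pageId-index GSI.
fetch = urllib.request.Request(f"{API_URL}/comments/page-1",
                               headers={"Authorization": ID_TOKEN})
with urllib.request.urlopen(fetch) as resp:
    print(json.loads(resp.read()))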
Example #21
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #Create the SQS queue
        queue = sqs.Queue(self, "SQSQueue")

        #Create the API GW service role with permissions to call SQS
        rest_api_role = iam.Role(
            self,
            "RestAPIRole",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSQSFullAccess")
            ])

        #Create an API GW Rest API
        base_api = apigw.RestApi(self, 'ApiGW', rest_api_name='TestAPI')
        base_api.root.add_method("ANY")

        #Create a resource named "example" on the base API
        api_resource = base_api.root.add_resource('example')

        #Create API Integration Response object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html
        integration_response = apigw.IntegrationResponse(
            status_code="200",
            response_templates={"application/json": ""},
        )

        #Create API Integration Options object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html
        api_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[integration_response],
            request_templates={
                "application/json":
                "Action=SendMessage&MessageBody=$input.body"
            },
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                "integration.request.header.Content-Type":
                "'application/x-www-form-urlencoded'"
            },
        )

        #Create AWS Integration Object for SQS: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/AwsIntegration.html
        api_resource_sqs_integration = apigw.AwsIntegration(
            service="sqs",
            integration_http_method="POST",
            path="{}/{}".format(Aws.ACCOUNT_ID, queue.queue_name),
            options=api_integration_options)

        #Create a Method Response Object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/MethodResponse.html
        method_response = apigw.MethodResponse(status_code="200")

        #Add the API GW Integration to the "example" API GW Resource
        api_resource.add_method("POST",
                                api_resource_sqs_integration,
                                method_responses=[method_response])

        #Creating Lambda function that will be triggered by the SQS Queue
        sqs_lambda = _lambda.Function(
            self,
            'SQSTriggerLambda',
            handler='lambda-handler.handler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('lambda'),
        )

        #Create an SQS event source for Lambda
        sqs_event_source = lambda_event_source.SqsEventSource(queue)

        #Add SQS event source to the Lambda function
        sqs_lambda.add_event_source(sqs_event_source)
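The handler wired up above (lambda-handler.handler) is not part of this snippet; a minimal sketch of such an SQS-triggered handler might look like this:

# lambda/lambda-handler.py (hypothetical sketch, not the repository's actual code)
import json

def handler(event, context):
    # Each SQS message delivered by the event source mapping arrives as a record.
    records = event.get("Records", [])
    for record in records:
        print("Received SQS message:", record.get("body"))
    return {"statusCode": 200, "body": json.dumps({"processed": len(records)})}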
    def __init__(self, scope: core.Construct, id: str, wiki_api_endpoint,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create API Gateway:
        api_01 = _apigw.RestApi(self,
                                'apiEndpoint',
                                rest_api_name='mystique-wiki-api',
                                deploy_options=_apigw.StageOptions(
                                    stage_name="myst",
                                    data_trace_enabled=True,
                                    tracing_enabled=True))
        v1 = api_01.root.add_resource("v1")

        # Add resource for HTTP Endpoint: API Hosted on EC2
        self.wiki_url_path_00 = v1.add_resource('wiki_url')
        wiki_url_path_01 = self.wiki_url_path_00.add_resource('{needle}')

        # Create the API Gateway Integration Responses
        list_objects_responses = [
            _apigw.IntegrationResponse(
                status_code="200",
                response_parameters={
                    'method.response.header.Timestamp':
                    'integration.response.header.Date',
                    'method.response.header.Content-Length':
                    'integration.response.header.Content-Length',
                    'method.response.header.Content-Type':
                    'integration.response.header.Content-Type'
                })
        ]

        # Create the API Gateway Integration Request Path mapping
        wiki_url_integration_options = _apigw.IntegrationOptions(
            integration_responses=list_objects_responses,
            request_parameters={
                "integration.request.path.needle": "method.request.path.needle"
            })

        wiki_url_integration = _apigw.HttpIntegration(
            url=f'http://{wiki_api_endpoint}/api/{{needle}}',
            http_method='GET',
            options=wiki_url_integration_options,
            proxy=False,
        )

        wiki_url_method = wiki_url_path_01.add_method(
            "GET",
            wiki_url_integration,
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.needle': True
            },
            method_responses=[
                _apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': _apigw.EmptyModel()})
            ])
        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{global_args.SOURCE_INFO}",
            description=
            "To learn more about this automation stack, check out our GitHub page."
        )

        output_1 = core.CfnOutput(
            self,
            "GetWikiUrl",
            value=f"{self.wiki_url_path_00.url}/search term",
            description=f"Get Wiki Url for given topic using API Gateway")
        """
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ###
        # SNS Topic Creation
        # Our API Gateway posts messages directly to this
        ###
        topic = sns.Topic(self,
                          'TheXRayTracerSnsFanOutTopic',
                          display_name='The XRay Tracer Fan Out Topic')
        self.sns_topic_arn = topic.topic_arn

        ###
        # API Gateway Creation
        # This is complicated because it is a direct SNS integration through VTL, not a proxy integration
        # Tracing is enabled for X-Ray
        ###

        gateway = api_gw.RestApi(
            self,
            'xrayTracerAPI',
            deploy_options=api_gw.StageOptions(
                metrics_enabled=True,
                logging_level=api_gw.MethodLoggingLevel.INFO,
                data_trace_enabled=True,
                tracing_enabled=True,
                stage_name='prod'))

        # Give our gateway permissions to interact with SNS
        api_gw_sns_role = iam.Role(
            self,
            'ApiGatewaySNSRole',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
        topic.grant_publish(api_gw_sns_role)

        # shortening the lines of later code
        schema = api_gw.JsonSchema
        schema_type = api_gw.JsonSchemaType

        # Because this isn't a proxy integration, we need to define our response model
        response_model = gateway.add_model(
            'ResponseModel',
            content_type='application/json',
            model_name='ResponseModel',
            schema=schema(
                schema=api_gw.JsonSchemaVersion.DRAFT4,
                title='pollResponse',
                type=schema_type.OBJECT,
                properties={'message': schema(type=schema_type.STRING)}))

        error_response_model = gateway.add_model(
            'ErrorResponseModel',
            content_type='application/json',
            model_name='ErrorResponseModel',
            schema=schema(schema=api_gw.JsonSchemaVersion.DRAFT4,
                          title='errorResponse',
                          type=schema_type.OBJECT,
                          properties={
                              'state': schema(type=schema_type.STRING),
                              'message': schema(type=schema_type.STRING)
                          }))

        request_template = "Action=Publish&" + \
                           "TargetArn=$util.urlEncode('" + topic.topic_arn + "')&" + \
                           "Message=$util.urlEncode($context.path)&" + \
                           "Version=2010-03-31"

        # This is the VTL to transform the error response
        error_template = {
            "state": 'error',
            "message": "$util.escapeJavaScript($input.path('$.errorMessage'))"
        }
        error_template_string = json.dumps(error_template,
                                           separators=(',', ':'))

        # This is how our gateway chooses what response to send based on selection_pattern
        integration_options = api_gw.IntegrationOptions(
            credentials_role=api_gw_sns_role,
            request_parameters={
                'integration.request.header.Content-Type':
                "'application/x-www-form-urlencoded'"
            },
            request_templates={"application/json": request_template},
            passthrough_behavior=api_gw.PassthroughBehavior.NEVER,
            integration_responses=[
                api_gw.IntegrationResponse(
                    status_code='200',
                    response_templates={
                        "application/json":
                        json.dumps({"message": 'message added to topic'})
                    }),
                api_gw.IntegrationResponse(
                    selection_pattern="^\[Error\].*",
                    status_code='400',
                    response_templates={
                        "application/json": error_template_string
                    },
                    response_parameters={
                        'method.response.header.Content-Type':
                        "'application/json'",
                        'method.response.header.Access-Control-Allow-Origin':
                        "'*'",
                        'method.response.header.Access-Control-Allow-Credentials':
                        "'true'"
                    })
            ])

        method_responses = [
            api_gw.MethodResponse(
                status_code='200',
                response_parameters={
                    'method.response.header.Content-Type': True,
                    'method.response.header.Access-Control-Allow-Origin': True,
                    'method.response.header.Access-Control-Allow-Credentials':
                    True
                },
                response_models={'application/json': response_model}),
            api_gw.MethodResponse(
                status_code='400',
                response_parameters={
                    'method.response.header.Content-Type': True,
                    'method.response.header.Access-Control-Allow-Origin': True,
                    'method.response.header.Access-Control-Allow-Credentials':
                    True
                },
                response_models={'application/json': error_response_model}),
        ]

        # Add a / endpoint onto the gateway
        gateway.root \
            .add_method('GET', api_gw.Integration(type=api_gw.IntegrationType.AWS,
                                                  integration_http_method='POST',
                                                  uri='arn:aws:apigateway:us-east-1:sns:path//',
                                                  options=integration_options
                                                  ),
                        method_responses=method_responses
                        )

        # Add a {proxy+} endpoint onto the gateway
        gateway.root.add_resource('{proxy+}') \
            .add_method('GET', api_gw.Integration(type=api_gw.IntegrationType.AWS,
                                                  integration_http_method='POST',
                                                  uri='arn:aws:apigateway:us-east-1:sns:path//',
                                                  options=integration_options
                                                  ),
                        method_responses=method_responses
                        )
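The stack above exposes self.sns_topic_arn so other stacks can fan out from the same topic; a hypothetical consumer stack (names and runtime are assumptions) might wire itself up like this:

from aws_cdk import (
    aws_lambda as _lambda,
    aws_sns as sns,
    aws_sns_subscriptions as subscriptions,
    core,
)


class SubscriberStack(core.Stack):
    def __init__(self, scope: core.Construct, id: str, sns_topic_arn: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Import the fan-out topic by ARN and subscribe a simple logging function to it.
        topic = sns.Topic.from_topic_arn(self, "ImportedXRayTopic", sns_topic_arn)
        fn = _lambda.Function(
            self, "TopicSubscriber",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="index.handler",
            code=_lambda.Code.from_inline(
                "def handler(event, context):\n    print(event)\n"))
        topic.add_subscription(subscriptions.LambdaSubscription(fn))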
    def __init__(self, scope: core.Construct, id: str,
                 props: KinesisFirehoseStackProps, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        lambda_repository = aws_codecommit.Repository(
            self,
            "ClicksProcessingLambdaRepository",
            repository_name="MythicalMysfits-ClicksProcessingLambdaRepository",
        )

        core.CfnOutput(
            self,
            "kinesisRepositoryCloneUrlHttp",
            value=lambda_repository.repository_clone_url_http,
            description="Clicks Processing Lambda Repository Clone URL HTTP",
        )

        core.CfnOutput(
            self,
            "kinesisRepositoryCloneUrlSsh",
            value=lambda_repository.repository_clone_url_ssh,
            description="Clicks Processing Lambda Repository Clone URL SSH",
        )

        clicks_destination_bucket = aws_s3.Bucket(self,
                                                  "Bucket",
                                                  versioned=True)

        lambda_function_policy = aws_iam.PolicyStatement()
        lambda_function_policy.add_actions("dynamodb:GetItem")
        lambda_function_policy.add_resources(props.table.table_arn)

        mysfits_clicks_processor = aws_lambda.Function(
            self,
            "Function",
            handler="streamProcessor.processRecord",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            description=
            "An Amazon Kinesis Firehose stream processor that enriches click records to not just include a mysfitId, but also other attributes that can be analyzed later.",
            memory_size=128,
            code=aws_lambda.Code.asset("../../lambda-streaming-processor"),
            timeout=core.Duration.seconds(30),
            initial_policy=[lambda_function_policy],
            environment={
                # TODO: this seems better than having the user copy/paste it in, but is it the best way?
                "MYSFITS_API_URL":
                "https://{}.execute-api.{}.amazonaws.com/prod/".format(
                    props.api_gateway.ref, core.Aws.REGION)
            },
        )

        firehose_delivery_role = aws_iam.Role(
            self,
            "FirehoseDeliveryRole",
            role_name="FirehoseDeliveryRole",
            assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"),
            external_id=core.Aws.ACCOUNT_ID,
        )

        firehose_delivery_policy_s3_statement = aws_iam.PolicyStatement()
        firehose_delivery_policy_s3_statement.add_actions(
            "s3:AbortMultipartUpload",
            "s3:GetBucketLocation",
            "s3:GetObject",
            "s3:ListBucket",
            "s3:ListBucketMultipartUploads",
            "s3:PutObject",
        )
        firehose_delivery_policy_s3_statement.add_resources(
            clicks_destination_bucket.bucket_arn)
        firehose_delivery_policy_s3_statement.add_resources(
            clicks_destination_bucket.arn_for_objects("*"))

        firehose_delivery_policy_lambda_statement = aws_iam.PolicyStatement()
        firehose_delivery_policy_lambda_statement.add_actions(
            "lambda:InvokeFunction")
        firehose_delivery_policy_lambda_statement.add_resources(
            mysfits_clicks_processor.function_arn)

        firehose_delivery_role.add_to_policy(
            firehose_delivery_policy_s3_statement)
        firehose_delivery_role.add_to_policy(
            firehose_delivery_policy_lambda_statement)

        mysfits_firehose_to_s3 = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "DeliveryStream",
            extended_s3_destination_configuration=aws_kinesisfirehose.
            CfnDeliveryStream.ExtendedS3DestinationConfigurationProperty(
                bucket_arn=clicks_destination_bucket.bucket_arn,
                buffering_hints=aws_kinesisfirehose.CfnDeliveryStream.
                BufferingHintsProperty(interval_in_seconds=60,
                                       size_in_m_bs=50),
                compression_format="UNCOMPRESSED",
                prefix="firehose/",
                role_arn=firehose_delivery_role.role_arn,
                processing_configuration=aws_kinesisfirehose.CfnDeliveryStream.
                ProcessingConfigurationProperty(
                    enabled=True,
                    processors=[
                        aws_kinesisfirehose.CfnDeliveryStream.
                        ProcessorProperty(
                            parameters=[
                                aws_kinesisfirehose.CfnDeliveryStream.
                                ProcessorParameterProperty(
                                    parameter_name="LambdaArn",
                                    parameter_value=mysfits_clicks_processor.
                                    function_arn,
                                )
                            ],
                            type="Lambda",
                        )
                    ],
                ),
            ),
        )

        aws_lambda.CfnPermission(
            self,
            "Permission",
            action="lambda:InvokeFunction",
            function_name=mysfits_clicks_processor.function_arn,
            principal="firehose.amazonaws.com",
            source_account=core.Aws.ACCOUNT_ID,
            source_arn=mysfits_firehose_to_s3.attr_arn,
        )

        click_processing_api_role = aws_iam.Role(
            self,
            "ClickProcessingApiRole",
            assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"),
        )

        api_policy = aws_iam.PolicyStatement()
        api_policy.add_actions("firehose:PutRecord")
        api_policy.add_resources(mysfits_firehose_to_s3.attr_arn)
        aws_iam.Policy(
            self,
            "ClickProcessingApiPolicy",
            policy_name="api_gateway_firehose_proxy_role",
            statements=[api_policy],
            roles=[click_processing_api_role],
        )

        api = aws_apigateway.RestApi(
            self,
            "APIEndpoint",
            rest_api_name="ClickProcessing API Service",
            endpoint_types=[aws_apigateway.EndpointType.REGIONAL],
        )

        clicks = api.root.add_resource("clicks")

        clicks.add_method(
            "PUT",
            aws_apigateway.AwsIntegration(
                service="firehose",
                integration_http_method="POST",
                action="PutRecord",
                options=aws_apigateway.IntegrationOptions(
                    connection_type=aws_apigateway.ConnectionType.INTERNET,
                    credentials_role=click_processing_api_role,
                    integration_responses=[
                        aws_apigateway.IntegrationResponse(
                            status_code="200",
                            response_templates={
                                "application/json": '{"status": "OK"}'
                            },
                            response_parameters={
                                "method.response.header.Access-Control-Allow-Headers":
                                "'Content-Type'",
                                "method.response.header.Access-Control-Allow-Methods":
                                "'OPTIONS,PUT'",
                                "method.response.header.Access-Control-Allow-Origin":
                                "'*'",
                            },
                        )
                    ],
                    request_parameters={
                        "integration.request.header.Content-Type":
                        "'application/x-amz-json-1.1'"
                    },
                    request_templates={
                        "application/json":
                        """{ "DeliveryStreamName": "%s", "Record": { "Data": "$util.base64Encode($input.json('$'))" }}"""
                        % mysfits_firehose_to_s3.ref
                    },
                ),
            ),
            method_responses=[
                aws_apigateway.MethodResponse(
                    status_code="200",
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Headers":
                        True,
                        "method.response.header.Access-Control-Allow-Methods":
                        True,
                        "method.response.header.Access-Control-Allow-Origin":
                        True,
                    },
                )
            ],
        )

        clicks.add_method(
            "OPTIONS",
            aws_apigateway.MockIntegration(
                integration_responses=[
                    aws_apigateway.IntegrationResponse(
                        status_code="200",
                        response_parameters={
                            "method.response.header.Access-Control-Allow-Headers":
                            "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,X-Amz-User-Agent'",
                            "method.response.header.Access-Control-Allow-Origin":
                            "'*'",
                            "method.response.header.Access-Control-Allow-Credentials":
                            "'false'",
                            "method.response.header.Access-Control-Allow-Methods":
                            "'OPTIONS,GET,PUT,POST,DELETE'",
                        },
                    )
                ],
                passthrough_behavior=aws_apigateway.PassthroughBehavior.NEVER,
                request_templates={"application/json": '{"statusCode": 200}'},
            ),
            method_responses=[
                aws_apigateway.MethodResponse(
                    status_code="200",
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Headers":
                        True,
                        "method.response.header.Access-Control-Allow-Methods":
                        True,
                        "method.response.header.Access-Control-Allow-Credentials":
                        True,
                        "method.response.header.Access-Control-Allow-Origin":
                        True,
                    },
                )
            ],
        )
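The stream processor referenced above (streamProcessor.processRecord) lives in a separate directory; a minimal sketch of a Firehose transformation handler with that shape (the enrichment step is a placeholder) is:

# Hypothetical sketch of streamProcessor.py -- not the repository's actual code.
import base64
import json

def processRecord(event, context):
    output = []
    for record in event["records"]:
        click = json.loads(base64.b64decode(record["data"]))
        # Placeholder for the real enrichment (e.g. a lookup against the Mysfits API).
        click["enriched"] = True
        output.append({
            "recordId": record["recordId"],
            "result": "Ok",
            "data": base64.b64encode((json.dumps(click) + "\n").encode()).decode(),
        })
    # Firehose expects every incoming record to be returned with a result status.
    return {"records": output}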
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here

        repository = codecommit.Repository(
            self,
            "slackops-repository",
            repository_name="slackops-pipeline-repo",
            description="Repo for the SlackOps Pipeline Demo",
        )

        website_bucket = s3.Bucket(self,
                                   "website-bucket",
                                   removal_policy=core.RemovalPolicy.DESTROY,
                                   auto_delete_objects=True,
                                   public_read_access=True,
                                   website_index_document="index.html")

        manual_approval_topic = sns.Topic(
            self,
            "manual-approval-notification",
        )

        artifact_bucket = s3.Bucket(self,
                                    "artifact-bucket",
                                    removal_policy=core.RemovalPolicy.DESTROY)

        source_artifact = codepipeline.Artifact(artifact_name="Source")
        deployment_artifact = codepipeline.Artifact(artifact_name="Deployment")

        pipeline = codepipeline.Pipeline(
            self,
            "slackops-pipeline",
            artifact_bucket=artifact_bucket,
            stages=[
                codepipeline.StageOptions(
                    stage_name="Source",
                    actions=[
                        codepipeline_actions.CodeCommitSourceAction(
                            repository=repository,
                            branch="master",
                            output=source_artifact,
                            action_name="Source")
                    ]),
                codepipeline.StageOptions(
                    stage_name="Build",
                    actions=[
                        codepipeline_actions.CodeBuildAction(
                            input=source_artifact,
                            action_name="Build",
                            project=codebuild.PipelineProject(
                                self,
                                "build-project",
                                build_spec=codebuild.BuildSpec.
                                from_source_filename("buildspec.yml"),
                                environment=codebuild.BuildEnvironment(
                                    build_image=codebuild.LinuxBuildImage.
                                    STANDARD_5_0),
                            ),
                            outputs=[deployment_artifact])
                    ]),
                codepipeline.StageOptions(
                    stage_name=MANUAL_APPROVAL_STAGE_NAME,
                    actions=[
                        codepipeline_actions.ManualApprovalAction(
                            action_name=MANUAL_APPROVAL_ACTION_NAME,
                            additional_information=
                            "Please Approve the Deployment",
                            notification_topic=manual_approval_topic,
                        )
                    ]),
                codepipeline.StageOptions(
                    stage_name="Deploy",
                    actions=[
                        codepipeline_actions.S3DeployAction(
                            bucket=website_bucket,
                            input=deployment_artifact,
                            access_control=s3.BucketAccessControl.PUBLIC_READ,
                            action_name="deploy-to-s3")
                    ])
            ])

        # Build the API Gateway to record the approval or rejection

        rest_api = apigateway.RestApi(self,
                                      "slackops-apigw",
                                      deploy_options=apigateway.StageOptions(
                                          stage_name="prod", ))

        root_resource = rest_api.root.add_resource("v1")

        approval_resource = root_resource.add_resource("approval")

        api_gateway_role = iam.Role(self,
                                    "slackops-apigw-role",
                                    assumed_by=iam.ServicePrincipal(
                                        service="apigateway.amazonaws.com", ))
        api_gateway_role.add_to_policy(
            iam.PolicyStatement(actions=["codepipeline:PutApprovalResult"],
                                resources=[pipeline.pipeline_arn + "/*"]))

        # Double curlies to make str.format work
        mapping_template = """
#set($token = $input.params("token"))
#set($response = $input.params("response"))
{{
   "actionName": "{action_name}",
   "pipelineName": "{pipeline_name}",
   "result": {{ 
      "status": "$response",
      "summary": ""
   }},
   "stageName": "{stage_name}",
   "token": "$token"
}}
        """.format(
            action_name="approve-before-publication",
            pipeline_name=pipeline.pipeline_name,
            stage_name="Approval",
        )

        approval_integration = apigateway.AwsIntegration(
            service="codepipeline",
            action="PutApprovalResult",
            integration_http_method="POST",
            options=apigateway.IntegrationOptions(
                credentials_role=api_gateway_role,
                request_parameters={
                    "integration.request.header.x-amz-target":
                    "'CodePipeline_20150709.PutApprovalResult'",
                    "integration.request.header.Content-Type":
                    "'application/x-amz-json-1.1'",
                },
                passthrough_behavior=apigateway.PassthroughBehavior.NEVER,
                request_templates={"application/json": mapping_template},
                integration_responses=[
                    apigateway.IntegrationResponse(
                        status_code='400',
                        selection_pattern=r"4\d{2}",
                        response_parameters={
                            'method.response.header.error':
                            'integration.response.body'
                        }),
                    apigateway.IntegrationResponse(
                        status_code='500',
                        selection_pattern=r"5\d{2}",
                        response_parameters={
                            'method.response.header.error':
                            'integration.response.body'
                        }),
                    apigateway.IntegrationResponse(
                        status_code='200',
                        selection_pattern=r"2\d{2}",
                        response_parameters={
                            'method.response.header.response':
                            'integration.response.body'
                        }),
                ]))

        approval_method = approval_resource.add_method(
            http_method="GET",
            request_validator=apigateway.RequestValidator(
                self,
                "request-validator",
                rest_api=rest_api,
                request_validator_name="ParamValidator",
                validate_request_parameters=True),
            request_parameters={
                "method.request.querystring.token": True,
                "method.request.querystring.response":
                True,  # Approved / Rejected
            },
            method_responses=[
                apigateway.MethodResponse(
                    status_code='400',
                    response_parameters={'method.response.header.error':
                                         True}),
                apigateway.MethodResponse(
                    status_code='500',
                    response_parameters={'method.response.header.error':
                                         True}),
                apigateway.MethodResponse(
                    status_code='200',
                    response_parameters={
                        'method.response.header.response': True
                    }),
            ],
            integration=approval_integration,
        )
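
        # Illustrative approval link (IDs are placeholders): the Slack message can
        # point the approver at
        #   GET https://<api-id>.execute-api.<region>.amazonaws.com/prod/v1/approval?token=<token>&response=Approved
        # and the AWS integration above relays the decision to CodePipeline.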

        # Notification mechanism

        ssm_parameter_webhook = ssm.StringParameter(
            self,
            "slackops-webhook-parameter",
            string_value="<replace-me>",
            parameter_name="/slackops/webhook-url")

        notification_lambda = _lambda.PythonFunction(
            self,
            "slackops-notification",
            entry=os.path.join(os.path.dirname(__file__), "..", "src"),
            index="index.py",
            handler="notification_handler",
            environment={
                "WEBHOOK_URL_PARAMETER": ssm_parameter_webhook.parameter_name,
                "API_ENDPOINT": rest_api.url_for_path("/v1/approval"),
            })

        notification_lambda.add_event_source(
            lambda_event_sources.SnsEventSource(topic=manual_approval_topic))

        ssm_parameter_webhook.grant_read(notification_lambda)
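
        # Flow: the Manual Approval action publishes to the SNS topic above, which
        # invokes this Lambda; the handler reads the Slack webhook URL from the SSM
        # parameter and is expected to post a message that links back to the
        # /v1/approval endpoint exposed by the API Gateway defined earlier.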

        # Outputs

        core.CfnOutput(self,
                       "repositoryHttps",
                       value=repository.repository_clone_url_http)

        core.CfnOutput(self,
                       "repositorySSH",
                       value=repository.repository_clone_url_ssh)

        core.CfnOutput(self,
                       "websiteUrl",
                       value=website_bucket.bucket_website_url)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.current_dir = os.path.dirname(__file__)

        self.bucket = s3.Bucket(
            self,
            "qs-migration-bucket",
            bucket_name=f'quicksight-migration-{core.Aws.ACCOUNT_ID}',
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )

        self.quicksight_migration_lambda_role = iam.Role(
            self,
            'quicksight-migration-lambda-role',
            description='Role for the Quicksight dashboard migration Lambdas',
            role_name='quicksight-migration-lambda-role',
            max_session_duration=core.Duration.seconds(3600),
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={
                'AllowAccess':
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            'logs:CreateLogGroup', 'logs:CreateLogStream',
                            'logs:PutLogEvents'
                        ],
                        resources=[
                            f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:*'
                        ]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["sts:AssumeRole", "iam:ListRoles"],
                        resources=[
                            "arn:aws:iam::*:role/quicksight-migration-*-assume-role"
                        ]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["s3:PutObject", "s3:ListBucket"],
                        resources=[
                            self.bucket.bucket_arn,
                            f"{self.bucket.bucket_arn}/*"
                        ]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["secretsmanager:GetSecretValue"],
                        resources=[
                            f"arn:aws:secretsmanager:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:secret:*"
                        ]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            "quicksight:Create*", "quicksight:Delete*",
                            "quicksight:Describe*", "quicksight:List*",
                            "quicksight:Search*", "quicksight:Update*"
                        ],
                        resources=["*"])
                ])
            })

        self.quicksight_migration_target_assume_role = iam.Role(
            self,
            'quicksight-migration-target-assume-role',
            description=
            'Role for the Quicksight dashboard migration Lambdas to assume',
            role_name='quicksight-migration-target-assume-role',
            max_session_duration=core.Duration.seconds(3600),
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={
                'AllowAccess':
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            "quicksight:Create*", "quicksight:Delete*",
                            "quicksight:Describe*", "quicksight:List*",
                            "quicksight:Search*", "quicksight:Update*"
                        ],
                        resources=["*"]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            "ssm:GetParameter",
                        ],
                        resources=["arn:aws:ssm:*:*:parameter/infra/config"])
                ])
            })

        self.quicksight_migration_target_assume_role.assume_role_policy.add_statements(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=['sts:AssumeRole'],
                principals=[iam.AccountPrincipal(core.Aws.ACCOUNT_ID)]))
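
        # The statement above adds this account as a trusted principal, so the
        # migration Lambda role (which is allowed sts:AssumeRole on
        # quicksight-migration-*-assume-role) can assume this role at runtime.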

        # API Gateway to SQS
        self.rest_api_role = iam.Role(
            self,
            "RestAPIRole",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSQSFullAccess")
            ])

        self.queue = sqs.Queue(self,
                               "quicksight-migration-sqs-queue",
                               queue_name="quicksight-migration-sqs",
                               visibility_timeout=core.Duration.minutes(15))
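
        # The queue's visibility timeout matches the 15-minute timeout of the
        # migration Lambda below, so messages are not redelivered while a
        # migration run is still in progress.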

        self.integration_response = apigw.IntegrationResponse(
            status_code="200",
            response_templates={"application/json": ""},
            response_parameters={
                "method.response.header.Access-Control-Allow-Headers":
                "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
                "method.response.header.Access-Control-Allow-Origin":
                "'*'",
                "method.response.header.Access-Control-Allow-Methods":
                "'POST,OPTIONS'"
            })

        self.api_integration_options = apigw.IntegrationOptions(
            credentials_role=self.rest_api_role,
            integration_responses=[self.integration_response],
            request_templates={
                "application/json":
                'Action=SendMessage&MessageBody=$util.urlEncode("$input.body")'
            },
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                "integration.request.header.Content-Type":
                "'application/x-www-form-urlencoded'"
            })

        self.api_resource_sqs_integration = apigw.AwsIntegration(
            service="sqs",
            integration_http_method="POST",
            path="{}/{}".format(core.Aws.ACCOUNT_ID, self.queue.queue_name),
            options=self.api_integration_options)
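
        # SQS expects Query-API style requests: the request template rewrites the
        # JSON body into "Action=SendMessage&MessageBody=<url-encoded body>" with a
        # form-urlencoded Content-Type, and the integration path addresses the
        # queue as "<account-id>/<queue-name>".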

        self.base_api = apigw.RestApi(
            self,
            'quicksight-migration-sqs',
            rest_api_name='quicksight-migration-sqs',
            default_cors_preflight_options=apigw.CorsOptions(
                allow_origins=apigw.Cors.ALL_ORIGINS,
                allow_methods=["POST", "OPTIONS"],
                allow_headers=[
                    'Access-Control-Allow-Origin',
                    'Access-Control-Allow-Headers', 'Content-Type'
                ]))

        self.base_api.root.add_method(
            "POST",
            self.api_resource_sqs_integration,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Headers":
                        True,
                        "method.response.header.Access-Control-Allow-Methods":
                        True,
                        "method.response.header.Access-Control-Allow-Origin":
                        True
                    })
            ])
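
        # Illustrative client call (URL is a placeholder; see the
        # MigrationAPIGatewayURL output below, and the payload shape depends on
        # the migration Lambda):
        #
        #   import requests
        #   requests.post(
        #       "https://<api-id>.execute-api.<region>.amazonaws.com/prod/",
        #       json={"example": "payload"})
        #
        # The body is URL-encoded into an SQS message and consumed by the
        # migration Lambda through the SqsEventSource configured below.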

        self.quicksight_migration_lambda = _lambda.Function(
            self,
            'quicksight-migration-lambda',
            handler='quicksight_migration.lambda_function.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                os.path.join(self.current_dir,
                             '../lambda/quicksight_migration/')),
            function_name='quicksight_migration_lambda',
            role=self.quicksight_migration_lambda_role,
            timeout=core.Duration.minutes(15),
            memory_size=1024,
            environment={
                'BUCKET_NAME': self.bucket.bucket_name,
                'S3_KEY': 'None',
                'INFRA_CONFIG_PARAM': '/infra/config',
                'SQS_URL': self.queue.queue_url
            })

        self.sqs_event_source = event_sources.SqsEventSource(self.queue)

        self.quicksight_migration_lambda.add_event_source(
            self.sqs_event_source)

        core.CfnOutput(self,
                       "MigrationAPIGatewayURL",
                       value=self.base_api.url,
                       description="Migration API GW URL")
Example #27
0
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 table: dynamo_db.Table, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Bucket for the processed stream events
        # --------------------------------------
        clicks_destination_bucket = _s3.Bucket(
            self,
            'Bucket',
            versioned=False  # set to True to enable bucket versioning
        )

        # Lambda function for processing the stream
        # -----------------------------------------
        # Policy statement for accessing the DynamoDB table
        lambda_function_policy_stm = _iam.PolicyStatement()
        lambda_function_policy_stm.add_actions('dynamodb:GetItem')
        lambda_function_policy_stm.add_resources(table.table_arn)

        # Lambda processor function
        mysfits_click_processor = _lambda.Function(
            self,
            'Function',
            handler="streamProcessor.processRecord",
            runtime=_lambda.Runtime.PYTHON_3_6,
            description=
            'An Amazon Kinesis Firehose stream processor that enriches click records to not just '
            'include a mysfitId, but also other attributes that can be analyzed later.',
            memory_size=128,
            code=_lambda.Code.asset('./lambda_streaming_processor'),
            timeout=cdk.Duration.seconds(60),
            initial_policy=[lambda_function_policy_stm],
            environment={'MYSFITS_API_URL': mysfits_api_url})

        # Firehose delivery stream
        # ------------------------
        # Initialize role
        firehose_delivery_role = _iam.Role(
            self,
            "FirehoseDeliveryRole",
            role_name='FirehoseDeliveryRole',
            assumed_by=_iam.ServicePrincipal('firehose.amazonaws.com'),
        )
        # Statement with access to S3 bucket
        firehose_delivery_policy_s3_stm = _iam.PolicyStatement()
        firehose_delivery_policy_s3_stm.add_actions(
            "s3:AbortMultipartUpload", "s3:GetBucketLocation", "s3:GetObject",
            "s3:ListBucket", "s3:ListBucketMultipartUploads", "s3:PutObject")
        firehose_delivery_policy_s3_stm.add_resources(
            clicks_destination_bucket.bucket_arn)
        firehose_delivery_policy_s3_stm.add_resources(
            clicks_destination_bucket.arn_for_objects('*'))
        firehose_delivery_policy_s3_stm.effect = _iam.Effect.ALLOW
        # Statement with access to Lambda function
        firehose_delivery_policy_lambda_stm = _iam.PolicyStatement()
        firehose_delivery_policy_lambda_stm.add_actions(
            "lambda:InvokeFunction")
        firehose_delivery_policy_lambda_stm.add_actions(
            "lambda:GetFunctionConfiguration")
        firehose_delivery_policy_lambda_stm.add_resources(
            mysfits_click_processor.function_arn)
        firehose_delivery_policy_lambda_stm.effect = _iam.Effect.ALLOW
        # Add policies to role
        firehose_delivery_role.add_to_policy(firehose_delivery_policy_s3_stm)
        firehose_delivery_role.add_to_policy(
            firehose_delivery_policy_lambda_stm)
        # Create delivery stream
        mysfits_firehose_to_s3 = kinfire.CfnDeliveryStream(
            self,
            "DeliveryStream",
            delivery_stream_name="DeliveryStream",
            delivery_stream_type="DirectPut",
            extended_s3_destination_configuration=kinfire.CfnDeliveryStream.
            ExtendedS3DestinationConfigurationProperty(
                bucket_arn=clicks_destination_bucket.bucket_arn,
                buffering_hints=kinfire.CfnDeliveryStream.
                BufferingHintsProperty(interval_in_seconds=60, size_in_m_bs=1),
                compression_format="UNCOMPRESSED",
                error_output_prefix="errors/",
                prefix="firehose/",
                processing_configuration=kinfire.CfnDeliveryStream.
                ProcessingConfigurationProperty(
                    enabled=True,
                    processors=[
                        kinfire.CfnDeliveryStream.ProcessorProperty(
                            type="Lambda",
                            parameters=[
                                kinfire.CfnDeliveryStream.
                                ProcessorParameterProperty(
                                    parameter_name="LambdaArn",
                                    parameter_value=mysfits_click_processor.
                                    function_arn)
                            ])
                    ]),
                role_arn=firehose_delivery_role.role_arn,
            ))
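
        # Delivery flow: records arrive via DirectPut, are buffered for up to 60
        # seconds or 1 MB, run through the Lambda processor above for enrichment,
        # and land uncompressed in S3 under "firehose/" ("errors/" on failure).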

        # API Gateway as proxy to the Firehose stream
        # -------------------------------------------
        # Initialize role
        click_processing_api_role = _iam.Role(
            self,
            "ClickProcessingApiRole",
            role_name="ClickProcessingApiRole",
            assumed_by=_iam.ServicePrincipal("apigateway.amazonaws.com"))

        api_policy = _iam.PolicyStatement()
        api_policy.add_actions("firehose:PutRecord")
        api_policy.add_resources(mysfits_firehose_to_s3.attr_arn)
        api_policy.effect = _iam.Effect.ALLOW
        # Associate policy to role
        _iam.Policy(self,
                    "ClickProcessingApiPolicy",
                    policy_name="api_gateway_firehose_proxy_role",
                    statements=[api_policy],
                    roles=[click_processing_api_role])
        # Create API gateway
        api = apigw.RestApi(self,
                            "APIEndpoint",
                            rest_api_name="ClickProcessingApi",
                            endpoint_types=[apigw.EndpointType.REGIONAL])
        # Add the resource endpoint and the method used to send clicks to Firehose
        clicks = api.root.add_resource('clicks')
        clicks.add_method(
            'PUT',
            integration=apigw.AwsIntegration(
                service='firehose',
                integration_http_method='POST',
                action='PutRecord',
                options=apigw.IntegrationOptions(
                    connection_type=apigw.ConnectionType.INTERNET,
                    credentials_role=click_processing_api_role,
                    integration_responses=[
                        apigw.IntegrationResponse(
                            status_code='200',
                            response_templates={
                                "application/json": '{"status":"OK"}'
                            },
                            response_parameters={
                                "method.response.header.Access-Control-Allow-Headers":
                                "'Content-Type'",
                                "method.response.header.Access-Control-Allow-Methods":
                                "'OPTIONS,PUT'",
                                "method.response.header.Access-Control-Allow-Origin":
                                "'*'"
                            })
                    ],
                    request_parameters={
                        "integration.request.header.Content-Type":
                        "'application/x-amz-json-1.1'"
                    },
                    request_templates={
                        "application/json":
                        "{ \"DeliveryStreamName\": \"" +
                        mysfits_firehose_to_s3.ref +
                        "\", \"Record\": {   \"Data\": \"$util.base64Encode($input.json('$'))\" } }"
                    },
                )),
            method_responses=[
                apigw.MethodResponse(
                    status_code='200',
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Headers":
                        True,
                        "method.response.header.Access-Control-Allow-Methods":
                        True,
                        "method.response.header.Access-Control-Allow-Origin":
                        True
                    })
            ])
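
        # Illustrative client call (URL and id are placeholders; the default
        # stage name is "prod"):
        #
        #   curl -X PUT \
        #     "https://<api-id>.execute-api.<region>.amazonaws.com/prod/clicks" \
        #     -H "Content-Type: application/json" \
        #     -d '{"mysfitId": "<some-mysfit-id>"}'
        #
        # The mapping template wraps the body into a Firehose PutRecord request,
        # base64-encoding the payload before delivery.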

        clicks.add_method(
            'OPTIONS',
            integration=apigw.MockIntegration(integration_responses=[
                apigw.IntegrationResponse(
                    status_code='200',
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Headers":
                        "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,X-Amz-User-Agent'",
                        "method.response.header.Access-Control-Allow-Origin":
                        "'*'",
                        "method.response.header.Access-Control-Allow-Credentials":
                        "'false'",
                        "method.response.header.Access-Control-Allow-Methods":
                        "'OPTIONS,GET,PUT,POST,DELETE'"
                    })
            ],
                                              passthrough_behavior=apigw.
                                              PassthroughBehavior.NEVER,
                                              request_templates={
                                                  "application/json":
                                                  '{"statusCode": 200}'
                                              }),
            method_responses=[
                apigw.MethodResponse(
                    status_code='200',
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Headers":
                        True,
                        "method.response.header.Access-Control-Allow-Methods":
                        True,
                        "method.response.header.Access-Control-Allow-Credentials":
                        True,
                        "method.response.header.Access-Control-Allow-Origin":
                        True
                    })
            ])
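
        # The OPTIONS method above is a mock integration that answers CORS
        # preflight requests directly from API Gateway, returning the allowed
        # headers, methods and origin without invoking any backend.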