def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Wire a Lambda behind an API Gateway GET method whose query-string
        parameters are explicitly declared and validated."""
        super().__init__(scope, id, **kwargs)

        # Lambda backing the API integration.
        lambda_ = aws_lambda.Function(
            self,
            "ApiLambdaIntegrationLambda",
            # Code.asset() is deprecated; Code.from_asset() is the supported name.
            code=aws_lambda.Code.from_asset("lambdas/api_lambda_integration"),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler="lambda_function.lambda_handler",
        )

        # REST API deployed to a dedicated stage.
        api = aws_apigateway.RestApi(
            self,
            "ApiLambdaIntegrationApiGateway",
            deploy_options=aws_apigateway.StageOptions(stage_name="hogehoge"))
        hoge_resources = api.root.add_resource("hoge")
        hoge_resources.add_method(
            "GET",
            aws_apigateway.LambdaIntegration(lambda_),
            request_parameters={
                # Explicitly declare the query-string (URL) parameters.
                "method.request.querystring.hoge": True,  # required
                "method.request.querystring.hogeOption": False,  # optional
            },
            # Without a request validator the "required" flags have no effect.
            request_validator=api.add_request_validator(
                "ApiLambdaIntegrationValidator",
                validate_request_parameters=True),
        )
Пример #2
0
    def create_root_api_gateway(self, api_domain_name: str):
        """Create the root REST API with a custom domain and a Route53 alias.

        Also provisions the API's authorizer (stored on ``self._api_authorizer``)
        and returns the created RestApi so callers can attach resources.
        """
        # Custom-domain configuration backed by an ACM certificate.
        api_certificate = self.create_api_certificate(api_domain_name,
                                                      self.zone)
        domain_opts = aws_apigateway.DomainNameOptions(
            domain_name=api_domain_name, certificate=api_certificate)

        # Throttle the default deployment stage.
        throttled_stage = aws_apigateway.StageOptions(
            throttling_rate_limit=10, throttling_burst_limit=100)

        rest_api = aws_apigateway.RestApi(self,
                                          'PublicCrudApi',
                                          rest_api_name='PublicCrudApi',
                                          domain_name=domain_opts,
                                          deploy_options=throttled_stage)

        kix.info("Routing A-Record Alias for REST API")
        alias_target = aws_route53.RecordTarget.from_alias(
            aws_route53_targets.ApiGateway(rest_api))
        aws_route53.ARecord(self,
                            "PublicCrudApiAliasRecord",
                            zone=self.zone,
                            target=alias_target,
                            record_name=api_domain_name)

        # Also create the authorizer for this API.
        self._api_authorizer = self.create_root_api_authorizer(
            self.user_pool, rest_api)
        return rest_api
def create_api_gateway(stack, id, api_lambda):
    """Build a Lambda-backed REST API named "<id>-api" with INFO-level
    execution logging and full request/response data tracing."""
    api_name = f'{id}-api'
    stage_opts = aws_apigateway.StageOptions(
        logging_level=aws_apigateway.MethodLoggingLevel.INFO,
        data_trace_enabled=True)
    return aws_apigateway.LambdaRestApi(stack, api_name,
                                        rest_api_name=api_name,
                                        deploy_options=stage_opts,
                                        handler=api_lambda)
Пример #4
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Expose a HelloWorld Lambda via an access-logged, throttled REST API.

        The deployed stage's ARN is stored on ``self.resource_arn`` so other
        constructs can reference the stage.
        """
        super().__init__(scope, id, **kwargs)

        # Dedicated log group for API Gateway access logs.
        api_log_group = cw_logs.LogGroup(self, "HelloWorldAPILogs")

        # Create the api gateway for this lambda set
        self.target_api = api_gw.RestApi(
            self,
            'HelloWorldAPI',
            rest_api_name='HelloWorld',
            endpoint_types=[api_gw.EndpointType.REGIONAL],
            deploy_options=api_gw.StageOptions(
                # Access logs go to the group above in Common Log Format.
                access_log_destination=api_gw.LogGroupLogDestination(
                    api_log_group),
                access_log_format=api_gw.AccessLogFormat.clf(),
                method_options={
                    # This special path applies to all resource paths and all HTTP methods
                    "/*/*":
                    api_gw.MethodDeploymentOptions(throttling_rate_limit=100,
                                                   throttling_burst_limit=200)
                }))

        hello_world = _lambda.Function(
            self,
            "HelloWorld",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='helloworld.lambda_handler',
            code=_lambda.Code.from_asset("lambda_fns"),
            timeout=core.Duration.seconds(60))

        # Non-proxy integration: the 200 integration response sets a wildcard
        # CORS header that the method response below must declare.
        entity = self.target_api.root.add_resource('helloworld')
        this_lambda_integration = api_gw.LambdaIntegration(
            hello_world,
            proxy=False,
            integration_responses=[
                api_gw.IntegrationResponse(
                    status_code='200',
                    response_parameters={
                        'method.response.header.Access-Control-Allow-Origin':
                        "'*'"
                    })
            ])
        entity.add_method(
            'GET',
            this_lambda_integration,
            method_responses=[
                api_gw.MethodResponse(
                    status_code='200',
                    response_parameters={
                        'method.response.header.Access-Control-Allow-Origin':
                        True
                    })
            ])

        # Stage ARN: arn:aws:apigateway:<region>::/restapis/<api-id>/stages/<stage>
        self.resource_arn = f"arn:aws:apigateway:{core.Stack.of(self).region}::/restapis/{self.target_api.rest_api_id}/stages/{self.target_api.deployment_stage.stage_name}"
Пример #5
0
 def create_rest_apis(self) -> None:
     '''Rest API Gateway integrations with Lambda.

     Deploys the blog Lambda behind a LambdaRestApi on an "api" stage,
     rate-limited to the Lambda's configured max concurrency, with
     INFO-level execution logging.
     '''
     self.rest_api_blog = aws_apigateway.LambdaRestApi(
         self,
         'sls-blog-rest-api-gateway',
         handler=self.lambda_blog,
         deploy_options=aws_apigateway.StageOptions(
             stage_name='api',
             throttling_rate_limit=self.lambda_param_max_concurrency,
             # Use the enum member directly rather than constructing it
             # from the string 'INFO'.
             logging_level=aws_apigateway.MethodLoggingLevel.INFO,
         ),
     )
Пример #6
0
    def __init__(
        self,
        stack_obj,
        stack_id,
        cdk_resource=aws_apigateway.RestApi,
        version="v1",
    ):
        """Wrap an API Gateway resource named "<stack_id>-api-gateway",
        deployed to a stage named after *version*."""
        super().__init__(stack_obj, stack_id, cdk_resource)

        gateway_id = f"{stack_id}-api-gateway"
        stage = aws_apigateway.StageOptions(stage_name=version)
        self.cdk_resource = cdk_resource(
            stack_obj,
            gateway_id,
            rest_api_name=gateway_id,
            deploy_options=stage)
Пример #7
0
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        """Deploy an X-Ray-traced hello Lambda behind a logged, throttled API."""
        super().__init__(scope, construct_id, **kwargs)

        # Lambda function with active X-Ray tracing.
        hello_function = lambda_.Function(
            self,
            "hello-function",
            code=lambda_.Code.from_asset("src/hello/"),
            handler="main.handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            tracing=lambda_.Tracing.ACTIVE)

        # Keep the function's log group on a short retention.
        logs.LogGroup(
            self,
            "hello-logs",
            log_group_name=f"/aws/lambda/{hello_function.function_name}",
            retention=logs.RetentionDays.ONE_WEEK)

        # API Gateway access logs land in their own short-lived group.
        api_logs = logs.LogGroup(self,
                                 "hello-api-logs",
                                 retention=logs.RetentionDays.ONE_WEEK)

        access_log_format = apigw.AccessLogFormat.json_with_standard_fields(
            caller=True,
            http_method=True,
            ip=True,
            protocol=True,
            request_time=True,
            resource_path=True,
            response_length=True,
            status=True,
            user=True)
        stage = apigw.StageOptions(
            access_log_destination=apigw.LogGroupLogDestination(api_logs),
            access_log_format=access_log_format,
            throttling_burst_limit=1000,
            throttling_rate_limit=10,
            tracing_enabled=True)
        api = apigw.RestApi(self, "hello-api", deploy_options=stage)

        # Proxy every GET on the root straight through to the Lambda.
        api.root.add_method(
            "GET", apigw.LambdaIntegration(hello_function, proxy=True))
Пример #8
0
    def __init__(self, scope: core.Construct, id: str, **kwargs):
        """
        Creates a RestApi with some sensible defaults.

        defaults:
        - rest_api_name -> gen_name(scope, id) if not set
        - deploy_options -> INFO logging with metrics enabled if not set
        """
        kwargs.setdefault('rest_api_name', gen_name(scope, id))

        # Default deployment stage: INFO execution logging + CloudWatch metrics.
        default_stage = aws_apigateway.StageOptions(
            logging_level=aws_apigateway.MethodLoggingLevel.INFO,
            metrics_enabled=True)
        kwargs.setdefault('deploy_options', default_stage)

        super().__init__(scope, id, **kwargs)

        # Surface the invoke URL in the stack outputs.
        core.CfnOutput(self, "root_url", value=f"ROOT_URL={self.url}")
Пример #9
0
    def __init__(self, app: core.App, cfn_name: str, stack_env):
        """IP-whitelisted REST API whose POST /task invokes a Lambda."""
        super().__init__(scope=app, id=f"{cfn_name}-{stack_env}")

        # Lambda task handler.
        lambda_function = lambda_.Function(
            scope=self,
            id=f"{cfn_name}-lambda-task",
            code=lambda_.AssetCode.from_asset("lambda_script"),
            handler="lambda_handler.lambda_task",
            timeout=core.Duration.seconds(10),
            runtime=self.LAMBDA_PYTHON_RUNTIME,
            memory_size=128)

        # Resource policy restricting invocation to whitelisted source IPs.
        # BUG FIX: the original entry "127.0.0./32" was a malformed CIDR
        # (missing final octet); corrected to localhost /32.
        whitelisted_ips = ["127.0.0.1/32"]
        api_resource_policy = iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["execute-api:Invoke"],
                principals=[iam.AnyPrincipal()],
                resources=["execute-api:/*/*/*"],
                conditions={"IpAddress": {
                    "aws:SourceIp": whitelisted_ips
                }})
        ])

        # REST API with an environment-named stage and the policy above.
        base_api = apigw_.RestApi(
            scope=self,
            id=f"{cfn_name}-{stack_env}-apigw",
            rest_api_name=f"{cfn_name}-{stack_env}-apigw",
            deploy_options=apigw_.StageOptions(stage_name=stack_env),
            policy=api_resource_policy)

        # POST /task -> Lambda (non-proxy 200 mapping).
        api_entity = base_api.root.add_resource("task")
        api_entity_lambda = apigw_.LambdaIntegration(
            handler=lambda_function,
            integration_responses=[
                apigw_.IntegrationResponse(status_code="200")
            ])

        api_entity.add_method(http_method="POST",
                              integration=api_entity_lambda)
Пример #10
0
    def create_api_gateway(self) -> None:
        """
        Create API gateway and lambda integration
        """
        # Upload the OpenAPI spec as an S3 asset so CloudFormation can
        # inline it through the AWS::Include transform.
        openapi_asset = s3_assets.Asset(
            self,
            "openapi_asset",
            path="cbers2stac/openapi/core-item-search-query-integrated.yaml",
        )
        spec_body = core.Fn.transform(
            "AWS::Include", {"Location": openapi_asset.s3_object_url})
        apigateway.SpecRestApi(
            self,
            id="stacapi",
            api_definition=apigateway.AssetApiDefinition.from_inline(spec_body),
            deploy_options=apigateway.StageOptions(
                logging_level=apigateway.MethodLoggingLevel.INFO),
        )
Пример #11
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Pipeline: API GW -> publisher Lambda -> SQS -> subscriber Lambda -> DynamoDB."""
        super().__init__(scope, id, **kwargs)

        queue = _sqs.Queue(self, "queue", queue_name="queue")
        table = _dynamodb.Table(
            self,
            "table",
            partition_key=_dynamodb.Attribute(
                name="id", type=_dynamodb.AttributeType.NUMBER))

        # Publisher: invoked via API Gateway, pushes messages onto the queue.
        publisher_function = _lambda.Function(
            self,
            'publisher',
            runtime=_lambda.Runtime.PYTHON_3_7,
            # Code.asset() is deprecated; Code.from_asset() is the supported name.
            code=_lambda.Code.from_asset('projectOne/publisher'),
            handler='publisher.handler',
            environment={"QUEUE_URL": queue.queue_url}
        )

        api_gateway = _apigateway.RestApi(
            self,
            "api",
            deploy_options=_apigateway.StageOptions(stage_name="dev")
        )
        # Typo fixed: "lambad_integration" -> "lambda_integration".
        lambda_integration = _apigateway.LambdaIntegration(publisher_function)

        api_gateway.root.add_method('GET', lambda_integration)

        # Subscriber: drains the queue in batches and writes items to DynamoDB.
        subscriber_function = _lambda.Function(
            self,
            'subscriber',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('projectOne/subscriber'),
            handler='subscriber.handler',
            environment={
                "QUEUE_URL": queue.queue_url,
                "TABLE_NAME": table.table_name
                },
        )
        subscriber_function.add_event_source(SqsEventSource(queue, batch_size=10))

        # Least-privilege grants for each side of the queue.
        queue.grant_send_messages(publisher_function)
        table.grant(subscriber_function, "dynamodb:PutItem")
0
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        """REST API on start.[<env>.]<server_domain> with an ACM cert and
        a Route53 alias record; outputs the record's DNS name."""
        super().__init__(scope, construct_id, **kwargs)

        environment = self.node.try_get_context('environment')
        server_domain = self.node.try_get_context('server_domain')
        hosted_zone_id = self.node.try_get_context('hosted_zone_id')

        # Prod omits the environment segment from the hostname.
        self.domain_name = (f'start.{server_domain}'
                            if environment == 'prod' else
                            f'start.{environment}.{server_domain}')

        hosted_zone = r53.HostedZone.from_hosted_zone_attributes(
            self,
            'Zone',
            hosted_zone_id=hosted_zone_id,
            zone_name=server_domain)

        # DNS-validated certificate for the custom domain.
        certificate = acm.Certificate(
            self,
            'StartCert',
            domain_name=self.domain_name,
            validation=acm.CertificateValidation.from_dns(
                hosted_zone=hosted_zone))

        custom_domain = apigw.DomainNameOptions(
            certificate=certificate, domain_name=self.domain_name)
        self.rest_api = apigw.RestApi(
            self,
            'api',
            domain_name=custom_domain,
            deploy_options=apigw.StageOptions(stage_name=environment))

        # Alias the custom domain to the API in the hosted zone.
        arecord = r53.ARecord(
            self,
            'StartARecord',
            zone=hosted_zone,
            record_name=self.domain_name,
            target=r53.RecordTarget(
                alias_target=r53_targets.ApiGateway(self.rest_api)))
        cdk.CfnOutput(self, 'DNSName', value=arecord.domain_name)
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 is_qa_stack=False,
                 **kwargs) -> None:
        """Event-delivery pipeline: API Gateway (API-key protected) -> Lambda
        -> Kinesis Firehose -> S3.

        When ``is_qa_stack`` is True, construct ids/names get a ``-qa``
        suffix and the API deploys to a ``qa`` stage instead of ``prod``.
        """
        super().__init__(scope, id, **kwargs)

        def qa_maybe(id_str: str) -> str:
            # Suffix ids with '-qa' when building the QA flavour of the stack.
            return id_str if not is_qa_stack else id_str + '-qa'

        # Bucket used to deliver events
        delivery_bucket = aws_s3.Bucket(
            self,
            id=qa_maybe('my-event-storage-bucket'),
            bucket_name=qa_maybe('my-event-storage-bucket'),
            block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL)

        # ---- Below is firehose related code ----
        # Since firehose is not yet cdk ready we need to do everything the old way with defining roles
        role = aws_iam.Role(
            self,
            id=qa_maybe('my-firehose-delivery-role'),
            assumed_by=aws_iam.ServicePrincipal('firehose.amazonaws.com'))
        delivery_bucket.grant_write(role)

        # Constructs without high-level CDK support are defined via their
        # low-level Cfn (CloudFormation) equivalents.
        firehose = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            id=qa_maybe('my-pipeline-firehose'),
            delivery_stream_name=qa_maybe('my-pipeline-firehose'),
            delivery_stream_type='DirectPut',
            s3_destination_configuration={
                'bucketArn': delivery_bucket.bucket_arn,
                'bufferingHints': {
                    'intervalInSeconds':
                    900,  # The recommended setting is 900 (maximum for firehose)
                    'sizeInMBs': 5
                },
                'compressionFormat': 'UNCOMPRESSED',
                'prefix':
                'events/',  # This is the folder the events will end up in
                'errorOutputPrefix':
                'delivery_error/',  # Folder in case of delivery error
                'roleArn': role.role_arn
            })

        # Policy statement required for lambda to be able to put records on the firehose stream
        firehose_policy = aws_iam.PolicyStatement(
            actions=['firehose:DescribeDeliveryStream', 'firehose:PutRecord'],
            effect=aws_iam.Effect.ALLOW,
            resources=[firehose.attr_arn])

        # ---- API GW + Lambda code ----
        # The Lambda reads its target stream from the environment variables.
        api_lambda = aws_lambda.Function(
            self,
            id=qa_maybe('my-api-gw-lambda'),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.asset('src/lambda_code/api_gw_lambda'),
            handler='main.handler',
            memory_size=128,
            timeout=core.Duration.seconds(5),
            environment={
                'region': self.region,
                'stream_name': firehose.delivery_stream_name
            })
        # Important to add the firehose putRecord policy to lambda otherwise there will be access errors
        api_lambda.add_to_role_policy(firehose_policy)

        # Non-proxy API in front of the Lambda; stage name tracks QA/prod.
        api_gw = aws_apigateway.LambdaRestApi(
            self,
            id=qa_maybe('my-api-gw'),
            handler=api_lambda,
            proxy=False,
            deploy_options=aws_apigateway.StageOptions(
                stage_name='qa' if is_qa_stack else 'prod'))

        # Add API query method; callers must present a valid API key.
        api_gw.root.add_resource('send_data').add_method('GET',
                                                         api_key_required=True)

        # Generate an API key and add it to a usage plan
        api_key = api_gw.add_api_key(qa_maybe('MyPipelinePublicKey'))
        usage_plan = api_gw.add_usage_plan(
            id=qa_maybe('my-pipeline-usage-plan'),
            name='standard',
            api_key=api_key,
            throttle=aws_apigateway.ThrottleSettings(rate_limit=10,
                                                     burst_limit=2))

        # Add the usage plan to the API GW
        usage_plan.add_api_stage(stage=api_gw.deployment_stage)
Пример #14
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = aws_ec2.Vpc(
            self,
            "OctemberVPC",
            max_azs=2,
            #      subnet_configuration=[{
            #          "cidrMask": 24,
            #          "name": "Public",
            #          "subnetType": aws_ec2.SubnetType.PUBLIC,
            #        },
            #        {
            #          "cidrMask": 24,
            #          "name": "Private",
            #          "subnetType": aws_ec2.SubnetType.PRIVATE
            #        },
            #        {
            #          "cidrMask": 28,
            #          "name": "Isolated",
            #          "subnetType": aws_ec2.SubnetType.ISOLATED,
            #          "reserved": True
            #        }
            #      ],
            gateway_endpoints={
                "S3":
                aws_ec2.GatewayVpcEndpointOptions(
                    service=aws_ec2.GatewayVpcEndpointAwsService.S3)
            })

        dynamo_db_endpoint = vpc.add_gateway_endpoint(
            "DynamoDbEndpoint",
            service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB)

        s3_bucket = s3.Bucket(
            self,
            "s3bucket",
            bucket_name="octember-bizcard-{region}-{account}".format(
                region=core.Aws.REGION, account=core.Aws.ACCOUNT_ID))

        api = apigw.RestApi(
            self,
            "BizcardImageUploader",
            rest_api_name="BizcardImageUploader",
            description="This service serves uploading bizcard images into s3.",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            binary_media_types=["image/png", "image/jpg"],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        rest_api_role = aws_iam.Role(
            self,
            "ApiGatewayRoleForS3",
            role_name="ApiGatewayRoleForS3FullAccess",
            assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonS3FullAccess")
            ])

        list_objects_responses = [
            apigw.IntegrationResponse(
                status_code="200",
                #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html#aws_cdk.aws_apigateway.IntegrationResponse.response_parameters
                # The response parameters from the backend response that API Gateway sends to the method response.
                # Use the destination as the key and the source as the value:
                #  - The destination must be an existing response parameter in the MethodResponse property.
                #  - The source must be an existing method request parameter or a static value.
                response_parameters={
                    'method.response.header.Timestamp':
                    'integration.response.header.Date',
                    'method.response.header.Content-Length':
                    'integration.response.header.Content-Length',
                    'method.response.header.Content-Type':
                    'integration.response.header.Content-Type'
                }),
            apigw.IntegrationResponse(status_code="400",
                                      selection_pattern="4\d{2}"),
            apigw.IntegrationResponse(status_code="500",
                                      selection_pattern="5\d{2}")
        ]

        list_objects_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses)

        get_s3_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path='/',
            options=list_objects_integration_options)

        api.root.add_method(
            "GET",
            get_s3_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={'method.request.header.Content-Type': False})

        get_s3_folder_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses,
            #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html#aws_cdk.aws_apigateway.IntegrationOptions.request_parameters
            # Specify request parameters as key-value pairs (string-to-string mappings), with a destination as the key and a source as the value.
            # The source must be an existing method request parameter or a static value.
            request_parameters={
                "integration.request.path.bucket": "method.request.path.folder"
            })

        get_s3_folder_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path="{bucket}",
            options=get_s3_folder_integration_options)

        s3_folder = api.root.add_resource('{folder}')
        s3_folder.add_method(
            "GET",
            get_s3_folder_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True
            })

        get_s3_item_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses,
            request_parameters={
                "integration.request.path.bucket":
                "method.request.path.folder",
                "integration.request.path.object": "method.request.path.item"
            })

        get_s3_item_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path="{bucket}/{object}",
            options=get_s3_item_integration_options)

        s3_item = s3_folder.add_resource('{item}')
        s3_item.add_method(
            "GET",
            get_s3_item_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True,
                'method.request.path.item': True
            })

        put_s3_item_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[
                apigw.IntegrationResponse(status_code="200"),
                apigw.IntegrationResponse(status_code="400",
                                          selection_pattern="4\d{2}"),
                apigw.IntegrationResponse(status_code="500",
                                          selection_pattern="5\d{2}")
            ],
            request_parameters={
                "integration.request.header.Content-Type":
                "method.request.header.Content-Type",
                "integration.request.path.bucket":
                "method.request.path.folder",
                "integration.request.path.object": "method.request.path.item"
            })

        put_s3_item_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="PUT",
            path="{bucket}/{object}",
            options=put_s3_item_integration_options)

        s3_item.add_method(
            "PUT",
            put_s3_item_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True,
                'method.request.path.item': True
            })

        ddb_table = dynamodb.Table(
            self,
            "BizcardImageMetaInfoDdbTable",
            table_name="OctemberBizcardImgMeta",
            partition_key=dynamodb.Attribute(
                name="image_id", type=dynamodb.AttributeType.STRING),
            billing_mode=dynamodb.BillingMode.PROVISIONED,
            read_capacity=15,
            write_capacity=5)

        img_kinesis_stream = kinesis.Stream(
            self, "BizcardImagePath", stream_name="octember-bizcard-image")

        # create lambda function
        trigger_textract_lambda_fn = _lambda.Function(
            self,
            "TriggerTextExtractorFromImage",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="TriggerTextExtractorFromImage",
            handler="trigger_text_extract_from_s3_image.lambda_handler",
            description="Trigger to extract text from an image in S3",
            code=_lambda.Code.asset(
                "./src/main/python/TriggerTextExtractFromS3Image"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'DDB_TABLE_NAME': ddb_table.table_name,
                'KINESIS_STREAM_NAME': img_kinesis_stream.stream_name
            },
            timeout=core.Duration.minutes(5))

        ddb_table_rw_policy_statement = aws_iam.PolicyStatement(
            # Read/write on the DynamoDB table, plus the matching DAX
            # actions; this statement object is reused below for every
            # function that touches the table.
            effect=aws_iam.Effect.ALLOW,
            resources=[ddb_table.table_arn],
            actions=[
                "dynamodb:BatchGetItem", "dynamodb:Describe*",
                "dynamodb:List*", "dynamodb:GetItem", "dynamodb:Query",
                "dynamodb:Scan", "dynamodb:BatchWriteItem",
                "dynamodb:DeleteItem", "dynamodb:PutItem",
                "dynamodb:UpdateItem", "dax:Describe*", "dax:List*",
                "dax:GetItem", "dax:BatchGetItem", "dax:Query", "dax:Scan",
                "dax:BatchWriteItem", "dax:DeleteItem", "dax:PutItem",
                "dax:UpdateItem"
            ])

        # Grant the image-trigger Lambda access to DynamoDB/DAX and to the
        # image Kinesis stream it publishes records to.
        trigger_textract_lambda_fn.add_to_role_policy(
            ddb_table_rw_policy_statement)
        trigger_textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[img_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:Get*", "kinesis:List*",
                                        "kinesis:Describe*",
                                        "kinesis:PutRecord",
                                        "kinesis:PutRecords"
                                    ]))

        # Invoke the Lambda whenever a .jpg object is created under the
        # "bizcard-raw-img/" prefix of the bucket.
        s3_event_filter = s3.NotificationKeyFilter(prefix="bizcard-raw-img/",
                                                   suffix=".jpg")
        s3_event_source = S3EventSource(s3_bucket,
                                        events=[s3.EventType.OBJECT_CREATED],
                                        filters=[s3_event_filter])
        trigger_textract_lambda_fn.add_event_source(s3_event_source)

        #XXX: https://github.com/aws/aws-cdk/issues/2240
        # Pre-create the log group with a short retention instead of passing
        # log_retention to the Function constructor, which would create extra
        # Lambda functions named like LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8a.
        log_group = aws_logs.LogGroup(
            self,
            "TriggerTextractLogGroup",
            log_group_name="/aws/lambda/TriggerTextExtractorFromImage",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(trigger_textract_lambda_fn)

        # Kinesis stream carrying the text extracted from bizcard images.
        text_kinesis_stream = kinesis.Stream(
            self, "BizcardTextData", stream_name="octember-bizcard-txt")

        # Lambda that extracts text from each uploaded image in S3 and is
        # configured (via environment) with the table and text stream names.
        textract_lambda_fn = _lambda.Function(
            self,
            "GetTextFromImage",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="GetTextFromImage",
            handler="get_text_from_s3_image.lambda_handler",
            description="extract text from an image in S3",
            code=_lambda.Code.asset("./src/main/python/GetTextFromS3Image"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'DDB_TABLE_NAME': ddb_table.table_name,
                'KINESIS_STREAM_NAME': text_kinesis_stream.stream_name
            },
            timeout=core.Duration.minutes(5))

        # Same DynamoDB/DAX permissions as the trigger function, plus
        # read/write on the text stream.
        textract_lambda_fn.add_to_role_policy(ddb_table_rw_policy_statement)
        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[text_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:Get*", "kinesis:List*",
                                        "kinesis:Describe*",
                                        "kinesis:PutRecord",
                                        "kinesis:PutRecords"
                                    ]))

        # Read/write on the image bucket and all objects under it.
        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        s3_bucket.bucket_arn, "{}/*".format(
                            s3_bucket.bucket_arn)
                    ],
                    "actions": [
                        "s3:AbortMultipartUpload", "s3:GetBucketLocation",
                        "s3:GetObject", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads", "s3:PutObject"
                    ]
                }))

        # NOTE(review): "textract:*" on "*" is broad — consider narrowing the
        # action list to the specific Textract calls the handler makes.
        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=["textract:*"]))

        # Consume the image-event stream in batches of up to 100 records,
        # starting from new records only (LATEST).
        img_kinesis_event_source = KinesisEventSource(
            img_kinesis_stream,
            batch_size=100,
            starting_position=_lambda.StartingPosition.LATEST)
        textract_lambda_fn.add_event_source(img_kinesis_event_source)

        # Pre-created log group with short retention (same reasoning as the
        # TriggerTextractLogGroup above).
        log_group = aws_logs.LogGroup(
            self,
            "GetTextFromImageLogGroup",
            log_group_name="/aws/lambda/GetTextFromImage",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(textract_lambda_fn)

        # Client security group: attached to anything that needs to reach
        # the Elasticsearch domain.
        sg_use_bizcard_es = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard elasticsearch client',
            security_group_name='use-octember-bizcard-es')
        core.Tags.of(sg_use_bizcard_es).add('Name', 'use-octember-bizcard-es')

        # Security group for the Elasticsearch domain itself.
        sg_bizcard_es = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard elasticsearch',
            security_group_name='octember-bizcard-es')
        core.Tags.of(sg_bizcard_es).add('Name', 'octember-bizcard-es')

        # Allow node-to-node traffic inside the domain SG and inbound traffic
        # from holders of the client SG (all TCP ports in both rules).
        sg_bizcard_es.add_ingress_rule(peer=sg_bizcard_es,
                                       connection=aws_ec2.Port.all_tcp(),
                                       description='octember-bizcard-es')
        sg_bizcard_es.add_ingress_rule(peer=sg_use_bizcard_es,
                                       connection=aws_ec2.Port.all_tcp(),
                                       description='use-octember-bizcard-es')

        # Bastion host for operator access to the in-VPC resources.
        # NOTE(review): SSH (22) is open to 0.0.0.0/0 — consider restricting
        # the peer CIDR or using SSM Session Manager instead.
        sg_ssh_access = aws_ec2.SecurityGroup(
            self,
            "BastionHostSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for bastion host',
            security_group_name='octember-bastion-host-sg')
        core.Tags.of(sg_ssh_access).add('Name', 'octember-bastion-host')
        sg_ssh_access.add_ingress_rule(peer=aws_ec2.Peer.any_ipv4(),
                                       connection=aws_ec2.Port.tcp(22),
                                       description='ssh access')

        bastion_host = aws_ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc,
            instance_type=aws_ec2.InstanceType('t3.nano'),
            security_group=sg_ssh_access,
            subnet_selection=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC))
        # Also attach the ES client group so the bastion can query the domain.
        bastion_host.instance.add_security_group(sg_use_bizcard_es)

        #XXX: aws cdk elasticsearch example - https://github.com/aws/aws-cdk/issues/2873
        # VPC-only Elasticsearch 7.9 domain: 2 data nodes (zone-aware) plus 3
        # dedicated t2.medium masters, 10 GiB gp2 EBS per node. The "*"
        # principal in the access policy is scoped by the VPC placement and
        # security group, and the resource is limited to this domain's ARN.
        # NOTE(review): encryption at rest is disabled — confirm that is
        # acceptable for this data.
        es_cfn_domain = aws_elasticsearch.CfnDomain(
            self,
            'BizcardSearch',
            elasticsearch_cluster_config={
                "dedicatedMasterCount": 3,
                "dedicatedMasterEnabled": True,
                "dedicatedMasterType": "t2.medium.elasticsearch",
                "instanceCount": 2,
                "instanceType": "t2.medium.elasticsearch",
                "zoneAwarenessEnabled": True
            },
            ebs_options={
                "ebsEnabled": True,
                "volumeSize": 10,
                "volumeType": "gp2"
            },
            domain_name="octember-bizcard",
            elasticsearch_version="7.9",
            encryption_at_rest_options={"enabled": False},
            access_policies={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Effect":
                    "Allow",
                    "Principal": {
                        "AWS": "*"
                    },
                    "Action":
                    ["es:Describe*", "es:List*", "es:Get*", "es:ESHttp*"],
                    "Resource":
                    self.format_arn(service="es",
                                    resource="domain",
                                    resource_name="octember-bizcard/*")
                }]
            },
            snapshot_options={"automatedSnapshotStartHour": 17},
            vpc_options={
                "securityGroupIds": [sg_bizcard_es.security_group_id],
                "subnetIds":
                vpc.select_subnets(
                    subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids
            })
        core.Tags.of(es_cfn_domain).add('Name', 'octember-bizcard-es')

        # Name of the pre-existing bucket holding the Lambda-layer zips,
        # supplied through CDK context ("-c lib_bucket_name=...").
        # NOTE(review): try_get_context returns None when the key is missing,
        # which only fails later inside from_bucket_name — consider failing
        # fast with a clear error message.
        s3_lib_bucket_name = self.node.try_get_context("lib_bucket_name")

        #XXX: https://github.com/aws/aws-cdk/issues/1342
        # NOTE(review): the stack's own `id` is reused as the construct id
        # here; a distinct id (e.g. "LibBucket") would be clearer.
        s3_lib_bucket = s3.Bucket.from_bucket_name(self, id,
                                                   s3_lib_bucket_name)
        # Layer bundling the Elasticsearch client library.
        es_lib_layer = _lambda.LayerVersion(
            self,
            "ESLib",
            layer_version_name="es-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(s3_lib_bucket,
                                          "var/octember-es-lib.zip"))

        # Layer bundling the redis client library.
        redis_lib_layer = _lambda.LayerVersion(
            self,
            "RedisLib",
            layer_version_name="redis-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(s3_lib_bucket,
                                          "var/octember-redis-lib.zip"))

        #XXX: Deploy lambda in VPC - https://github.com/aws/aws-cdk/issues/1342
        # Consumer of the text stream that upserts bizcard text into the
        # Elasticsearch domain; runs in the VPC with the ES client SG so it
        # can reach the VPC-only domain endpoint.
        upsert_to_es_lambda_fn = _lambda.Function(
            self,
            "UpsertBizcardToES",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="UpsertBizcardToElasticSearch",
            handler="upsert_bizcard_to_es.lambda_handler",
            description="Upsert bizcard text into elasticsearch",
            code=_lambda.Code.asset("./src/main/python/UpsertBizcardToES"),
            environment={
                'ES_HOST': es_cfn_domain.attr_domain_endpoint,
                'ES_INDEX': 'octember_bizcard',
                'ES_TYPE': 'bizcard'
            },
            timeout=core.Duration.minutes(5),
            layers=[es_lib_layer],
            security_groups=[sg_use_bizcard_es],
            vpc=vpc)

        # NOTE(review): batch_size=99 looks arbitrary next to the 100 used
        # for the image stream — confirm it is intentional.
        text_kinesis_event_source = KinesisEventSource(
            text_kinesis_stream,
            batch_size=99,
            starting_position=_lambda.StartingPosition.LATEST)
        upsert_to_es_lambda_fn.add_event_source(text_kinesis_event_source)

        # Pre-created log group with short retention (same reasoning as the
        # TriggerTextractLogGroup above).
        log_group = aws_logs.LogGroup(
            self,
            "UpsertBizcardToESLogGroup",
            log_group_name="/aws/lambda/UpsertBizcardToElasticSearch",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(upsert_to_es_lambda_fn)

        # Policy document for the Firehose delivery role: write to the
        # destination bucket, read Glue table metadata (needed if record
        # format conversion is ever enabled), read from the source Kinesis
        # stream, and write its own delivery logs.
        firehose_role_policy_doc = aws_iam.PolicyDocument()
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        s3_bucket.bucket_arn, "{}/*".format(
                            s3_bucket.bucket_arn)
                    ],
                    "actions": [
                        "s3:AbortMultipartUpload", "s3:GetBucketLocation",
                        "s3:GetObject", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads", "s3:PutObject"
                    ]
                }))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=[
                                        "glue:GetTable",
                                        "glue:GetTableVersion",
                                        "glue:GetTableVersions"
                                    ]))

        # Read-side permissions on the text stream (Firehose is a consumer).
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[text_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:DescribeStream",
                                        "kinesis:GetShardIterator",
                                        "kinesis:GetRecords"
                                    ]))

        firehose_log_group_name = "/aws/kinesisfirehose/octember-bizcard-txt-to-s3"
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                #XXX: The ARN will be formatted as follows:
                # arn:{partition}:{service}:{region}:{account}:{resource}{sep}}{resource-name}
                resources=[
                    self.format_arn(service="logs",
                                    resource="log-group",
                                    resource_name="{}:log-stream:*".format(
                                        firehose_log_group_name),
                                    sep=":")
                ],
                actions=["logs:PutLogEvents"]))

        firehose_role = aws_iam.Role(
            self,
            "FirehoseDeliveryRole",
            role_name="FirehoseDeliveryRole",
            assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"),
            #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
            inline_policies={"firehose_role_policy": firehose_role_policy_doc})

        # Archive the raw text records from the Kinesis stream to S3 under
        # "bizcard-text/", gzipped, buffering up to 1 MiB or 60 seconds.
        bizcard_text_to_s3_delivery_stream = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "BizcardTextToS3",
            delivery_stream_name="octember-bizcard-txt-to-s3",
            delivery_stream_type="KinesisStreamAsSource",
            kinesis_stream_source_configuration={
                "kinesisStreamArn": text_kinesis_stream.stream_arn,
                "roleArn": firehose_role.role_arn
            },
            extended_s3_destination_configuration={
                "bucketArn": s3_bucket.bucket_arn,
                "bufferingHints": {
                    "intervalInSeconds": 60,
                    "sizeInMBs": 1
                },
                "cloudWatchLoggingOptions": {
                    "enabled": True,
                    "logGroupName": firehose_log_group_name,
                    "logStreamName": "S3Delivery"
                },
                "compressionFormat": "GZIP",
                "prefix": "bizcard-text/",
                "roleArn": firehose_role.role_arn
            })

        # Client security group for the search-query Redis cache.
        sg_use_bizcard_es_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchCacheClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard search query cache client',
            security_group_name='use-octember-bizcard-es-cache')
        core.Tags.of(sg_use_bizcard_es_cache).add(
            'Name', 'use-octember-bizcard-es-cache')

        # Security group for the cache cluster; only holders of the client
        # group may connect, on the Redis port (6379).
        sg_bizcard_es_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchCacheSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard search query cache',
            security_group_name='octember-bizcard-es-cache')
        core.Tags.of(sg_bizcard_es_cache).add('Name',
                                              'octember-bizcard-es-cache')

        sg_bizcard_es_cache.add_ingress_rule(
            peer=sg_use_bizcard_es_cache,
            connection=aws_ec2.Port.tcp(6379),
            description='use-octember-bizcard-es-cache')

        # Subnet group placing the cache in the VPC's private subnets.
        es_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup(
            self,
            "QueryCacheSubnetGroup",
            description="subnet group for octember-bizcard-es-cache",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            cache_subnet_group_name='octember-bizcard-es-cache')

        # Single-node Redis 5.0.5 cluster used to cache search results.
        es_query_cache = aws_elasticache.CfnCacheCluster(
            self,
            "BizcardSearchQueryCache",
            cache_node_type="cache.t3.small",
            num_cache_nodes=1,
            engine="redis",
            engine_version="5.0.5",
            auto_minor_version_upgrade=False,
            cluster_name="octember-bizcard-es-cache",
            snapshot_retention_limit=3,
            snapshot_window="17:00-19:00",
            preferred_maintenance_window="mon:19:00-mon:20:30",
            #XXX: Do not use reference for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098
            #cache_subnet_group_name=es_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC
            cache_subnet_group_name='octember-bizcard-es-cache',
            vpc_security_group_ids=[sg_bizcard_es_cache.security_group_id])

        #XXX: If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster.
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-cache-cluster.html#cfn-elasticache-cachecluster-cachesubnetgroupname
        # Explicit ordering is required because the subnet group is referenced
        # by its literal name, which creates no implicit dependency.
        es_query_cache.add_depends_on(es_query_cache_subnet_group)

        #XXX: add more than 2 security groups
        # https://github.com/aws/aws-cdk/blob/ea10f0d141a48819ec0000cd7905feda993870a9/packages/%40aws-cdk/aws-lambda/lib/function.ts#L387
        # https://github.com/aws/aws-cdk/issues/1555
        # https://github.com/aws/aws-cdk/pull/5049
        # Search front end: queries the Elasticsearch domain and uses the
        # Redis cluster as a query cache, so it carries both client SGs.
        bizcard_search_lambda_fn = _lambda.Function(
            self,
            "BizcardSearchServer",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="BizcardSearchProxy",
            handler="es_search_bizcard.lambda_handler",
            description="Proxy server to search bizcard text",
            code=_lambda.Code.asset("./src/main/python/SearchBizcard"),
            environment={
                'ES_HOST': es_cfn_domain.attr_domain_endpoint,
                'ES_INDEX': 'octember_bizcard',
                'ES_TYPE': 'bizcard',
                'ELASTICACHE_HOST': es_query_cache.attr_redis_endpoint_address
            },
            timeout=core.Duration.minutes(1),
            layers=[es_lib_layer, redis_lib_layer],
            security_groups=[sg_use_bizcard_es, sg_use_bizcard_es_cache],
            vpc=vpc)

        #XXX: create API Gateway + LambdaProxy
        # Regional REST API exposing GET /search on stage "v1".
        search_api = apigw.LambdaRestApi(
            self,
            "BizcardSearchAPI",
            handler=bizcard_search_lambda_fn,
            proxy=False,
            rest_api_name="BizcardSearch",
            description="This service serves searching bizcard text.",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        bizcard_search = search_api.root.add_resource('search')
        bizcard_search.add_method(
            "GET",
            method_responses=[
                # NOTE(review): apigw.EmptyModel is deprecated in newer CDK
                # releases; apigw.Model.EMPTY_MODEL is the replacement.
                apigw.MethodResponse(
                    status_code="200",
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ])

        # Client / server security groups for the Neptune graph database
        # (Gremlin port 8182), mirroring the Elasticsearch SG pattern above.
        sg_use_bizcard_graph_db = aws_ec2.SecurityGroup(
            self,
            "BizcardGraphDbClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard graph db client',
            security_group_name='use-octember-bizcard-neptune')
        core.Tags.of(sg_use_bizcard_graph_db).add(
            'Name', 'use-octember-bizcard-neptune')

        sg_bizcard_graph_db = aws_ec2.SecurityGroup(
            self,
            "BizcardGraphDbSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard graph db',
            security_group_name='octember-bizcard-neptune')
        core.Tags.of(sg_bizcard_graph_db).add('Name',
                                              'octember-bizcard-neptune')

        # Allow intra-cluster traffic and inbound connections from holders
        # of the client group, both restricted to port 8182.
        sg_bizcard_graph_db.add_ingress_rule(
            peer=sg_bizcard_graph_db,
            connection=aws_ec2.Port.tcp(8182),
            description='octember-bizcard-neptune')
        sg_bizcard_graph_db.add_ingress_rule(
            peer=sg_use_bizcard_graph_db,
            connection=aws_ec2.Port.tcp(8182),
            description='use-octember-bizcard-neptune')

        bizcard_graph_db_subnet_group = aws_neptune.CfnDBSubnetGroup(
            self,
            "NeptuneSubnetGroup",
            db_subnet_group_description=
            "subnet group for octember-bizcard-neptune",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            db_subnet_group_name='octember-bizcard-neptune')

        # Neptune cluster in the private subnets. The subnet group is
        # referenced by its plain-string name, which creates no implicit
        # dependency, hence the explicit add_depends_on below.
        bizcard_graph_db = aws_neptune.CfnDBCluster(
            self,
            "BizcardGraphDB",
            availability_zones=vpc.availability_zones,
            db_subnet_group_name=bizcard_graph_db_subnet_group.
            db_subnet_group_name,
            db_cluster_identifier="octember-bizcard",
            backup_retention_period=1,
            preferred_backup_window="08:45-09:15",
            preferred_maintenance_window="sun:18:00-sun:18:30",
            vpc_security_group_ids=[sg_bizcard_graph_db.security_group_id])
        bizcard_graph_db.add_depends_on(bizcard_graph_db_subnet_group)

        # Primary instance in the first AZ ...
        bizcard_graph_db_instance = aws_neptune.CfnDBInstance(
            self,
            "BizcardGraphDBInstance",
            db_instance_class="db.r5.large",
            allow_major_version_upgrade=False,
            auto_minor_version_upgrade=False,
            availability_zone=vpc.availability_zones[0],
            db_cluster_identifier=bizcard_graph_db.db_cluster_identifier,
            db_instance_identifier="octember-bizcard",
            preferred_maintenance_window="sun:18:00-sun:18:30")
        bizcard_graph_db_instance.add_depends_on(bizcard_graph_db)

        # ... and a replica in the last AZ; ordered after the first instance
        # so that the first one becomes the writer.
        bizcard_graph_db_replica_instance = aws_neptune.CfnDBInstance(
            self,
            "BizcardGraphDBReplicaInstance",
            db_instance_class="db.r5.large",
            allow_major_version_upgrade=False,
            auto_minor_version_upgrade=False,
            availability_zone=vpc.availability_zones[-1],
            db_cluster_identifier=bizcard_graph_db.db_cluster_identifier,
            db_instance_identifier="octember-bizcard-replica",
            preferred_maintenance_window="sun:18:00-sun:18:30")
        bizcard_graph_db_replica_instance.add_depends_on(bizcard_graph_db)
        bizcard_graph_db_replica_instance.add_depends_on(
            bizcard_graph_db_instance)

        # Layer bundling the gremlinpython client library.
        gremlinpython_lib_layer = _lambda.LayerVersion(
            self,
            "GremlinPythonLib",
            layer_version_name="gremlinpython-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(
                s3_lib_bucket, "var/octember-gremlinpython-lib.zip"))

        #XXX: https://github.com/aws/aws-cdk/issues/1342
        # Second consumer of the text stream: upserts bizcard data into the
        # Neptune graph via the cluster's writer endpoint.
        upsert_to_neptune_lambda_fn = _lambda.Function(
            self,
            "UpsertBizcardToGraphDB",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="UpsertBizcardToNeptune",
            handler="upsert_bizcard_to_graph_db.lambda_handler",
            description="Upsert bizcard into neptune",
            code=_lambda.Code.asset(
                "./src/main/python/UpsertBizcardToGraphDB"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_endpoint,
                'NEPTUNE_PORT': bizcard_graph_db.attr_port
            },
            timeout=core.Duration.minutes(5),
            layers=[gremlinpython_lib_layer],
            security_groups=[sg_use_bizcard_graph_db],
            vpc=vpc)

        # NOTE(review): reuses the KinesisEventSource object already bound to
        # the ES upsert function; each bind creates its own event source
        # mapping, but a fresh instance per function would be clearer.
        upsert_to_neptune_lambda_fn.add_event_source(text_kinesis_event_source)

        # Pre-created log group with short retention (same reasoning as the
        # TriggerTextractLogGroup above).
        log_group = aws_logs.LogGroup(
            self,
            "UpsertBizcardToGraphDBLogGroup",
            log_group_name="/aws/lambda/UpsertBizcardToNeptune",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(upsert_to_neptune_lambda_fn)

        # Client security group for the recommendation-query Redis cache.
        # Fix: the Name tag previously said 'use-octember-bizcard-es-cache'
        # (copy-paste from the ES cache block); every sibling SG in this
        # stack tags Name with its own security_group_name, so this one now
        # does too.
        sg_use_bizcard_neptune_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardNeptuneCacheClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard recommendation query cache client',
            security_group_name='use-octember-bizcard-neptune-cache')
        core.Tags.of(sg_use_bizcard_neptune_cache).add(
            'Name', 'use-octember-bizcard-neptune-cache')

        # Security group for the recommendation cache cluster; only holders
        # of the client group may connect, on the Redis port (6379).
        sg_bizcard_neptune_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardNeptuneCacheSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard recommendation query cache',
            security_group_name='octember-bizcard-neptune-cache')
        core.Tags.of(sg_bizcard_neptune_cache).add(
            'Name', 'octember-bizcard-neptune-cache')

        sg_bizcard_neptune_cache.add_ingress_rule(
            peer=sg_use_bizcard_neptune_cache,
            connection=aws_ec2.Port.tcp(6379),
            description='use-octember-bizcard-neptune-cache')

        # Subnet group placing the cache in the VPC's private subnets.
        recomm_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup(
            self,
            "RecommQueryCacheSubnetGroup",
            description="subnet group for octember-bizcard-neptune-cache",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            cache_subnet_group_name='octember-bizcard-neptune-cache')

        # Single-node Redis 5.0.5 cluster caching recommendation queries.
        recomm_query_cache = aws_elasticache.CfnCacheCluster(
            self,
            "BizcardRecommQueryCache",
            cache_node_type="cache.t3.small",
            num_cache_nodes=1,
            engine="redis",
            engine_version="5.0.5",
            auto_minor_version_upgrade=False,
            cluster_name="octember-bizcard-neptune-cache",
            snapshot_retention_limit=3,
            snapshot_window="17:00-19:00",
            preferred_maintenance_window="mon:19:00-mon:20:30",
            #XXX: Do not use reference for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098
            #cache_subnet_group_name=recomm_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC
            cache_subnet_group_name='octember-bizcard-neptune-cache',
            vpc_security_group_ids=[
                sg_bizcard_neptune_cache.security_group_id
            ])

        # Explicit ordering: the subnet group is referenced by its literal
        # name, which creates no implicit dependency.
        recomm_query_cache.add_depends_on(recomm_query_cache_subnet_group)

        # PYMK ("People You May Know") recommender: reads from the Neptune
        # read endpoint and caches results in the Redis cluster above, so it
        # carries both client SGs.
        bizcard_recomm_lambda_fn = _lambda.Function(
            self,
            "BizcardRecommender",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="BizcardRecommender",
            handler="neptune_recommend_bizcard.lambda_handler",
            description="This service serves PYMK(People You May Know).",
            code=_lambda.Code.asset("./src/main/python/RecommendBizcard"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_read_endpoint,
                'NEPTUNE_PORT': bizcard_graph_db.attr_port,
                'ELASTICACHE_HOST':
                recomm_query_cache.attr_redis_endpoint_address
            },
            timeout=core.Duration.minutes(1),
            layers=[gremlinpython_lib_layer, redis_lib_layer],
            security_groups=[
                sg_use_bizcard_graph_db, sg_use_bizcard_neptune_cache
            ],
            vpc=vpc)

        #XXX: create API Gateway + LambdaProxy
        # Regional REST API exposing GET /pymk on stage "v1".
        recomm_api = apigw.LambdaRestApi(
            self,
            "BizcardRecommendAPI",
            handler=bizcard_recomm_lambda_fn,
            proxy=False,
            rest_api_name="BizcardRecommend",
            description="This service serves PYMK(People You May Know).",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        bizcard_recomm = recomm_api.root.add_resource('pymk')
        bizcard_recomm.add_method(
            "GET",
            method_responses=[
                # NOTE(review): apigw.EmptyModel is deprecated in newer CDK
                # releases; apigw.Model.EMPTY_MODEL is the replacement.
                apigw.MethodResponse(
                    status_code="200",
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ])

        # IAM policy for the Neptune-workbench SageMaker notebook: read the
        # public aws-neptune-notebook bucket (graph_notebook tarball) and
        # connect to this Neptune cluster only.
        sagemaker_notebook_role_policy_doc = aws_iam.PolicyDocument()
        sagemaker_notebook_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        "arn:aws:s3:::aws-neptune-notebook",
                        "arn:aws:s3:::aws-neptune-notebook/*"
                    ],
                    "actions": ["s3:GetObject", "s3:ListBucket"]
                }))

        sagemaker_notebook_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        "arn:aws:neptune-db:{region}:{account}:{cluster_id}/*".
                        format(region=core.Aws.REGION,
                               account=core.Aws.ACCOUNT_ID,
                               cluster_id=bizcard_graph_db.
                               attr_cluster_resource_id)
                    ],
                    "actions": ["neptune-db:connect"]
                }))

        sagemaker_notebook_role = aws_iam.Role(
            self,
            'SageMakerNotebookForNeptuneWorkbenchRole',
            role_name='AWSNeptuneNotebookRole-OctemberBizcard',
            assumed_by=aws_iam.ServicePrincipal('sagemaker.amazonaws.com'),
            #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
            inline_policies={
                'AWSNeptuneNotebook': sagemaker_notebook_role_policy_doc
            })

        # On-start lifecycle script: points the graph notebook at this
        # Neptune cluster via environment variables and installs the
        # graph_notebook package from the public S3 bucket.
        neptune_wb_lifecycle_content = '''#!/bin/bash
sudo -u ec2-user -i <<'EOF'
echo "export GRAPH_NOTEBOOK_AUTH_MODE=DEFAULT" >> ~/.bashrc
echo "export GRAPH_NOTEBOOK_HOST={NeptuneClusterEndpoint}" >> ~/.bashrc
echo "export GRAPH_NOTEBOOK_PORT={NeptuneClusterPort}" >> ~/.bashrc
echo "export NEPTUNE_LOAD_FROM_S3_ROLE_ARN=''" >> ~/.bashrc
echo "export AWS_REGION={AWS_Region}" >> ~/.bashrc
aws s3 cp s3://aws-neptune-notebook/graph_notebook.tar.gz /tmp/graph_notebook.tar.gz
rm -rf /tmp/graph_notebook
tar -zxvf /tmp/graph_notebook.tar.gz -C /tmp
/tmp/graph_notebook/install.sh
EOF
'''.format(NeptuneClusterEndpoint=bizcard_graph_db.attr_endpoint,
           NeptuneClusterPort=bizcard_graph_db.attr_port,
           AWS_Region=core.Aws.REGION)

        neptune_wb_lifecycle_config_prop = aws_sagemaker.CfnNotebookInstanceLifecycleConfig.NotebookInstanceLifecycleHookProperty(
            content=core.Fn.base64(neptune_wb_lifecycle_content))

        neptune_wb_lifecycle_config = aws_sagemaker.CfnNotebookInstanceLifecycleConfig(
            self,
            # NOTE(review): construct id misspells "Neptune"; left as-is since
            # renaming it would replace the deployed resource.
            'NpetuneWorkbenchLifeCycleConfig',
            notebook_instance_lifecycle_config_name=
            'AWSNeptuneWorkbenchOctemberBizcardLCConfig',
            on_start=[neptune_wb_lifecycle_config_prop])

        # SageMaker notebook instance (Neptune workbench) placed in one of
        # the Neptune subnets with the graph-db client security group so it
        # can reach the cluster on port 8182.
        # Fix: SecurityGroupIds must receive security group *IDs*; the code
        # previously passed sg_use_bizcard_graph_db.security_group_name,
        # which yields the group name and is rejected by CloudFormation.
        neptune_workbench = aws_sagemaker.CfnNotebookInstance(
            self,
            'NeptuneWorkbench',
            instance_type='ml.t2.medium',
            role_arn=sagemaker_notebook_role.role_arn,
            lifecycle_config_name=neptune_wb_lifecycle_config.
            notebook_instance_lifecycle_config_name,
            notebook_instance_name='OctemberBizcard-NeptuneWorkbench',
            root_access='Disabled',
            security_group_ids=[sg_use_bizcard_graph_db.security_group_id],
            subnet_id=bizcard_graph_db_subnet_group.subnet_ids[0])
# Example #15 (scraper artifact: separator between concatenated code samples)
# 0
    def __init__(self, scope: core.Construct, id: str, stack_log_level: str,
                 back_end_api_name: str, **kwargs) -> None:
        """Provision a greeter Lambda fronted by an edge-optimized REST API.

        :param stack_log_level: value for the function's ``LOG_LEVEL`` env var.
        :param back_end_api_name: name given to the REST API.

        The ``GET`` method integrates with the bare function object, i.e. the
        implicit ``$LATEST`` qualifier - this is the 'anti-pattern' the stack
        demonstrates. An alias is created but deliberately not wired in.
        """
        super().__init__(scope, id, **kwargs)

        # Read the Lambda source from disk so it can be supplied inline.
        try:
            with open(
                    "api_with_stage_variables/stacks/back_end/lambda_src/serverless_greeter.py",
                    mode="r") as f:
                backend_greeter_fn_code = f.read()
        except OSError as e:
            print("Unable to read Lambda Function Code")
            raise e

        backend_greeter_fn = _lambda.Function(
            self,
            "backendGreeterFn",
            function_name=f"greeter_fn_{id}",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(backend_greeter_fn_code),
            timeout=core.Duration.seconds(15),
            reserved_concurrent_executions=20,
            environment={
                "LOG_LEVEL": f"{stack_log_level}",
                "Environment": "Production",
                "ANDON_CORD_PULLED": "False",
                "RANDOM_SLEEP_ENABLED": "False"
            },
            description=
            "A simple greeter function, which responds with a timestamp")
        backend_greeter_fn_version = backend_greeter_fn.latest_version
        # The alias exists to show what *should* be integrated with; the API
        # below intentionally bypasses it and binds to $LATEST instead.
        backend_greeter_fn_version_alias = _lambda.Alias(
            self,
            "greeterFnAlias",
            alias_name="MystiqueAutomation",
            version=backend_greeter_fn_version)

        # Custom log group (/aws/lambda/<function-name>) so log retention and
        # removal are owned by this stack rather than left to defaults.
        backend_greeter_fn_lg = _logs.LogGroup(
            self,
            "greeterFnLoggroup",
            log_group_name=f"/aws/lambda/{backend_greeter_fn.function_name}",
            retention=_logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY)

        # Stage options for the API GW front end of the Lambda.
        anti_pattern_api_stage_options = _apigw.StageOptions(
            stage_name="prod",
            throttling_rate_limit=10,
            throttling_burst_limit=100,
            logging_level=_apigw.MethodLoggingLevel.INFO)

        # Create API Gateway
        anti_pattern_api = _apigw.RestApi(
            self,
            "backEnd01Api",
            rest_api_name=f"{back_end_api_name}",
            deploy_options=anti_pattern_api_stage_options,
            endpoint_types=[_apigw.EndpointType.EDGE],
            description=
            f"{GlobalArgs.OWNER}: API Best Practices. This stack deploys an API and integrates with Lambda $LATEST alias, which is the default"
        )

        anti_pattern_api_res = anti_pattern_api.root.add_resource(
            "anti-pattern-api")
        greeter = anti_pattern_api_res.add_resource("greeter")

        greeter_method_get = greeter.add_method(
            http_method="GET",
            request_parameters={
                # FIX: dropped 'method.request.path.skon' - the 'greeter'
                # resource path declares no {skon} parameter, so that entry
                # was an inert copy-paste leftover.
                # NOTE(review): without a request validator attached to this
                # method, API Gateway does not enforce the 'required' flag.
                "method.request.header.InvocationType": True
            },
            integration=_apigw.LambdaIntegration(handler=backend_greeter_fn,
                                                 proxy=True))

        # Outputs
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_1 = core.CfnOutput(
            self,
            "AntiPatternApiUrl",
            value=f"{greeter.url}",
            description="Use a browser to access this url.")
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        stack_log_level: str,
        back_end_api_name: str,
        back_end_api_datastore_name: str,
        **kwargs
    ) -> None:
        """Provision a DynamoDB-backed API demonstrating cached vs un-cached routes.

        Creates a DynamoDB table (pre-loaded via a Cfn custom resource), a
        greeter Lambda, and a REST API whose stage has a cache cluster
        enabled. ``GET /cached/movie`` explicitly opts out of caching, while
        ``GET /cached/movie/{id}`` is a non-proxy integration cached on the
        ``id`` path parameter.

        :param stack_log_level: value for the function's ``LOG_LEVEL`` env var.
        :param back_end_api_name: name given to the REST API.
        :param back_end_api_datastore_name: base name for the DynamoDB table;
            a repo-derived default is used when falsy.
        """
        super().__init__(scope, id, **kwargs)

        # DynamoDB: Key-Value Database
        if not back_end_api_datastore_name:
            back_end_api_datastore_name = f"{GlobalArgs.REPO_NAME}-api-datastore"

        self.ddb_table_01 = _dynamodb.Table(
            self,
            "apiPerformanceWithCaching",
            partition_key=_dynamodb.Attribute(
                name="id",
                type=_dynamodb.AttributeType.STRING
            ),
            read_capacity=20,
            write_capacity=20,
            table_name=f"{back_end_api_datastore_name}-{id}",
            removal_policy=core.RemovalPolicy.DESTROY
        )

        # Let us use our Cfn Custom Resource to load data into our dynamodb table.
        data_loader_status = DdbDataLoaderStack(
            self,
            "cachedApiDdbLoader",
            Ddb_table_name=self.ddb_table_01.table_name
        )

        # Read Lambda source from disk so it can be supplied inline.
        try:
            with open("api_performance_with_caching/stacks/back_end/lambda_src/serverless_greeter.py", mode="r") as f:
                greeter_fn_code = f.read()
        except OSError as e:
            print("Unable to read Lambda Function Code")
            raise e

        greeter_fn = _lambda.Function(
            self,
            "greeterFn",
            function_name=f"greeter_fn_{id}",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(greeter_fn_code),
            timeout=core.Duration.seconds(10),
            reserved_concurrent_executions=50,
            environment={
                "LOG_LEVEL": f"{stack_log_level}",
                "Environment": "Production",
                "DDB_TABLE_NAME": self.ddb_table_01.table_name,
                "RANDOM_SLEEP_SECS": "2",
                "ANDON_CORD_PULLED": "False"
            },
            description="Creates a simple greeter function"
        )
        greeter_fn_version = greeter_fn.latest_version
        greeter_fn_version_alias = _lambda.Alias(
            self,
            "greeterFnAlias",
            alias_name="MystiqueAutomation",
            version=greeter_fn_version
        )

        # Custom log group (/aws/lambda/<function-name>) so retention and
        # removal are owned by this stack.
        # NOTE(review): construct id says 'squareFnLoggroup' but this belongs
        # to the greeter function - looks like a copy-paste leftover; renaming
        # the id would replace the resource, so it is only flagged here.
        greeter_fn_lg = _logs.LogGroup(
            self,
            "squareFnLoggroup",
            log_group_name=f"/aws/lambda/{greeter_fn.function_name}",
            retention=_logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY
        )

        # Add DDB Read Write Permission to the Lambda
        self.ddb_table_01.grant_read_write_data(greeter_fn)

        # Stage options: cache cluster on for the whole stage, with a
        # per-method override below disabling the cache for /cached/movie/GET.
        back_end_api_stage_01_options = _apigw.StageOptions(
            stage_name="miztiik",
            cache_cluster_enabled=True,
            caching_enabled=True,
            cache_cluster_size="0.5",
            cache_ttl=core.Duration.seconds(30),
            # Log full requests/responses data
            data_trace_enabled=True,
            # Enable Detailed CloudWatch Metrics
            metrics_enabled=True,
            logging_level=_apigw.MethodLoggingLevel.INFO,
            # Keys use API GW's '/{resource-path}/{HTTP-VERB}' method-path form.
            method_options={
                "/cached/movie/GET": _apigw.MethodDeploymentOptions(
                    caching_enabled=False
                )
            }
        )

        # Create API Gateway
        cached_api = _apigw.RestApi(
            self,
            "backEnd01Api",
            rest_api_name=f"{back_end_api_name}",
            deploy_options=back_end_api_stage_01_options,
            minimum_compression_size=0,
            endpoint_types=[
                _apigw.EndpointType.EDGE
            ],
            description=f"{GlobalArgs.OWNER}: API Best Practice Demonstration - Cached-vs-UnCached APIs"
        )

        back_end_01_api_res = cached_api.root.add_resource("cached")
        res_movie = back_end_01_api_res.add_resource(
            "movie"
        )

        # Un-cached route: plain Lambda proxy integration.
        res_movie_method_get = res_movie.add_method(
            http_method="GET",
            request_parameters={
                "method.request.header.InvocationType": True,
                "method.request.path.number": True
            },
            integration=_apigw.LambdaIntegration(
                handler=greeter_fn,
                proxy=True
            )
        )

        # Add Method for getting Movie by {id}
        res_movie_by_id = res_movie.add_resource("{id}")

        # Because this is NOT a proxy integration, we need to define our response model
        response_model = cached_api.add_model(
            "ResponseModel",
            content_type="application/json",
            model_name="MiztiikResponseModel",
            schema=_apigw.JsonSchema(
                schema=_apigw.JsonSchemaVersion.DRAFT4,
                title="updateResponse",
                type=_apigw.JsonSchemaType.OBJECT,
                properties={
                    "message": _apigw.JsonSchema(type=_apigw.JsonSchemaType.STRING)
                }
            )
        )

        # Validator enforces the 'required' flags declared in
        # request_parameters below (without it they are inert).
        res_movie_by_id_validator_request = cached_api.add_request_validator(
            "apiReqValidator",
            validate_request_parameters=True
        )

        # VTL mapping: pass the {id} path parameter to the Lambda as JSON.
        req_template = {
            "id": "$input.params('id')"
        }
        request_template_string = json.dumps(
            req_template, separators=(',', ':'))

        # resp_template = """$input.path('$.body.message')"""
        # VTL mapping: unwrap the Lambda result's 'body' for the client.
        resp_template = """$input.path('$.body')"""

        # Cached route: non-proxy integration keyed on the 'id' path param.
        res_movie_by_id_method_get = res_movie_by_id.add_method(
            http_method="GET",
            request_parameters={
                "method.request.header.InvocationType": False,
                "method.request.path.id": True
            },
            request_validator=res_movie_by_id_validator_request,
            integration=_apigw.LambdaIntegration(
                handler=greeter_fn,
                proxy=False,
                request_parameters={
                    "integration.request.path.id": "method.request.path.id"
                },
                # 'id' is the cache key, so each movie id caches separately.
                cache_key_parameters=[
                    "method.request.path.id"
                ],
                request_templates={
                    "application/json": request_template_string
                },
                passthrough_behavior=_apigw.PassthroughBehavior.NEVER,
                integration_responses=[
                    _apigw.IntegrationResponse(
                        status_code="200",
                        # selection_pattern="2\d{2}",  # Use for mapping Lambda Errors
                        response_parameters={
                            "method.response.header.Access-Control-Allow-Headers": "'cache-control,Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
                            "method.response.header.Content-Type": "'application/json'",
                        },
                        response_templates={
                            "application/json": f"{resp_template}"
                        }
                    )
                ]
            ),
            # Method responses must mirror the headers/models the
            # integration response maps, or deployment fails.
            method_responses=[
                _apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        "method.response.header.Content-Type": True,
                        "method.response.header.Access-Control-Allow-Headers": True,
                    },
                    response_models={
                        "application/json": response_model
                    }
                )
            ]
        )
        self.cached_api_url = res_movie.url

        # Outputs
        output_1 = core.CfnOutput(
            self,
            "CachedApiUrl",
            value=f"{res_movie.url}",
            description="Use an utility like curl/Postman to access this API."
        )
        output_2 = core.CfnOutput(
            self,
            "ddbDataLoaderStatus",
            value=f"{data_loader_status.response}",
            description="Waf Rate Rule Creator Status"
        )
Пример #17
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Wire API Gateway -> SNS -> Lambda with EventBridge destinations.

        A GET to /SendEvent publishes to SNS (no Lambda in between, via a VTL
        request template). The subscribed 'destined' Lambda routes its own
        success/failure events to a custom EventBridge bus, where two rules
        fan out to dedicated logging Lambdas.
        """
        super().__init__(scope, id, **kwargs)

        ###
        # Let's create our own Event Bus for this rather than using default
        ###
        bus = events.EventBus(self,
                              'DestinedEventBus',
                              event_bus_name='the-destined-lambda')

        ###
        # Destinations need invoked Asynchronously so let's use SNS
        ###
        topic = sns.Topic(self,
                          'theDestinedLambdaTopic',
                          display_name='The Destined Lambda CDK Pattern Topic')

        ###
        # Lambda configured with success and failure destinations
        # Note the actual lambda has no EventBridge code inside it
        ###
        destined_lambda = _lambda.Function(
            self,
            "destinedLambda",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="destinedLambda.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
            retry_attempts=0,
            on_success=destinations.EventBridgeDestination(event_bus=bus),
            on_failure=destinations.EventBridgeDestination(event_bus=bus))
        topic.add_subscription(
            subscriptions.LambdaSubscription(destined_lambda))

        ###
        # This is a lambda that will be called by onSuccess for destinedLambda
        # It simply prints the event it receives to the cloudwatch logs
        ###
        success_lambda = _lambda.Function(
            self,
            "successLambda",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="success.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
            timeout=core.Duration.seconds(3))
        ###
        # EventBridge Rule to send events to our success lambda
        # Notice how we can still do event filtering based on the json payload returned by the destined lambda
        ###
        success_rule = events.Rule(
            self,
            'successRule',
            event_bus=bus,
            description=
            'all success events are caught here and logged centrally',
            event_pattern=events.EventPattern(
                detail={
                    "requestContext": {
                        "condition": ["Success"]
                    },
                    "responsePayload": {
                        "source": ["cdkpatterns.the-destined-lambda"],
                        "action": ["message"]
                    }
                }))
        success_rule.add_target(targets.LambdaFunction(success_lambda))

        ###
        # This is a lambda that will be called by onFailure for destinedLambda
        # It simply prints the event it receives to the cloudwatch logs.
        # Notice how it includes the message that came into destined lambda to make it fail so you have
        # everything you need to do retries or manually investigate
        ###
        failure_lambda = _lambda.Function(
            self,
            "failureLambda",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="failure.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
            timeout=core.Duration.seconds(3))

        ###
        # EventBridge Rule to send events to our failure lambda
        ###
        failure_rule = events.Rule(
            self,
            'failureRule',
            event_bus=bus,
            description=
            'all failure events are caught here and logged centrally',
            event_pattern=events.EventPattern(
                detail={"responsePayload": {
                    "errorType": ["Error"]
                }}))
        failure_rule.add_target(targets.LambdaFunction(failure_lambda))

        ###
        # API Gateway Creation
        # This is complicated because it transforms the incoming json payload into a query string url
        # this url is used to post the payload to sns without a lambda inbetween
        ###

        gateway = api_gw.RestApi(
            self,
            'theDestinedLambdaAPI',
            deploy_options=api_gw.StageOptions(
                metrics_enabled=True,
                logging_level=api_gw.MethodLoggingLevel.INFO,
                data_trace_enabled=True,
                stage_name='prod'))
        # Give our gateway permissions to interact with SNS
        api_gw_sns_role = iam.Role(
            self,
            'ApiGatewaySNSRole',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
        topic.grant_publish(api_gw_sns_role)

        # shortening the lines of later code
        schema = api_gw.JsonSchema
        schema_type = api_gw.JsonSchemaType

        # Because this isn't a proxy integration, we need to define our response model
        response_model = gateway.add_model(
            'ResponseModel',
            content_type='application/json',
            model_name='ResponseModel',
            schema=schema(
                schema=api_gw.JsonSchemaVersion.DRAFT4,
                title='pollResponse',
                type=schema_type.OBJECT,
                properties={'message': schema(type=schema_type.STRING)}))

        error_response_model = gateway.add_model(
            'ErrorResponseModel',
            content_type='application/json',
            model_name='ErrorResponseModel',
            schema=schema(schema=api_gw.JsonSchemaVersion.DRAFT4,
                          title='errorResponse',
                          type=schema_type.OBJECT,
                          properties={
                              'state': schema(type=schema_type.STRING),
                              'message': schema(type=schema_type.STRING)
                          }))

        # VTL: turn the request into the form-encoded SNS Publish call.
        request_template = "Action=Publish&" + \
                           "TargetArn=$util.urlEncode('" + topic.topic_arn + "')&" + \
                           "Message=please $input.params().querystring.get('mode')&" + \
                           "Version=2010-03-31"

        # This is the VTL to transform the error response
        error_template = {
            "state": 'error',
            "message": "$util.escapeJavaScript($input.path('$.errorMessage'))"
        }
        error_template_string = json.dumps(error_template,
                                           separators=(',', ':'))

        # This is how our gateway chooses what response to send based on selection_pattern
        integration_options = api_gw.IntegrationOptions(
            credentials_role=api_gw_sns_role,
            request_parameters={
                'integration.request.header.Content-Type':
                "'application/x-www-form-urlencoded'"
            },
            request_templates={"application/json": request_template},
            passthrough_behavior=api_gw.PassthroughBehavior.NEVER,
            integration_responses=[
                api_gw.IntegrationResponse(
                    status_code='200',
                    response_templates={
                        "application/json":
                        json.dumps({"message": 'Message added to SNS topic'})
                    }),
                api_gw.IntegrationResponse(
                    # FIX: raw string - '\[' / '\]' are invalid escape
                    # sequences in a normal string literal and emit a
                    # SyntaxWarning on modern CPython.
                    selection_pattern=r"^\[Error\].*",
                    status_code='400',
                    response_templates={
                        "application/json": error_template_string
                    },
                    response_parameters={
                        'method.response.header.Content-Type':
                        "'application/json'",
                        'method.response.header.Access-Control-Allow-Origin':
                        "'*'",
                        'method.response.header.Access-Control-Allow-Credentials':
                        "'true'"
                    })
            ])

        # Add an SendEvent endpoint onto the gateway
        # NOTE(review): the service URI hard-codes us-east-1; consider
        # f'arn:aws:apigateway:{core.Aws.REGION}:sns:path//' for portability.
        gateway.root.add_resource('SendEvent') \
            .add_method('GET', api_gw.Integration(type=api_gw.IntegrationType.AWS,
                                                  integration_http_method='POST',
                                                  uri='arn:aws:apigateway:us-east-1:sns:path//',
                                                  options=integration_options
                                                  ),
                        method_responses=[
                            api_gw.MethodResponse(status_code='200',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': response_model
                                                  }),
                            api_gw.MethodResponse(status_code='400',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': error_response_model
                                                  }),
                        ]
                        )
    def __init__(self, scope: core.Construct, id: str, wiki_api_endpoint,
                 **kwargs) -> None:
        """Expose an EC2-hosted wiki API through API Gateway (HTTP proxy).

        :param wiki_api_endpoint: host (DNS name or IP) of the backing HTTP
            API; ``GET /v1/wiki_url/{needle}`` is forwarded to
            ``http://<endpoint>/api/{needle}``.
        """
        super().__init__(scope, id, **kwargs)

        # Create API Gateway
        api_01 = _apigw.RestApi(self,
                                'apiEndpoint',
                                rest_api_name='mystique-wiki-api',
                                deploy_options=_apigw.StageOptions(
                                    stage_name="myst",
                                    data_trace_enabled=True,
                                    tracing_enabled=True))
        v1 = api_01.root.add_resource("v1")

        # Add resource for HTTP Endpoint: API Hosted on EC2
        self.wiki_url_path_00 = v1.add_resource('wiki_url')
        wiki_url_path_01 = self.wiki_url_path_00.add_resource('{needle}')

        # Map useful backend response headers through to the client.
        list_objects_responses = [
            _apigw.IntegrationResponse(
                status_code="200",
                response_parameters={
                    'method.response.header.Timestamp':
                    'integration.response.header.Date',
                    'method.response.header.Content-Length':
                    'integration.response.header.Content-Length',
                    'method.response.header.Content-Type':
                    'integration.response.header.Content-Type'
                })
        ]

        # Forward the {needle} path parameter to the backend request path.
        wiki_url_integration_options = _apigw.IntegrationOptions(
            integration_responses=list_objects_responses,
            request_parameters={
                "integration.request.path.needle": "method.request.path.needle"
            })

        wiki_url_integration = _apigw.HttpIntegration(
            url=f'http://{wiki_api_endpoint}/api/{{needle}}',
            http_method='GET',
            options=wiki_url_integration_options,
            proxy=False,
        )

        wiki_url_method = wiki_url_path_01.add_method(
            "GET",
            wiki_url_integration,
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.needle': True
            },
            method_responses=[
                _apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    # FIX: EmptyModel is deprecated in CDK; use the
                    # equivalent Model.EMPTY_MODEL singleton instead.
                    response_models={'application/json': _apigw.Model.EMPTY_MODEL})
            ])
        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{global_args.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_1 = core.CfnOutput(
            self,
            "GetWikiUrl",
            value=f"{self.wiki_url_path_00.url}/search term",
            description=f"Get Wiki Url for given topic using API Gateway")
        """
Пример #19
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Build a VPC-attached Lambda API guarded by a custom token authorizer.

        Creates IAM roles for the handler, the authorizer, and the authorizer's
        Lambda; a REST API whose ``GET /dynamodb`` route requires the custom
        ``Authorization``-header authorizer.
        """
        super().__init__(scope, id, **kwargs)

        # get VPC
        vpc = ec2.Vpc.from_lookup(self, "Vpc", is_default=True)

        # Gateway endpoint so the VPC-bound Lambda can reach DynamoDB
        # without traversing the public internet.
        vpc.add_gateway_endpoint(
            id="DynamoDBEndpoint",
            service=ec2.GatewayVpcEndpointAwsService.DYNAMODB)

        # security group
        security_group = ec2.SecurityGroup(self,
                                           "SecurityGroup",
                                           vpc=vpc,
                                           security_group_name="project-sg")

        # define role for the API
        # NOTE(review): 'dynamodb:*' on '*' is far broader than least
        # privilege; scope to the specific tables/actions when known.
        lambda_role = iam.Role(
            self,
            "LambdaHandlerRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))
        lambda_role.add_to_policy(
            iam.PolicyStatement(actions=["dynamodb:*"], resources=["*"]))
        # Log + ENI permissions the function needs to run inside a VPC.
        lambda_role.add_to_policy(
            iam.PolicyStatement(actions=[
                "logs:CreateLogGroup", "logs:CreateLogStream",
                "logs:PutLogEvents", "ec2:CreateNetworkInterface",
                "ec2:DescribeNetworkInterfaces", "ec2:DeleteNetworkInterface",
                "ec2:AssignPrivateIpAddresses",
                "ec2:UnassignPrivateIpAddresses"
            ],
                                resources=["*"]))

        # define role for the Authorizer
        authorizer_role = iam.Role(
            self,
            "ProjectAuthRole",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
            role_name="ProjectAuthorizerRole")

        # define role for the Authorizer lambda function
        authorizer_handler_role = iam.Role(
            self,
            "ProjectAuthHandlerRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            role_name="ProjectAuthorizerHandlerRole")

        authorizer_handler_role.add_to_policy(
            iam.PolicyStatement(actions=["apigateway:GET"], resources=["*"]))

        # NOTE(review): kwargs.get('env') is None unless the caller passed
        # env=..., which would make '.account' raise AttributeError - TODO
        # confirm callers always supply env (core.Aws.ACCOUNT_ID avoids this).
        authorizer_handler_role.add_to_policy(
            iam.PolicyStatement(
                actions=["logs:CreateLogStream", "logs:CreateLogGroup"],
                resources=[
                    f"arn:aws:logs:*:{kwargs.get('env').account}:log-group:*"
                ]))

        authorizer_handler_role.add_to_policy(
            iam.PolicyStatement(
                actions=["logs:PutLogEvents"],
                resources=[
                    f"arn:aws:logs:*:{kwargs.get('env').account}:log-group:*:log-stream:*"
                ]))

        # build layer for the API
        # layer = _lambda.LayerVersion(
        #     self, "ProjectLayer",
        #     code=_lambda.Code.from_asset("layer.zip")
        # )

        # FIX: Code.asset() is deprecated in CDK; Code.from_asset() is the
        # supported equivalent (same synthesized output).
        lambda_handler = _lambda.Function(
            self,
            'ProjectHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('src'),
            handler='routes.get_tables_names',
            # layers=[layer], role=lambda_role,
            role=lambda_role,
            allow_public_subnet=True,
            vpc=vpc,
            retry_attempts=0,
            security_groups=[security_group],
            tracing=_lambda.Tracing.ACTIVE,
            timeout=core.Duration.seconds(60))

        authorizer_handler = _lambda.Function(
            self,
            'ProjectAuthorizerHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('authorizer'),
            handler='auth.handler',
            timeout=core.Duration.seconds(30),
            role=authorizer_handler_role,
            retry_attempts=0)

        # enable authorizer to invoke lambda function
        authorizer_role.add_to_policy(
            iam.PolicyStatement(actions=["lambda:InvokeFunction"],
                                resources=[authorizer_handler.function_arn]))

        # setup authorizer and api
        authorizer = apigw.TokenAuthorizer(
            self,
            "Authorizer",
            identity_source=apigw.IdentitySource.header('Authorization'),
            handler=authorizer_handler,
            assume_role=authorizer_role,
            authorizer_name="ProjectAuthorizer",
            # TTL 0 disables result caching - every call re-authorizes.
            results_cache_ttl=core.Duration.seconds(0))

        api = apigw.RestApi(self,
                            "ProjectRestApi",
                            rest_api_name="ProjectRestApi",
                            deploy_options=apigw.StageOptions(
                                tracing_enabled=True,
                                data_trace_enabled=True,
                                stage_name="v1"))

        dynamodb_resource = api.root.add_resource("dynamodb")

        get_tables_node_integration = apigw.LambdaIntegration(lambda_handler)
        dynamodb_resource.add_method(
            "GET",
            get_tables_node_integration,
            authorizer=authorizer,
            authorization_type=apigw.AuthorizationType.CUSTOM)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Deploy a Lambda that emits CloudWatch embedded-metric logs behind an API.

        ``GET /user_id/{likes}`` invokes the function, which (per the inlined
        source) writes embedded-metric-format records to its log group.
        """
        super().__init__(scope, id, **kwargs)

        # Read the Lambda source from disk so it can be supplied inline.
        try:
            with open(
                    "cloudwatch_embedded_metric/lambda_src/embedded_metric_log_generator.py",
                    mode="r") as f:
                konstone_embedded_metric_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            # FIX: re-raise. Swallowing the error left the code variable
            # undefined and surfaced later as a confusing NameError.
            raise

        konstone_embedded_metric_fn = _lambda.Function(
            self,
            "konstoneFunction",
            function_name="konstone_embedded_metric_fn",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(konstone_embedded_metric_fn_code),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={"LOG_LEVEL": "INFO"})

        # Custom log group (/aws/lambda/<function-name>) so retention and
        # removal are owned by this stack.
        konstone_lg = _logs.LogGroup(
            self,
            "konstoneLoggroup",
            log_group_name=
            f"/aws/lambda/{konstone_embedded_metric_fn.function_name}",
            retention=_logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY)

        # Stage options for the API GW front end of the Lambda.
        lambda_embedded_metrics_api_stage_options = _apigw.StageOptions(
            stage_name="myst",
            logging_level=_apigw.MethodLoggingLevel.INFO,
            # data_trace_enabled=True,
            metrics_enabled=True,
            # tracing_enabled=True # Enable if you want AWS Xray Tracing
        )

        # Create API Gateway (non-proxy so routes are declared explicitly).
        api_01 = _apigw.LambdaRestApi(
            self,
            "lambda-embedded-metrics-api",
            rest_api_name="lambda-embedded-metrics-api",
            deploy_options=lambda_embedded_metrics_api_stage_options,
            handler=konstone_embedded_metric_fn,
            proxy=False)

        user_id = api_01.root.add_resource("user_id")
        add_user_likes = user_id.add_resource("{likes}")
        add_user_likes.add_method("GET")
        ###########################################
        ################# OUTPUTS #################
        ###########################################

        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{global_args.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )
        output_1 = core.CfnOutput(
            self,
            "ApiUrl",
            value=f"{add_user_likes.url}",
            description=
            "Use a browser to push the user likes to CW Logs, replace {likes} with your like value"
        )
    def __init__(self, scope: core.Construct, id: str,
                 unicorn_user_pool_secrets_arn, premium_content_api_url,
                 **kwargs) -> None:
        """API-consumer stack: a Lambda that exercises the premium content
        API, fronted by its own regional REST API.

        :param unicorn_user_pool_secrets_arn: ARN of the Secrets Manager
            secret holding the Cognito user-pool client credentials.
        :param premium_content_api_url: base URL of the premium content API
            the consumer Lambda calls.
        """
        super().__init__(scope, id, **kwargs)

        # Read the Lambda source from disk so it can be shipped inline.
        try:
            with open("api_consumers/lambda_src/content_consumers.py",
                      mode="r") as f:
                content_consumers_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        content_consumers_fn = _lambda.Function(
            self,
            "contentConsumersFn",
            function_name="content_consumers",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(content_consumers_fn_code),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "Environment": "Production",
                "USER_POOL_SECRETS_ARN": unicorn_user_pool_secrets_arn,
                "PREMIUM_CONTENT_API_URL": premium_content_api_url,
            })

        # Least-privilege: the function may only read the user-pool secret.
        secrets_read_stmt = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[unicorn_user_pool_secrets_arn],
            actions=["secretsmanager:GetSecretValue"])
        secrets_read_stmt.sid = "AllowLambdaToReadSecrets"
        content_consumers_fn.add_to_role_policy(secrets_read_stmt)

        # Custom log group (/aws/lambda/<function-name>) so retention and
        # removal are managed by this stack instead of being left behind.
        content_consumers_fn_lg = _logs.LogGroup(
            self,
            "contentConsumersFnLoggroup",
            log_group_name=f"/aws/lambda/{content_consumers_fn.function_name}",
            retention=_logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY)

        # API Gateway front end for the Lambda, throttled "miztiik" stage.
        miztiik_world_api_stage_options = _apigw.StageOptions(
            stage_name="miztiik",
            throttling_rate_limit=10,
            throttling_burst_limit=100,
            logging_level=_apigw.MethodLoggingLevel.INFO)

        api_01 = _apigw.LambdaRestApi(
            self,
            "miztiikWorld",
            rest_api_name="miztiik-world-api",
            deploy_options=miztiik_world_api_stage_options,
            endpoint_types=[_apigw.EndpointType.REGIONAL],
            handler=content_consumers_fn,
            proxy=False)

        get_content = api_01.root.add_resource("content")

        # GET /content/unauthorized-read
        get_unauthorized_request = get_content.add_resource(
            "unauthorized-read")
        get_unauthorized_request.add_method("GET")
        # GET /content/authorized-read
        get_authorized_request = get_content.add_resource("authorized-read")
        get_authorized_request.add_method("GET")
        # /content/authorized-write
        # NOTE(review): the resource name suggests a write and the original
        # comment said "PUT", yet the method is GET — confirm the intended
        # HTTP verb with the author (kept as GET to preserve behavior).
        post_authorized_request = get_content.add_resource("authorized-write")
        post_authorized_request.add_method("GET")

        # Stack outputs.  FIX: the last two outputs previously rebound the
        # same local name `output_2`; the bindings were unused, so they are
        # dropped entirely rather than renamed.
        core.CfnOutput(
            self,
            "UnauthorizedUrl",
            value=f"{get_unauthorized_request.url}",
            description="Use a browser to access this url")
        core.CfnOutput(
            self,
            "AuthorizedReadUrl",
            value=f"{get_authorized_request.url}",
            description="Use a browser to access this url")
        core.CfnOutput(
            self,
            "AuthorizedWriteUrl",
            value=f"{post_authorized_request.url}",
            description="Use a browser to access this url")
# --- Example #22 (scraped-sample separator; commented out so the module parses) ---
    def __init__(self, scope: core.Construct, id: str, unicorn_user_pool_arn,
                 unicorn_user_pool_res_srv_identifier, unicorn_read_scope,
                 unicorn_write_scope, **kwargs) -> None:
        """Walled-garden premium API: a Lambda-backed REST API whose GET and
        POST methods require Cognito tokens carrying read/write scopes."""
        super().__init__(scope, id, **kwargs)

        # Load the handler source so it can be shipped as inline code.
        try:
            with open("premium_api/lambda_src/premium_content.py",
                      mode="r") as src:
                premium_fn_source = src.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        premium_fn = _lambda.Function(
            self,
            "premiumContentFunction",
            function_name="premium_function",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(premium_fn_source),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "Environment": "Production"
            })

        # Dedicated short-retention log group (/aws/lambda/<function-name>).
        _logs.LogGroup(
            self,
            "premiumContentFnLoggroup",
            log_group_name=f"/aws/lambda/{premium_fn.function_name}",
            retention=_logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY)

        # REST API front end with a throttled "prod" stage.
        prod_stage_options = _apigw.StageOptions(
            stage_name="prod",
            throttling_rate_limit=10,
            throttling_burst_limit=100,
            logging_level=_apigw.MethodLoggingLevel.INFO)
        garden_api = _apigw.LambdaRestApi(
            self,
            "walledGardenApi",
            rest_api_name="walled-garden-api",
            deploy_options=prod_stage_options,
            endpoint_types=[_apigw.EndpointType.REGIONAL],
            handler=premium_fn,
            proxy=False)

        # The "wall" around the garden: a Cognito user-pools authorizer
        # (L1 construct so it can be attached via a property override).
        sentry = _apigw.CfnAuthorizer(
            self,
            "walledGardenApiAuthorizer",
            name="walledGardenSentry",
            rest_api_id=garden_api.rest_api_id,
            type="COGNITO_USER_POOLS",
            provider_arns=[unicorn_user_pool_arn],
            authorizer_result_ttl_in_seconds=15,
            identity_source="method.request.header.Authorization")

        home_resource = garden_api.root.add_resource("home")
        premium_resource = home_resource.add_resource("premium")

        # GET /home/premium requires the read scope.
        get_method = premium_resource.add_method(
            "GET",
            authorization_type=_apigw.AuthorizationType.COGNITO,
            authorization_scopes=[
                f"{unicorn_user_pool_res_srv_identifier}/{unicorn_read_scope}"
            ])
        get_method.node.find_child("Resource").add_property_override(
            "AuthorizerId", sentry.ref)

        # POST /home/premium requires the write scope.
        post_method = premium_resource.add_method(
            "POST",
            authorization_type=_apigw.AuthorizationType.COGNITO,
            authorization_scopes=[
                f"{unicorn_user_pool_res_srv_identifier}/{unicorn_write_scope}"
            ])
        post_method.node.find_child("Resource").add_property_override(
            "AuthorizerId", sentry.ref)

        # Export the endpoint URL for consumer stacks.
        self.premium_content_api_url = premium_resource.url

        core.CfnOutput(
            self,
            "PremiumApiUrl",
            value=f"{premium_resource.url}",
            description="Use a browser to access this url")
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """Serverless TODO backend.

        Provisions a Cognito user pool for auth, a DynamoDB task table,
        and a REST API (optionally on a custom domain, driven by the
        ``acm_arn``/``domain_name``/``hosted_zone`` CDK context values)
        whose CRUD/search endpoints are Cognito-protected Lambdas;
        ``/auth/login`` is public.
        """
        super().__init__(scope, construct_id, **kwargs)

        # -----------------------------------
        #           Cognito User Pool
        # -----------------------------------
        userpool = cognito.UserPool(
            self,
            "ServerlessTodoUserPool",
            user_pool_name="ServerlessTodoUserPool",
            sign_in_aliases=cognito.SignInAliases(username=True, email=True),
            password_policy=cognito.PasswordPolicy(
                min_length=6,
                require_digits=True,
                require_lowercase=True,
                require_symbols=True,
                require_uppercase=True,
                temp_password_validity=core.Duration.days(7)),
            auto_verify=cognito.AutoVerifiedAttrs(email=True),
            standard_attributes=cognito.StandardAttributes(
                email=cognito.StandardAttribute(mutable=True, required=True),
                family_name=cognito.StandardAttribute(mutable=True,
                                                      required=True),
                given_name=cognito.StandardAttribute(mutable=True,
                                                     required=True)))
        # ADMIN_USER_PASSWORD_AUTH is required by the login Lambda below.
        user_pool_client = userpool.add_client(
            "UserPoolClient",
            auth_flows=cognito.AuthFlow(admin_user_password=True))

        # -----------------------------------
        #           dynamodb
        # -----------------------------------
        # Single-table design: (id, meta) composite key plus an inverted
        # (meta, id) index and a per-owner (owner, meta) index for queries.
        dynamodbTable = dynamodb.Table(
            self,
            "TaskTable",
            partition_key=dynamodb.Attribute(
                name="id", type=dynamodb.AttributeType.STRING),
            sort_key=dynamodb.Attribute(name="meta",
                                        type=dynamodb.AttributeType.STRING),
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            point_in_time_recovery=True,
            server_side_encryption=True)
        dynamodbTable.add_global_secondary_index(
            partition_key=dynamodb.Attribute(
                name="meta", type=dynamodb.AttributeType.STRING),
            sort_key=dynamodb.Attribute(name="id",
                                        type=dynamodb.AttributeType.STRING),
            index_name="meta-id-index")
        dynamodbTable.add_global_secondary_index(
            partition_key=dynamodb.Attribute(
                name="owner", type=dynamodb.AttributeType.STRING),
            sort_key=dynamodb.Attribute(name="meta",
                                        type=dynamodb.AttributeType.STRING),
            index_name="owner-meta-index")

        # -----------------------------------
        #             apigateway
        # -----------------------------------
        acm_arn = self.node.try_get_context('acm_arn')
        domain_name = self.node.try_get_context("domain_name")
        hosted_zone = self.node.try_get_context("hosted_zone")

        # FIX: PolicyDocument(statements=...) expects a *list* of statements;
        # the previous code passed the None return value of
        # PolicyStatement.add_resources(), which produced an empty document.
        # NOTE(review): the statement has no principal, and
        # "lambda:InvokeFunction" is unusual in an API Gateway *resource*
        # policy (invocation rights are granted via api_role below) —
        # confirm the intended policy with the original author.
        api_policy = iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                actions=["lambda:InvokeFunction"],
                resources=[
                    "arn:aws:lambda:{}:{}:function:*".format(
                        self.region, self.account)
                ])
        ])

        # Stage and CORS settings are identical for both API variants, so
        # build them once instead of duplicating them in each branch.
        stage_options = apigw.StageOptions(metrics_enabled=True)
        cors_options = apigw.CorsOptions(
            allow_origins=apigw.Cors.
            ALL_ORIGINS,  # TODO: Temporary for development
            allow_headers=[
                "Content-Type", "X-Amz-Date", "Authorization",
                "X-Api-Key", "X-Amz-Security-Token", "X-Tracing-Id",
                "x-jeffy-correlation-id", "x-amzn-trace-id"
            ],
            allow_methods=apigw.Cors.ALL_METHODS,
            allow_credentials=True)

        if acm_arn and domain_name and hosted_zone:
            # Custom-domain variant: attach the ACM certificate and create
            # an alias record pointing at the regional API endpoint.
            api = apigw.RestApi(
                self,
                'API',
                domain_name=apigw.DomainNameOptions(
                    certificate=acm.Certificate.from_certificate_arn(
                        self, 'ApiCertificate', acm_arn),
                    domain_name=domain_name,
                    endpoint_type=apigw.EndpointType.REGIONAL),
                deploy_options=stage_options,
                policy=api_policy,
                rest_api_name="Serverless TODO API",
                endpoint_types=[apigw.EndpointType.REGIONAL],
                default_cors_preflight_options=cors_options)
            route53.CfnRecordSet(
                self,
                "apiDomainRecord",
                name=domain_name,
                type="A",
                alias_target={
                    "dnsName":
                    api.domain_name.domain_name_alias_domain_name,
                    "hostedZoneId":
                    api.domain_name.domain_name_alias_hosted_zone_id
                },
                hosted_zone_id=hosted_zone,
            )
        else:
            # Default variant: execute-api endpoint only.
            api = apigw.RestApi(
                self,
                'API',
                deploy_options=stage_options,
                policy=api_policy,
                rest_api_name="Serverless TODO API",
                endpoint_types=[apigw.EndpointType.REGIONAL],
                default_cors_preflight_options=cors_options)

        cognito_authorizer = apigw.CognitoUserPoolsAuthorizer(
            self,
            "CognitoAuthorizer",
            cognito_user_pools=[userpool],
            authorizer_name='todo_cognito_authorizer',
            identity_source='method.request.header.Authorization',
            results_cache_ttl=core.Duration.minutes(60))

        # Role assumed by API Gateway to invoke the backend Lambdas.
        api_role = iam.Role(self,
                            "ApiRole",
                            assumed_by=iam.ServicePrincipal(
                                service="apigateway.amazonaws.com"))
        api_statement = iam.PolicyStatement(
            actions=["lambda:InvokeFunction"],
            resources=[
                "arn:aws:lambda:{}:{}:function:*".format(
                    self.region, self.account)
            ])
        api_role.add_to_policy(api_statement)

        # -----------------------------------
        #      lambda common configure
        # -----------------------------------
        env = {
            "TABLE_NAME": dynamodbTable.table_name,
            "USER_POOL_ID": userpool.user_pool_id,
            "USER_POOL_NAME": userpool.user_pool_provider_name,
            "CLIENT_ID": user_pool_client.user_pool_client_id
        }

        def _make_function(base_name: str, handler: str,
                           src_dir: str = 'function/src/task'
                           ) -> lambda_.Function:
            """Create one traced Python 3.8 Lambda (pip deps bundled via
            Docker) plus its two-week-retention log group."""
            fn = lambda_.Function(
                self,
                base_name,
                code=lambda_.Code.from_asset(
                    src_dir,
                    bundling=core.BundlingOptions(
                        image=lambda_.Runtime.PYTHON_3_8.
                        bundling_docker_image,
                        command=[
                            'bash', '-c',
                            'pip install -r requirements.txt -t /asset-output && cp -a . /asset-output'
                        ],
                    )),
                handler=handler,
                runtime=lambda_.Runtime.PYTHON_3_8,
                environment=env,
                tracing=lambda_.Tracing.ACTIVE,
                timeout=core.Duration.seconds(29),
                memory_size=512)
            logs.LogGroup(self,
                          base_name + 'LogGroup',
                          log_group_name='/aws/lambda/' + fn.function_name,
                          retention=logs.RetentionDays.TWO_WEEKS)
            return fn

        def _grant_table_access(fn: lambda_.Function) -> None:
            """Grant *fn* full access to the task table and its indexes."""
            fn.add_to_role_policy(statement=iam.PolicyStatement(
                actions=['dynamodb:*'],
                resources=[
                    dynamodbTable.table_arn, dynamodbTable.table_arn + '/*'
                ]))

        def _add_cognito_method(resource, http_method: str,
                                fn: lambda_.Function) -> None:
            """Wire *fn* to *resource* as a Cognito-protected method,
            invoked through api_role."""
            resource.add_method(
                http_method,
                integration=apigw.LambdaIntegration(
                    fn, credentials_role=api_role),
                authorization_type=apigw.AuthorizationType.COGNITO,
                authorizer=cognito_authorizer,
            )

        task_path = api.root.add_resource("task")
        task_id_path = task_path.add_resource("{task_id}")
        tasks_path = api.root.add_resource("tasks")

        # ---- CRUD + search handlers (all Cognito-protected) ----
        get_task_func = _make_function("getTaskFunction",
                                       "get.lambda_handler")
        _grant_table_access(get_task_func)
        _add_cognito_method(task_id_path, "GET", get_task_func)

        create_task_func = _make_function("createTaskFunction",
                                          "create.lambda_handler")
        _grant_table_access(create_task_func)
        _add_cognito_method(task_path, "POST", create_task_func)

        update_task_func = _make_function("updateTaskFunction",
                                          "update.lambda_handler")
        _grant_table_access(update_task_func)
        # NOTE(review): update is exposed as POST /task/{task_id}; PUT would
        # be the conventional verb — kept as POST to preserve behavior.
        _add_cognito_method(task_id_path, "POST", update_task_func)

        delete_task_func = _make_function("deleteTaskFunction",
                                          "delete.lambda_handler")
        _grant_table_access(delete_task_func)
        _add_cognito_method(task_id_path, "DELETE", delete_task_func)

        search_task_func = _make_function("searchTaskFunction",
                                          "search.lambda_handler")
        _grant_table_access(search_task_func)
        _add_cognito_method(tasks_path, "GET", search_task_func)

        # ---- login handler (unauthenticated; calls Cognito directly) ----
        login_task_func = _make_function("loginFunction",
                                         "login.lambda_handler",
                                         src_dir='function/src/user')
        login_task_func.add_to_role_policy(statement=iam.PolicyStatement(
            actions=['cognito-idp:AdminInitiateAuth'],
            resources=[userpool.user_pool_arn]))

        auth_path = api.root.add_resource("auth")
        auth_login_path = auth_path.add_resource("login")
        auth_login_path.add_method(
            "POST", integration=apigw.LambdaIntegration(login_task_func))
# --- Example #24 (scraped-sample separator; commented out so the module parses) ---
    def __init__(self, scope: core.Construct, id: str, webhook_function,
                 on_demand_function, ingest_allowed_ips):
        """REST front end for the Zoom ingester.

        Exposes POST /new_recording (served by the default webhook handler)
        and a CORS-enabled POST /ingest (on-demand handler) whose access is
        limited to *ingest_allowed_ips* through the API resource policy.
        """
        super().__init__(scope, id)

        stack_name = core.Stack.of(self).stack_name

        # Resource policy: open the whole API to everyone, then deny
        # POST /ingest to callers outside the allow-listed IPs.
        # The policy is a prop of the api and therefore cannot reference
        # the api's own ARN; see the CloudFormation documentation for the
        # API Gateway policy attribute.
        allow_all_stmt = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["execute-api:Invoke"],
            principals=[iam.AnyPrincipal()],
            resources=[core.Fn.join('', ['execute-api:/', '*'])])
        deny_ingest_stmt = iam.PolicyStatement(
            effect=iam.Effect.DENY,
            actions=["execute-api:Invoke"],
            principals=[iam.AnyPrincipal()],
            resources=[
                core.Fn.join('', ['execute-api:/', '*/POST/ingest'])
            ],
            conditions={"NotIpAddress": {
                "aws:SourceIp": ingest_allowed_ips
            }})
        resource_policy = iam.PolicyDocument(
            statements=[allow_all_stmt, deny_ingest_stmt])

        self.rest_api_name = f"{stack_name}-{names.REST_API}"

        # Access logs live in their own stack-managed group.
        access_log_group = logs.LogGroup(
            self,
            "apilogs",
            log_group_name=f"/aws/apigateway/{self.rest_api_name}/access_logs",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.SIX_MONTHS)

        self.api = apigw.LambdaRestApi(
            self,
            "api",
            handler=webhook_function,  # default handler
            rest_api_name=self.rest_api_name,
            proxy=False,
            deploy=True,
            policy=resource_policy,
            deploy_options=apigw.StageOptions(
                access_log_destination=apigw.LogGroupLogDestination(
                    access_log_group),
                access_log_format=apigw.AccessLogFormat.clf(),
                data_trace_enabled=True,
                metrics_enabled=True,
                logging_level=apigw.MethodLoggingLevel.INFO,
                stage_name=names.API_STAGE))

        self.api.add_api_key("ZoomIngesterApiKey")

        def _json_ok_response():
            # Shared empty-JSON 200 response used by both POST methods.
            return apigw.MethodResponse(
                status_code="200",
                response_models={"application/json": apigw.Model.EMPTY_MODEL})

        # POST /new_recording -> default (webhook) handler.
        self.new_recording_resource = self.api.root.add_resource(
            "new_recording")
        self.new_recording_method = self.new_recording_resource.add_method(
            "POST", method_responses=[_json_ok_response()])

        # POST /ingest -> on-demand handler, with CORS preflight enabled.
        self.ingest_resource = self.api.root.add_resource(
            "ingest",
            default_cors_preflight_options=apigw.CorsOptions(
                allow_origins=apigw.Cors.ALL_ORIGINS,
                allow_methods=["POST", "OPTIONS"],
                allow_headers=(apigw.Cors.DEFAULT_HEADERS +
                               ["Accept-Language", "X-Requested-With"])))
        self.ingest_method = self.ingest_resource.add_method(
            "POST",
            apigw.LambdaIntegration(on_demand_function),
            method_responses=[_json_ok_response()])

        def endpoint_url(resource_name):
            # Stage URL for a top-level resource of this API.
            return (f"https://{self.api.rest_api_id}.execute-api."
                    f"{core.Stack.of(self).region}.amazonaws.com/"
                    f"{names.API_STAGE}/{resource_name}")

        # The on-demand function needs to know where the webhook lives.
        on_demand_function.add_environment("WEBHOOK_ENDPOINT_URL",
                                           endpoint_url("new_recording"))

        core.CfnOutput(
            self,
            "WebhookEndpoint",
            export_name=f"{stack_name}-{names.WEBHOOK_ENDPOINT}-url",
            value=endpoint_url("new_recording"))
        core.CfnOutput(
            self,
            "OnDemandEndpoint",
            export_name=f"{stack_name}-{names.ON_DEMAND_ENDPOINT}-url",
            value=endpoint_url("ingest"))
        core.CfnOutput(
            self,
            "WebhookResourceId",
            export_name=f"{stack_name}-{names.WEBHOOK_ENDPOINT}-resource-id",
            value=self.new_recording_resource.resource_id)
        core.CfnOutput(
            self,
            "OnDemandResourceId",
            export_name=f"{stack_name}-{names.ON_DEMAND_ENDPOINT}-resource-id",
            value=self.ingest_resource.resource_id)
        core.CfnOutput(self,
                       "RestApiId",
                       export_name=f"{stack_name}-{names.REST_API}-id",
                       value=self.api.rest_api_id)
# --- Example #25 (scraped-sample separator; commented out so the module parses) ---
  def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    #XXX: For createing Amazon MWAA in the existing VPC,
    # remove comments from the below codes and
    # comments out vpc = aws_ec2.Vpc(..) codes,
    # then pass -c vpc_name=your-existing-vpc to cdk command
    # for example,
    # cdk -c vpc_name=your-existing-vpc syth
    #
    # vpc_name = self.node.try_get_context('vpc_name')
    # vpc = aws_ec2.Vpc.from_lookup(self, 'ExistingVPC',
    #   is_default=True,
    #   vpc_name=vpc_name
    # )

    vpc = aws_ec2.Vpc(self, "ApiGatewayDynamoDBVPC",
      max_azs=2,
      gateway_endpoints={
        "S3": aws_ec2.GatewayVpcEndpointOptions(
          service=aws_ec2.GatewayVpcEndpointAwsService.S3
        ),
        "DynamoDB": aws_ec2.GatewayVpcEndpointOptions(
          service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB
        )
      }
    )

    DDB_TABLE_SUFFIX = ''.join(random.sample((string.ascii_lowercase + string.digits), k=7))
    DDB_TABLE_NAME = "Comments-{}".format(DDB_TABLE_SUFFIX)

    ddb_table = aws_dynamodb.Table(self, "DynamoDbTable",
      table_name=DDB_TABLE_NAME,
      removal_policy=cdk.RemovalPolicy.DESTROY,
      partition_key=aws_dynamodb.Attribute(name="commentId",
        type=aws_dynamodb.AttributeType.STRING),
      time_to_live_attribute="ttl",
      billing_mode=aws_dynamodb.BillingMode.PROVISIONED,
      read_capacity=15,
      write_capacity=5,
    )

    ddb_table.add_global_secondary_index(
      read_capacity=15,
      write_capacity=5,
      index_name="pageId-index",
      partition_key=aws_dynamodb.Attribute(name='pageId', type=aws_dynamodb.AttributeType.STRING),
      projection_type=aws_dynamodb.ProjectionType.ALL
    )

    user_pool = aws_cognito.UserPool(self, 'UserPool',
      user_pool_name='UserPoolForApiGateway',
      removal_policy=cdk.RemovalPolicy.DESTROY,
      self_sign_up_enabled=True,
      sign_in_aliases={'email': True},
      auto_verify={'email': True},
      password_policy={
        'min_length': 8,
        'require_lowercase': False,
        'require_digits': False,
        'require_uppercase': False,
        'require_symbols': False,
      },
      account_recovery=aws_cognito.AccountRecovery.EMAIL_ONLY
    )

    user_pool_client = aws_cognito.UserPoolClient(self, 'UserPoolClient',
      user_pool=user_pool,
      auth_flows={
        'admin_user_password': True,
        'user_password': True,
        'custom': True,
        'user_srp': True
      },
      supported_identity_providers=[aws_cognito.UserPoolClientIdentityProvider.COGNITO]
    )

    auth = aws_apigateway.CognitoUserPoolsAuthorizer(self, 'AuthorizerForDynamoDBApi',
      cognito_user_pools=[user_pool]
    )

    ddb_access_policy_doc = aws_iam.PolicyDocument()
    ddb_access_policy_doc.add_statements(aws_iam.PolicyStatement(**{
      "effect": aws_iam.Effect.ALLOW,
      "resources": [ddb_table.table_arn],
      "actions": [
        "dynamodb:DeleteItem",
        "dynamodb:PartiQLInsert",
        "dynamodb:UpdateTimeToLive",
        "dynamodb:BatchWriteItem",
        "dynamodb:PutItem",
        "dynamodb:PartiQLUpdate",
        "dynamodb:UpdateItem",
        "dynamodb:PartiQLDelete"
      ]
    }))

    apigw_dynamodb_role = aws_iam.Role(self, "ApiGatewayRoleForDynamoDB",
      role_name='APIGatewayRoleForDynamoDB',
      assumed_by=aws_iam.ServicePrincipal('apigateway.amazonaws.com'),
      inline_policies={
        'DynamoDBAccessPolicy': ddb_access_policy_doc
      },
      managed_policies=[
        aws_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonDynamoDBReadOnlyAccess'),
      ]
    )

    dynamodb_api = aws_apigateway.RestApi(self, "DynamoDBProxyAPI",
      rest_api_name="comments-api",
      description="An Amazon API Gateway REST API that integrated with an Amazon DynamoDB.",
      endpoint_types=[aws_apigateway.EndpointType.REGIONAL],
      default_cors_preflight_options={
        "allow_origins": aws_apigateway.Cors.ALL_ORIGINS
      },
      deploy=True,
      deploy_options=aws_apigateway.StageOptions(stage_name="v1"),
      endpoint_export_name="DynamoDBProxyAPIEndpoint"
    )

    all_resources = dynamodb_api.root.add_resource("comments")
    one_resource = all_resources.add_resource("{pageId}")

    apigw_error_responses = [
      aws_apigateway.IntegrationResponse(status_code="400", selection_pattern="4\d{2}"),
      aws_apigateway.IntegrationResponse(status_code="500", selection_pattern="5\d{2}")
    ]

    apigw_ok_responses = [
      aws_apigateway.IntegrationResponse(
        status_code="200"
      )
    ]

    ddb_put_item_options = aws_apigateway.IntegrationOptions(
      credentials_role=apigw_dynamodb_role,
      integration_responses=[*apigw_ok_responses, *apigw_error_responses],
      request_templates={
        'application/json': json.dumps({
          "TableName": DDB_TABLE_NAME,
          "Item": {
            "commentId": {
              "S": "$context.requestId"
            },
            "pageId": {
              "S": "$input.path('$.pageId')"
            },
            "userName": {
              "S": "$input.path('$.userName')"
            },
            "message": {
              "S": "$input.path('$.message')"
            }
          }
        }, indent=2)
      },
      passthrough_behavior=aws_apigateway.PassthroughBehavior.WHEN_NO_TEMPLATES
    )

    create_integration = aws_apigateway.AwsIntegration(
      service='dynamodb',
      action='PutItem',
      integration_http_method='POST',
      options=ddb_put_item_options
    )

    method_responses = [
      aws_apigateway.MethodResponse(status_code='200'),
      aws_apigateway.MethodResponse(status_code='400'),
      aws_apigateway.MethodResponse(status_code='500')
    ]

    all_resources.add_method('POST', create_integration,
      method_responses=method_responses,
      authorization_type=aws_apigateway.AuthorizationType.COGNITO,
      authorizer=auth
    )

    get_response_templates = '''
#set($inputRoot = $input.path('$'))
{
  "comments": [
    #foreach($elem in $inputRoot.Items) {
       "commentId": "$elem.commentId.S",
       "userName": "******",
       "message": "$elem.message.S"
     }#if($foreach.hasNext),#end
    #end
  ]
}'''

    ddb_query_item_options = aws_apigateway.IntegrationOptions(
      credentials_role=apigw_dynamodb_role,
      integration_responses=[
        aws_apigateway.IntegrationResponse(
          status_code="200",
          response_templates={
            'application/json': get_response_templates
          }
        ),
        *apigw_error_responses
      ],
      request_templates={
        'application/json': json.dumps({
          "TableName": DDB_TABLE_NAME,
          "IndexName": "pageId-index",
          "KeyConditionExpression": "pageId = :v1",
          "ExpressionAttributeValues": {
            ":v1": {
              "S": "$input.params('pageId')"
            }
          }
        }, indent=2)
      },
      passthrough_behavior=aws_apigateway.PassthroughBehavior.WHEN_NO_TEMPLATES
    )

    get_integration = aws_apigateway.AwsIntegration(
      service='dynamodb',
      action='Query',
      integration_http_method='POST',
      options=ddb_query_item_options
    )

    one_resource.add_method('GET', get_integration,
      method_responses=method_responses,
      authorization_type=aws_apigateway.AuthorizationType.COGNITO,
      authorizer=auth
    )

    cdk.CfnOutput(self, 'DynamoDBTableName', value=ddb_table.table_name)
    cdk.CfnOutput(self, 'UserPoolId', value=user_pool.user_pool_id)
    cdk.CfnOutput(self, 'UserPoolClientId', value=user_pool_client.user_pool_client_id)
    def __init__(self, scope: core.Construct, id: str, back_end_api_name: str,
                 stack_log_level: str, **kwargs) -> None:
        """Expose an async-capable "square a number" Lambda behind API Gateway.

        GET /square/{number} invokes the Lambda synchronously by default;
        sending an ``InvocationType: Event`` request header maps to the
        ``X-Amz-Invocation-Type`` integration header, which makes Lambda run
        asynchronously and report success/failure to an SQS destination queue.

        :param back_end_api_name: name of the REST API to create.
        :param stack_log_level: value passed to the Lambda's LOG_LEVEL env var.
        """
        super().__init__(scope, id, **kwargs)

        # Queue receiving Lambda on_success/on_failure destination records
        async_dest_queue = _sqs.Queue(
            self, "Queue", queue_name="async_get_square_fn_dest_queue")

        # Read the Lambda source so it can be inlined into the template
        try:
            with open(
                    "serverless_async_lambda_api/stacks/back_end/lambda_src/get_square.py",
                    mode="r") as f:
                get_square_fn_code = f.read()
        except OSError as e:
            print("Unable to read Lambda Function Code")
            raise e

        get_square_fn = _lambda.Function(
            self,
            "getSquareFn",
            function_name="get_square_fn",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(get_square_fn_code),
            timeout=core.Duration.seconds(15),
            reserved_concurrent_executions=1,
            on_success=_lambda_dest.SqsDestination(async_dest_queue),
            on_failure=_lambda_dest.SqsDestination(async_dest_queue),
            environment={
                "LOG_LEVEL": stack_log_level,
                "Environment": "Production",
                "ANDON_CORD_PULLED": "False"
            })
        get_square_fn_version = get_square_fn.latest_version
        get_square_fn_version_alias = _lambda.Alias(
            self,
            "greeterFnAlias",
            alias_name="MystiqueAutomation",
            version=get_square_fn_version)

        # Add permissions to lambda to write messages to the queue
        async_dest_queue.grant_send_messages(get_square_fn)

        # Custom log group (/aws/lambda/<function-name>) so retention and
        # removal are managed by this stack instead of created implicitly
        get_square_fn_fn_lg = _logs.LogGroup(
            self,
            "squareFnLoggroup",
            log_group_name=f"/aws/lambda/{get_square_fn.function_name}",
            retention=_logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY)

        # Add API GW front end for the Lambda
        back_end_01_api_stage_options = _apigw.StageOptions(
            stage_name="miztiik",
            throttling_rate_limit=10,
            throttling_burst_limit=100,
            logging_level=_apigw.MethodLoggingLevel.INFO)

        # Create API Gateway
        api_01 = _apigw.RestApi(self,
                                "backEnd01Api",
                                rest_api_name=back_end_api_name,
                                deploy_options=back_end_01_api_stage_options,
                                endpoint_types=[_apigw.EndpointType.REGIONAL])

        # InvocationType='RequestResponse' if sync, 'Event' if async

        back_end_01_api_res = api_01.root.add_resource("square")
        get_square = back_end_01_api_res.add_resource("{number}")

        # API VTL Template mapping
        # https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-integration-async.html
        # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-mapping-template-reference.html
        # https://aws.amazon.com/premiumsupport/knowledge-center/custom-headers-api-gateway-lambda/
        req_template = '{"number": "$input.params("number")"}'

        # Loop through the headers looking for the asynchrony marker, i.e.
        # "InvocationType: Event". If found, set is_async and report that the
        # event was queued; otherwise return the Lambda's synchronous response.
        resp_template = """{
        "api_stage": "$context.stage",
        "api_request_id": "$context.requestId",
        "api_resource_path": "$context.resourcePath",
        "http_method": "$context.httpMethod",
        "source_ip": "$context.identity.sourceIp",
        "user-agent": "$context.identity.userAgent",
        #set($num_square = $util.escapeJavaScript($!input.json('$.square')))
        #foreach($param in $input.params().header.keySet())
        #if($param == "invocationtype" or $param == "InvocationType" && $util.escapeJavaScript($input.params().header.get($param)) == "Event")
        #set($is_async = "true")
        #end
        #end
        #if($is_async == "true")
        "asynchronous_invocation":"true",
        "message":"Event received. Check queue/logs for status"
        #else
        "synchronous_invocation":"true",
        "square_of_your_number_is":$!{num_square}
        #end
        }
        """

        get_square_method_get = get_square.add_method(
            http_method="GET",
            request_parameters={
                "method.request.header.InvocationType": True,
                "method.request.path.number": True
            },
            integration=_apigw.LambdaIntegration(
                handler=get_square_fn,
                proxy=False,
                request_parameters={
                    "integration.request.path.number":
                    "method.request.path.number",
                    # FIX: InvocationType is declared above as a request
                    # *header*, not a path parameter -- mapping from
                    # method.request.path.InvocationType is invalid.
                    "integration.request.header.X-Amz-Invocation-Type":
                    "method.request.header.InvocationType",
                    "integration.request.header.Content-Type":
                    "'application/x-www-form-urlencoded'"
                },
                passthrough_behavior=_apigw.PassthroughBehavior.
                WHEN_NO_TEMPLATES,
                request_templates={"application/json": req_template},
                integration_responses=[
                    _apigw.IntegrationResponse(
                        status_code="200",
                        # selection_pattern="2\\d{2}",  # Use for mapping Lambda Errors
                        response_parameters={},
                        response_templates={
                            "application/json": resp_template
                        })
                ]),
            method_responses=[
                _apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        "method.response.header.Content-Length": True,
                    },
                    response_models={"application/json": _apigw.EmptyModel()})
            ])

        # Outputs
        output_1 = core.CfnOutput(
            self,
            "GetSquareApiUrl",
            value=get_square.url,
            description=
            "Use a browser to access this url. Change {number} to any value between 1 and 100."
        )
# Example #27
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Wire up the SIMS-recipe recommendation pipeline.

        S3 PUTs under results/by-title-id/ trigger a Lambda that upserts the
        recommendation output into DynamoDB; a second Lambda behind an API
        Gateway REST API serves those recommendations to clients.
        """
        super().__init__(scope, id, **kwargs)

        region = kwargs['env'].region

        ################################################
        # DynamoDB
        ################################################
        # Table holding recommendation results produced by the SIMS recipe.
        sims_table = ddb.Table(
            self,
            'RecommendationSIMS',
            table_name='RecommendationSIMS',
            removal_policy=core.RemovalPolicy.DESTROY,
            partition_key=ddb.Attribute(name='itemid',
                                        type=ddb.AttributeType.STRING),
            billing_mode=ddb.BillingMode.PROVISIONED,
            read_capacity=5,
            write_capacity=50)

        # Read/write access to the table for the upsert Lambda's role.
        sims_table_rw_access = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[sims_table.table_arn],
            actions=[
                "dynamodb:BatchGetItem", "dynamodb:Describe*",
                "dynamodb:List*", "dynamodb:GetItem", "dynamodb:Query",
                "dynamodb:Scan", "dynamodb:BatchWriteItem",
                "dynamodb:DeleteItem", "dynamodb:PutItem",
                "dynamodb:UpdateItem"
            ])

        ################################################
        # Lambda + S3
        ################################################
        # Copies recommendation result files from S3 into the table.
        upsert_fn = _lambda.Function(
            self,
            'UpsertManhwakyungToDynmodbDB',
            function_name='UpsertManhwakyungToDynmodbDB',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('src/lambda/UpsertManhwakyungToDynmodbDB'),
            handler='manhwakyung_s3_to_dynamodb.lambda_handler',
            description=
            'Store recommendation results in dynamoDB (Recipe: SIMS)',
            environment={
                'REGION_NAME': region,
                'DDB_SIMS_TABLE': sims_table.table_name
            },
            timeout=core.Duration.minutes(1))

        data_bucket = s3.Bucket(
            self,
            "data",
            #removal_policy=core.RemovalPolicy.DESTROY
        )

        # To use an existing bucket instead, see:
        # https://stackoverflow.com/questions/60087302/how-to-add-resource-policy-to-existing-s3-bucket-with-cdk-in-javascript
        # https://stackoverflow.com/questions/60282173/lookup-s3-bucket-and-add-a-trigger-to-invoke-a-lambda
        #data_bucket = s3.Bucket.from_bucket_name(self, "ExistingBucket",
        #    bucket_name="team3-recommendation-system-personalize-data-jhhwang-cdk")

        # Fire the upsert Lambda for every result object written under
        # results/by-title-id/.
        upsert_fn.add_event_source(
            S3EventSource(
                data_bucket,
                events=[s3.EventType.OBJECT_CREATED_PUT],
                filters=[
                    s3.NotificationKeyFilter(prefix="results/by-title-id/",
                                             suffix=".out")
                ]))

        allow_s3_read = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            #resources = [data_bucket.bucket_arn, "{}/*".format(data_bucket.bucket_arn)],
            resources=["*"],
            actions=["s3:Get*", "s3:List*"])

        upsert_fn.add_to_role_policy(allow_s3_read)
        upsert_fn.add_to_role_policy(sims_table_rw_access)

        ##############################################################
        # APIGW + Lambda
        ##############################################################
        # Serves recommendations out of the table to API clients.
        recommend_fn = _lambda.Function(
            self,
            'RecommendSimsByTitleAPI',
            function_name='RecommendSimsByTitleAPI',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('src/lambda/RecommendSimsByTitleAPI'),
            handler='recommend_sims_by_title.lambda_handler',
            description='API to provide recommended results (Recipe: SIMS)',
            environment={
                'REGION_NAME': region,
                'DDB_SIMS_TABLE': sims_table.table_name,
                'DEF_ITEM_LIMIT': "10",
            },
            timeout=core.Duration.minutes(1))
        sims_table.grant_read_write_data(recommend_fn)

        recommend_api = apigw.LambdaRestApi(
            self,
            "RecommendSimsByTitle",
            handler=recommend_fn,
            proxy=False,
            rest_api_name="RecommendSimsByTitle",
            description=
            'Webtoon is recommended based on webtoon id. (Recipe: SIMS)',
            endpoint_types=[apigw.EndpointType.REGIONAL],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"),
        )

        # Routes: GET /recommended-titles/by-title and .../by-title/{id}
        by_title = recommend_api.root.add_resource(
            'recommended-titles').add_resource('by-title')
        by_title.add_method("GET")
        by_title_id = by_title.add_resource('{id}')
        by_title_id.add_method("GET")
        by_title_id.add_cors_preflight(allow_origins=["*"],
                                       allow_methods=["*"])
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        host_name,
        cert_arn,
        zone_id,
        admin_user: str,
        admin_password: str,
        cloud9_instance_size: str,
        participant_limit: str,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        stack = core.Stack.of(self)
        stack.template_options.description = "Connected Drink Dispenser Workshop"

        # Static Website
        props: StaticSiteProps = StaticSiteProps(
            fqdn=host_name,
            hosted_zone_id=zone_id,
            certificate_arn=cert_arn,
            error_configuration=[
                {
                    "error_code": 403,
                    "error_caching_min_ttl": 300,
                    "response_code": 200,
                    "response_page_path": "/index.html",
                },
                {
                    "error_code": 404,
                    "error_caching_min_ttl": 300,
                    "response_code": 200,
                    "response_page_path": "/index.html",
                },
            ],
            output_name="CDDWebSite",
        )
        cdd_site = StaticSiteConstruct(self, "StaticSite", props)

        # Custom resource to clean out static website bucket prior to delete
        # TODO: Move this to the StaticSiteConstruct as option
        props: CustomResourceProps = CustomResourceProps(
            name=id + "-CR-S3DeleteObjects",
            lambda_directory="./lambda_functions/cr_s3_delete",
            handler="index.main",
            timeout=30,
            runtime=lambda_.Runtime.PYTHON_3_7,
            environment={"BUCKET_NAME": cdd_site.bucket_name},
        )
        s3_delete_cr = CustomResourceConstruct(self, "EmptyCddS3Bucket", props)
        # DependsOn the bucket (we need to delete objects before the bucket is deleted)
        s3_delete_cr.resource.node.add_dependency(cdd_site.bucket_resource)
        policy_statement = iam.PolicyStatement()
        policy_statement.add_actions("s3:GetBucket*")
        policy_statement.add_actions("s3:GetObject*")
        policy_statement.add_actions("s3:DeleteObject*")
        policy_statement.add_actions("s3:List*")
        policy_statement.add_resources(cdd_site.bucket_resource.bucket_arn)
        policy_statement.add_resources(
            f"{cdd_site.bucket_resource.bucket_arn}/*")
        s3_delete_cr.add_policy_to_role(policy_statement)

        # IAM Constructs
        user_group = iam.Group(
            self,
            "UserGroup",
            group_name=id + "-CDDUserGroup",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "ReadOnlyAccess")
            ],
        )

        # DynamoDB tables
        user_db = dynamodb.Table(
            # UserId as key, user "admin" tracks next available dispenser id
            # No access to users, RW to Cognito Lambda
            self,
            "UserTable",
            table_name=id + "-UserTable",
            partition_key={
                "name": "userName",
                "type": dynamodb.AttributeType.STRING
            },
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
        )
        dispenser_db = dynamodb.Table(
            # Dispenser ID and credit amount - RO to users, RW to APIs
            self,
            "DispenserTable",
            table_name=id + "-DispenserTable",
            partition_key={
                "name": "dispenserId",
                "type": dynamodb.AttributeType.STRING,
            },
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
        )
        dispenser_events = dynamodb.Table(
            # Recorded events from dispenser actions
            self,
            "DispenserEvents",
            table_name=id + "-DispenserEvents",
            partition_key={
                "name": "dispenserId",
                "type": dynamodb.AttributeType.STRING,
            },
            sort_key={
                "name": "timestamp",
                "type": dynamodb.AttributeType.STRING
            },
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        # Cognito Resources
        # User pool with phone_number as username
        props: CognitoUserPoolProps = CognitoUserPoolProps(
            user_pool_name=id + "-users",
            client_name=id + "-webclient",
            auto_verified_attributes=["phone_number"],
            schema=[
                {
                    "name": "group",
                    "attributeDataType": "String",
                    "mutable": True,
                    "required": False,
                },
                {
                    "name": "dispenserId",
                    "attributeDataType": "String",
                    "mutable": True,
                    "required": False,
                },
            ],
            policies={
                "passwordPolicy": {
                    "minimumLength": 6,
                    "requireLowercase": True,
                    "requireNumbers": True,
                    "requireSymbols": False,
                    "requireUppercase": False,
                }
            },
        )
        user_pool = CognitoUserPoolConstruct(self, "UserPool", props)

        # Role and lambda triggers
        lambda_cognito_access_role = iam.Role(
            # Access to IDP calls (for triggers)
            self,
            "LambdaCognitoAccessRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            inline_policies=[
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        actions=[
                            "logs:CreateLogGroup",
                            "logs:CreateLogStream",
                            "logs:PutLogEvents",
                        ],
                        resources=["arn:aws:logs:*:*:*"],
                    ),
                    iam.PolicyStatement(actions=["cognito-idp:*"],
                                        resources=["*"]),
                    iam.PolicyStatement(actions=["dynamodb:*"],
                                        resources=["*"]),
                ])
            ],
        )

        # Triggers for UserPool
        # Pre-sign-up: triggered when username, password, and phone number submitted
        lambda_cognito_trigger_pre_signup = lambda_.Function(
            self,
            "CogntioTriggerPreSignUp",
            function_name=id + "-CogntioTriggerPreSignUp",
            code=lambda_.AssetCode("./lambda_functions/cog_pre_signup"),
            handler="lambda.handler",
            runtime=lambda_.Runtime.PYTHON_3_7,
            role=lambda_cognito_access_role,
            timeout=core.Duration.seconds(6),
            environment={
                "USER_TABLE": user_db.table_name,
                "PARTICIPANT_LIMIT": participant_limit,
            },
        )
        lambda_cognito_trigger_pre_signup.add_permission(
            "AllowCognitoPreSign",
            principal=iam.ServicePrincipal("cognito-idp.amazonaws.com"),
            source_arn=user_pool.user_pool_arn,
        )
        # Post confirmation: triggered after validation code provided
        lambda_cognito_trigger_post_confirm = lambda_.Function(
            self,
            "CogntioTriggerPostConfirm",
            function_name=id + "-CogntioTriggerPostConfirm",
            code=lambda_.AssetCode("./lambda_functions/cog_post_confirm"),
            handler="lambda.handler",
            runtime=lambda_.Runtime.PYTHON_3_7,
            role=lambda_cognito_access_role,
            timeout=core.Duration.seconds(6),
            environment={
                "USER_TABLE": user_db.table_name,
                "PARTICIPANT_LIMIT": participant_limit,
            },
        )
        lambda_cognito_trigger_post_confirm.add_permission(
            "AllowCognitoPostConfirm",
            principal=iam.ServicePrincipal("cognito-idp.amazonaws.com"),
            source_arn=user_pool.user_pool_arn,
        )

        # Attach triggers to pool
        user_pool.user_pool.lambda_config = cognito.CfnUserPool.LambdaConfigProperty(
            pre_sign_up=lambda_cognito_trigger_pre_signup.function_arn,
            post_confirmation=lambda_cognito_trigger_post_confirm.function_arn,
        )

        cognito.CfnUserPoolGroup(
            self,
            "UserPoolCDDUser",
            group_name="cdd_user",
            description="General users of CDD (participants)",
            user_pool_id=user_pool.user_pool_id,
        )
        cognito.CfnUserPoolGroup(
            self,
            "UserPoolCDDAdmin",
            group_name="cdd_admin",
            description="CDD administrators",
            user_pool_id=user_pool.user_pool_id,
        )
        identity_pool = cognito.CfnIdentityPool(
            self,
            "IdentityPool",
            identity_pool_name=id.replace("-", "") + "_idpool",
            allow_unauthenticated_identities=False,
            cognito_identity_providers=[{
                "clientId":
                user_pool.client_id,
                "providerName":
                user_pool.provider_name,
            }],
        )
        core.CfnOutput(
            self,
            "CognitoIdentityPoolId",
            export_name="CognitoIdentityPoolId",
            value=identity_pool.ref,
        )

        # Custom resource to create admin user - cannot do via CFn to set password
        props: CustomResourceProps = CustomResourceProps(
            name=id + "-CR-CreateCognitoAdminUser",
            lambda_directory="./lambda_functions/cr_create_admin_user",
            handler="index.main",
            timeout=30,
            runtime=lambda_.Runtime.PYTHON_3_7,
            environment={
                "COGNITO_USER_POOL_ID": user_pool.user_pool_id,
                "COGNITO_CLIENT_ID": user_pool.client_id,
                "ADMIN_USERNAME": admin_user,
                "ADMIN_PASSWORD": admin_password,
            },
        )
        create_admin_user_cr = CustomResourceConstruct(self, "CreateAdminUser",
                                                       props)
        # DependsOn the user pool
        create_admin_user_cr.resource.node.add_dependency(user_pool)
        policy_statement = iam.PolicyStatement()
        policy_statement.add_actions("cognito-idp:SignUp")
        policy_statement.add_actions("cognito-idp:AdminConfirmSignUp")
        policy_statement.add_resources("*")
        create_admin_user_cr.add_policy_to_role(policy_statement)

        # IAM roles for identity pool auth/unauth
        cog_unauth_role = iam.Role(
            self,
            "cognitoUnauthRole",
            role_name=f"Cognito_{identity_pool.identity_pool_name}_Unauth_Role",
            assumed_by=iam.FederatedPrincipal(
                "cognito-identity.amazonaws.com",
                conditions={
                    "StringEquals": {
                        "cognito-identity.amazonaws.com:aud": identity_pool.ref
                    },
                    "ForAnyValue:StringLike": {
                        "cognito-identity.amazonaws.com:amr": "unauthenticated"
                    },
                },
                assume_role_action="sts:AssumeRoleWithWebIdentity",
            ),
        )
        cog_unauth_role.attach_inline_policy(
            iam.Policy(
                self,
                "cognitoUnauth",
                policy_name="cognitoUnauth",
                statements=[
                    iam.PolicyStatement(
                        actions=[
                            "mobileanalytics:PutEvents", "cognito-sync:*"
                        ],
                        resources=["*"],
                    )
                ],
            ))
        cog_auth_role = iam.Role(
            self,
            "cognitoAuthRole",
            role_name=f"Cognito_{identity_pool.identity_pool_name}_Auth_Role",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonESCognitoAccess")
            ],
            assumed_by=iam.FederatedPrincipal(
                "cognito-identity.amazonaws.com",
                conditions={
                    "StringEquals": {
                        "cognito-identity.amazonaws.com:aud": identity_pool.ref
                    },
                    "ForAnyValue:StringLike": {
                        "cognito-identity.amazonaws.com:amr": "authenticated"
                    },
                },
                assume_role_action="sts:AssumeRoleWithWebIdentity",
            ),
        )
        cog_auth_role.attach_inline_policy(
            iam.Policy(
                self,
                "cognitoAuth",
                policy_name="cognitoAuth",
                statements=[
                    iam.PolicyStatement(
                        actions=[
                            "mobileanalytics:PutEvents",
                            "cognito-sync:*",
                            "execute-api:*",
                        ],
                        resources=["*"],
                    ),
                    # Provide full access to IoT for the authenticated user
                    # The AWS IoT policy scopes down the access
                    iam.PolicyStatement(actions=["iot:*"], resources=["*"]),
                ],
            ))
        # Finally, attach auth and unauth roles to Identity pool
        cognito.CfnIdentityPoolRoleAttachment(
            self,
            "CDDIdentityPoolRoleAttach",
            identity_pool_id=identity_pool.ref,
            roles={
                "authenticated": cog_auth_role.role_arn,
                "unauthenticated": cog_unauth_role.role_arn,
            },
        )

        ### Supporting IAM Roles and Policies
        lambda_full_access_role = iam.Role(
            # Wide open role for Lambda's to access other services
            self,
            "LambdaFullAccessRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            inline_policies=[
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        actions=[
                            "logs:CreateLogGroup",
                            "logs:CreateLogStream",
                            "logs:PutLogEvents",
                        ],
                        resources=["arn:aws:logs:*:*:*"],
                    ),
                    iam.PolicyStatement(actions=["*"], resources=["*"]),
                ])
            ],
        )
        lambda_iot_full_access_role = iam.Role(
            # Wide open role for Lambda's to access other services
            self,
            "LambdaIoTFullAccessRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            inline_policies=[
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        actions=[
                            "logs:CreateLogGroup",
                            "logs:CreateLogStream",
                            "logs:PutLogEvents",
                        ],
                        resources=["arn:aws:logs:*:*:*"],
                    ),
                    iam.PolicyStatement(actions=["dynamodb:*", "iot:*"],
                                        resources=["*"]),
                ])
            ],
        )
        lambda_api_app_role = iam.Role(
            # Role for APIG Lambda functions - make specific per Lambda/method if needed
            self,
            "ApiAppRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            inline_policies=[
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        actions=[
                            "logs:CreateLogGroup",
                            "logs:CreateLogStream",
                            "logs:PutLogEvents",
                        ],
                        resources=["arn:aws:logs:*:*:*"],
                    ),
                    iam.PolicyStatement(
                        actions=["dynamodb:*"],
                        resources=[
                            f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{dispenser_db.table_name}",
                            f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{dispenser_events.table_name}",
                            f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{user_db.table_name}",
                        ],
                    ),
                    iam.PolicyStatement(actions=["iot:*"], resources=["*"]),
                ])
            ],
        )
        lambda_api_delete_user_role = iam.Role(
            # Role for APIG Lambda delete user - specific as this has to delete multiple services
            self,
            "ApiDeleteUserRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            inline_policies=[
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        actions=[
                            "logs:CreateLogGroup",
                            "logs:CreateLogStream",
                            "logs:PutLogEvents",
                        ],
                        resources=["arn:aws:logs:*:*:*"],
                    ),
                    iam.PolicyStatement(
                        actions=["dynamodb:*"],
                        resources=[
                            f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{dispenser_db.table_name}",
                            f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{dispenser_events.table_name}",
                            f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{user_db.table_name}",
                        ],
                    ),
                    iam.PolicyStatement(actions=["cloud9:DeleteEnvironment"],
                                        resources=["*"]),
                    iam.PolicyStatement(
                        actions=[
                            "iam:DeleteLoginProfile",
                            "iam:ListGroupsForUser",
                            "iam:RemoveUserFromGroup",
                            "iam:DeleteUser",
                        ],
                        resources=["*"],
                    ),
                    iam.PolicyStatement(
                        actions=["cognito-idp:AdminDeleteUser"],
                        resources=["*"]),
                    iam.PolicyStatement(actions=["iot:*"], resources=["*"]),
                ])
            ],
        )
        lambda_api_dispense_role = iam.Role(
            # Role for lambda
            self,
            "CommandRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            inline_policies=[
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        actions=[
                            "logs:CreateLogGroup",
                            "logs:CreateLogStream",
                            "logs:PutLogEvents",
                        ],
                        resources=["arn:aws:logs:*:*:*"],
                    ),
                    iam.PolicyStatement(
                        actions=["dynamodb:*"],
                        resources=[
                            f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{dispenser_db.table_name}",
                            f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{dispenser_events.table_name}",
                        ],
                    ),
                    iam.PolicyStatement(actions=["iot:*"], resources=["*"]),
                ])
            ],
        )

        # IoT Policies
        iot_policy_dispenser_limited = iot.CfnPolicy(
            self,
            "IoTDispenserLimitedPolicy",
            policy_name=id + "-DispenserLimitedAccess",
            policy_document={
                "Version":
                "2012-10-17",
                "Statement": [
                    {
                        "Effect":
                        "Allow",
                        "Action": ["iot:Connect"],
                        "Resource": [
                            f"arn:aws:iot:{stack.region}:{stack.account}:client/${{iot:Connection.Thing.ThingName}}"
                        ],
                        "Condition": {
                            "Bool": {
                                "iot:Connection.Thing.IsAttached": [True]
                            }
                        },
                    },
                    {
                        "Effect": "Allow",
                        "Action": ["iot:Receive"],
                        "Resource": ["*"]
                    },
                    {
                        "Effect":
                        "Allow",
                        "Action": ["iot:Subscribe"],
                        "Resource": [
                            f"arn:aws:iot:{stack.region}:{stack.account}:topicfilter/$aws/things/${{iot:Certificate.Subject.CommonName}}/shadow/*",
                            f"arn:aws:iot:{stack.region}:{stack.account}:topicfilter/$aws/things/${{iot:Certificate.Subject.CommonName}}/cmd/${{iot:Certificate.Subject.CommonName}}",
                        ],
                    },
                    {
                        "Effect":
                        "Allow",
                        "Action": ["iot:Publish"],
                        "Resource": [
                            f"arn:aws:iot:{stack.region}:{stack.account}:topic/$aws/things/${{iot:Certificate.Subject.CommonName}}/shadow/update",
                            f"arn:aws:iot:{stack.region}:{stack.account}:topic/$aws/things/${{iot:Certificate.Subject.CommonName}}/shadow/get",
                            f"arn:aws:iot:{stack.region}:{stack.account}:topic/test/${{iot:Certificate.Subject.CommonName}}",
                            f"arn:aws:iot:{stack.region}:{stack.account}:topic/cmd/${{iot:Certificate.Subject.CommonName}}/response",
                        ],
                    },
                ],
            },
        )
        iot_policy_client = iot.CfnPolicy(
            self,
            "IoTClientPolicy",
            policy_name=id + "-IoTClientAccess",
            policy_document={
                "Version":
                "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Action": ["iot:Connect", "iot:Receive"],
                        "Resource": ["*"],
                    },
                    {
                        "Effect":
                        "Allow",
                        "Action": ["iot:Subscribe"],
                        "Resource": [
                            f"arn:aws:iot:{stack.region}:{stack.account}:topicfilter/events/*",
                            f"arn:aws:iot:{stack.region}:{stack.account}:topicfilter/$aws/things/*/shadow/update/accepted",
                        ],
                    },
                ],
            },
        )

        ### Lambda Functions
        # General Lambda Functions NOT associated with APIG
        lambda_process_events = lambda_.Function(
            self,
            "ProcessEvents",
            function_name=id + "-ProcessEvents",
            code=lambda_.AssetCode("./lambda_functions/process_events"),
            handler="process_events.handler",
            runtime=lambda_.Runtime.PYTHON_3_7,
            role=lambda_iot_full_access_role,
            timeout=core.Duration.seconds(20),
            environment={
                "EVENT_TABLE": dispenser_events.table_name,
                "STATUS_TABLE": dispenser_db.table_name,
            },
        )

        ## API Lambda functions
        # Return credit for dispenser
        api_credit_dispenser_function = lambda_.Function(
            self,
            "ApiCreditDispenserFunction",
            function_name=id + "-ApiCreditDispenserFunction",
            code=lambda_.AssetCode("./lambda_functions/api_credit_dispenser"),
            handler="credit_dispenser.handler",
            runtime=lambda_.Runtime.PYTHON_3_7,
            role=lambda_api_app_role,
            timeout=core.Duration.seconds(15),
            memory_size=128,
            environment={
                "DISPENSER_TABLE": dispenser_db.table_name,
                "EVENT_TABLE": dispenser_events.table_name,
            },
        )
        # Command
        api_command_function = lambda_.Function(
            self,
            "ApiCommandFunction",
            function_name=id + "-ApiCommandFunction",
            code=lambda_.AssetCode("./lambda_functions/api_command"),
            handler="command.handler",
            runtime=lambda_.Runtime.PYTHON_3_7,
            role=lambda_api_app_role,
            timeout=core.Duration.seconds(15),
            memory_size=128,
            environment={
                "DispenserTable": dispenser_db.table_name,
                "EventTable": dispenser_events.table_name,
            },
        )
        # Request dispense operation (set shadow or command to dispense)
        api_dispense_function = lambda_.Function(
            self,
            "ApiDispenseFunction",
            function_name=id + "-ApiDispenseFunction",
            code=lambda_.AssetCode("./lambda_functions/api_dispense"),
            handler="dispense.handler",
            runtime=lambda_.Runtime.PYTHON_3_7,
            role=lambda_api_dispense_role,
            timeout=core.Duration.seconds(15),
            memory_size=128,
            environment={
                "DISPENSER_TABLE": dispenser_db.table_name,
                "EVENT_TABLE": dispenser_events.table_name,
            },
        )
        # Request dispense operation (set shadow or command to dispense)
        api_dispenser_status_function = lambda_.Function(
            self,
            "ApiDispenserStatusFunction",
            function_name=id + "-ApiDispenserStatusFunction",
            code=lambda_.AssetCode("./lambda_functions/api_dispenser_status"),
            handler="dispenser_status.handler",
            runtime=lambda_.Runtime.PYTHON_3_7,
            role=lambda_api_app_role,
            timeout=core.Duration.seconds(15),
            memory_size=128,
            environment={"DISPENSER_TABLE": dispenser_db.table_name},
        )
        # Request user details from user table, create resources if needed
        # NOTE: This uses an overley permissive policy to create the resources needed
        api_get_resources_function = lambda_.Function(
            self,
            "ApiGetResourcesFunction",
            function_name=id + "-ApiGetResourcesFunction",
            code=lambda_.AssetCode("./lambda_functions/api_get_resources"),
            handler="get_resources.handler",
            runtime=lambda_.Runtime.PYTHON_3_7,
            role=lambda_full_access_role,
            # Timeout is for user creation: certain tasks such as Cloud9 may take longer
            timeout=core.Duration.seconds(300),
            memory_size=128,
            environment={
                "DISPENSER_TABLE": dispenser_db.table_name,
                "EVENT_TABLE": dispenser_events.table_name,
                "USER_TABLE": user_db.table_name,
                "USER_PERMISSIONS_GROUP": user_group.group_name,
                "IOT_POLICY_DISPENSER_LIMITED":
                iot_policy_dispenser_limited.policy_name,
                "IOT_POLICY_CLIENT": iot_policy_client.policy_name,
                "CLOUD9_INSTANCE_SIZE": cloud9_instance_size,
            },
        )
        # Request user details from user table
        api_delete_user_function = lambda_.Function(
            self,
            "ApiDeleteUserFunction",
            function_name=id + "-ApiDeleteUserFunction",
            code=lambda_.AssetCode("./lambda_functions/api_delete_user"),
            handler="delete_user.handler",
            runtime=lambda_.Runtime.PYTHON_3_7,
            role=lambda_api_delete_user_role,
            timeout=core.Duration.seconds(28),
            memory_size=256,
            environment={
                "DISPENSER_TABLE": dispenser_db.table_name,
                "EVENT_TABLE": dispenser_events.table_name,
                "USER_TABLE": user_db.table_name,
                "USER_POOL_ID": user_pool.user_pool_id,
            },
        )

        ### API Gateway
        api = apigateway.RestApi(
            self,
            id + "-API",
            api_key_source_type=apigateway.ApiKeySourceType.HEADER,
            deploy_options=apigateway.StageOptions(stage_name="prod"),
        )

        core.CfnOutput(
            self,
            "APIEndpoint",
            export_name="APIEndpoint",
            value=
            f"https://{api.rest_api_id}.execute-api.{stack.region}.amazonaws.com/prod/",
        )
        # Although / is not used as method, provide OPTIONS for hinting CORS
        add_cors_options(api.root)
        # Define Cognito authorizer and attach to gateway
        cog_authorizer = apigateway.CfnAuthorizer(
            self,
            "CognitoAuth",
            name="CognitoAuthName",
            rest_api_id=api.rest_api_id,
            type="COGNITO_USER_POOLS",
            identity_source="method.request.header.Authorization",
            provider_arns=[user_pool.user_pool_arn],
        )

        # # Resources (paths) and methods (GET, POST, etc.), for the API
        api_credit_resource = api.root.add_resource("credit")
        add_resource_method(
            api_credit_resource,
            http_method="GET",
            integration=apigateway.LambdaIntegration(
                api_credit_dispenser_function),
            authorization_type=apigateway.AuthorizationType.COGNITO,
            authorizer=cog_authorizer,
        )
        add_cors_options(api_credit_resource)
        # command
        api_command_resource = api.root.add_resource("command")
        add_resource_method(
            api_command_resource,
            http_method="GET",
            integration=apigateway.LambdaIntegration(api_command_function),
            authorization_type=apigateway.AuthorizationType.COGNITO,
            authorizer=cog_authorizer,
        )
        add_cors_options(api_command_resource)
        # Actuate dispenser
        api_dispense_resource = api.root.add_resource("dispense")
        add_resource_method(
            api_dispense_resource,
            http_method="GET",
            integration=apigateway.LambdaIntegration(api_dispense_function),
            authorization_type=apigateway.AuthorizationType.COGNITO,
            authorizer=cog_authorizer,
        )
        add_cors_options(api_dispense_resource)
        # Return dispenser status (from DynamoDB)
        api_dispenser_status_resource = api.root.add_resource("status")
        add_resource_method(
            api_dispenser_status_resource,
            http_method="GET",
            integration=apigateway.LambdaIntegration(
                api_dispenser_status_function),
            authorization_type=apigateway.AuthorizationType.COGNITO,
            authorizer=cog_authorizer,
        )
        add_cors_options(api_dispenser_status_resource)
        # Return user details from User Table
        api_get_resources_resource = api.root.add_resource("getResources")
        add_resource_method(
            api_get_resources_resource,
            http_method="POST",
            integration=apigateway.LambdaIntegration(
                api_get_resources_function),
            authorization_type=apigateway.AuthorizationType.COGNITO,
            authorizer=cog_authorizer,
        )
        add_cors_options(api_get_resources_resource)
        # Create a user based on valid token
        api_delete_user_resource = api.root.add_resource("deleteUser")
        add_resource_method(
            api_delete_user_resource,
            http_method="POST",
            integration=apigateway.LambdaIntegration(api_delete_user_function),
            authorization_type=apigateway.AuthorizationType.COGNITO,
            authorizer=cog_authorizer,
        )
        add_cors_options(api_delete_user_resource)

        # Create policy and reference group
        iam.Policy(
            self,
            "UserPermissionsPolicy",
            groups=[user_group],
            policy_name=id + "-UserPermissions",
            statements=[
                # Elevated permissions beyond the ReadOnlyUser
                # Allow seeing all MQTT messages
                iam.PolicyStatement(
                    actions=["iot:Subscribe", "iot:Connect", "iot:Receive"],
                    resources=["*"],
                ),
                # Allow search indexing
                iam.PolicyStatement(
                    actions=["iot:SearchIndex"],
                    resources=[
                        f"arn:aws:iot:{stack.region}:{stack.account}:index/AWS_Things"
                    ],
                ),
                # Allow changing of security group ingress on EC2 (Cloud9) to support mapping 443 to
                iam.PolicyStatement(
                    actions=[
                        "ec2:AuthorizeSecurityGroupIngress",
                        "ec2:RevokeSecurityGroupIngress",
                    ],
                    resources=[
                        f"arn:aws:ec2:{stack.region}:{stack.account}:security-group/*"
                    ],
                ),
                # DENY access to credentials table
                iam.PolicyStatement(
                    effect=iam.Effect.DENY,
                    actions=["dynamodb:*"],
                    resources=[
                        f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{user_db.table_name}"
                    ],
                ),
                # DENY access to S3 overall
                iam.PolicyStatement(effect=iam.Effect.DENY,
                                    actions=["s3:*"],
                                    resources=["*"]),
            ],
        )

        # IoT Constructs
        # Rule to process shadow events and send to logging
        iot_rule_log_shadow_events = iot.CfnTopicRule(
            self,
            "LogShadowEventsRule",
            rule_name=id.replace("-", "") + "_LogShadowEvents",
            topic_rule_payload=iot.CfnTopicRule.TopicRulePayloadProperty(
                description=
                "Based on shadow topic and content, process messages via Lambda",
                rule_disabled=False,
                aws_iot_sql_version="2016-03-23",
                sql=
                "select *, topic() AS topic FROM '$aws/things/+/shadow/update/documents'",
                actions=[
                    iot.CfnTopicRule.ActionProperty(
                        lambda_=iot.CfnTopicRule.LambdaActionProperty(
                            function_arn=lambda_process_events.function_arn))
                ],
            ),
        )
        # Allow rule to invoke the logging function
        lambda_process_events.add_permission(
            "AllowIoTRule1",
            principal=iam.ServicePrincipal("iot.amazonaws.com"),
            source_arn=iot_rule_log_shadow_events.attr_arn,
        )
        # Rule to process generic events and send to logging
        iot_rule_log_generic_events = iot.CfnTopicRule(
            self,
            "LogGenericEventsRule",
            rule_name=id.replace("-", "") + "_LogGenericEvents",
            topic_rule_payload=iot.CfnTopicRule.TopicRulePayloadProperty(
                description="Log generic events, enrich, then send to Lambda",
                rule_disabled=False,
                aws_iot_sql_version="2016-03-23",
                sql=
                "select *, timestamp() AS ts, topic() AS topic FROM 'events'",
                actions=[
                    iot.CfnTopicRule.ActionProperty(
                        lambda_=iot.CfnTopicRule.LambdaActionProperty(
                            function_arn=lambda_process_events.function_arn))
                ],
            ),
        )
        # Allow generic_events rule to Invoke the process_events function
        lambda_process_events.add_permission(
            "AllowIoTRule2",
            principal=iam.ServicePrincipal("iot.amazonaws.com"),
            source_arn=iot_rule_log_generic_events.attr_arn,
        )
        # Rule to process dispenser specific events and send to logging
        iot_rule_log_dispenser_events = iot.CfnTopicRule(
            self,
            "LogDispenserEventsRule",
            rule_name=id.replace("-", "") + "_LogDispenserEvents",
            topic_rule_payload=iot.CfnTopicRule.TopicRulePayloadProperty(
                description=
                "Log specific dispenser events, enrich, then send to Lambda",
                rule_disabled=False,
                aws_iot_sql_version="2016-03-23",
                sql=
                "select *, timestamp() AS ts, topic() AS topic FROM 'events/+'",
                actions=[
                    iot.CfnTopicRule.ActionProperty(
                        lambda_=iot.CfnTopicRule.LambdaActionProperty(
                            function_arn=lambda_process_events.function_arn))
                ],
            ),
        )
        # Allow log_dispenser_events rule to Invoke the process_events function
        lambda_process_events.add_permission(
            "AllowIoTRule3",
            principal=iam.ServicePrincipal("iot.amazonaws.com"),
            source_arn=iot_rule_log_dispenser_events.attr_arn,
        )
        # Rule to process cmd/NNN/response WHERE "command=dispense"
        iot_rule_command_response_dispense = iot.CfnTopicRule(
            self,
            "DispenseCommandResponseRule",
            rule_name=id.replace("-", "") + "_DispenseCommandResponse",
            topic_rule_payload=iot.CfnTopicRule.TopicRulePayloadProperty(
                description=
                "Invoke Lambda to process dispense commands from dispenser",
                rule_disabled=False,
                aws_iot_sql_version="2016-03-23",
                sql=
                "select *, topic() AS topic FROM '$aws/things/+/shadow/update/accepted' WHERE isUndefined(state.reported.response) = False",
                actions=[
                    iot.CfnTopicRule.ActionProperty(
                        lambda_=iot.CfnTopicRule.LambdaActionProperty(
                            function_arn=api_dispense_function.function_arn))
                ],
            ),
        )
        # Allow command_response rule to Invoke the dispense function to reconcile outstanding requests
        api_dispense_function.add_permission(
            "AllowIoTCommandResponseRule",
            principal=iam.ServicePrincipal("iot.amazonaws.com"),
            source_arn=iot_rule_command_response_dispense.attr_arn,
        )

        # Custom resource to delete workshop users - run to clean up any lingering ones
        # if the admin user didn't clean up. A lot of dependsOn as users are created with bindings
        # to other resources
        props: CustomResourceProps = CustomResourceProps(
            name=id + "-CR-DeleteParticipantUsers",
            lambda_directory="./lambda_functions/cr_delete_participant_users",
            handler="index.main",
            timeout=30,
            runtime=lambda_.Runtime.PYTHON_3_7,
            environment={
                # Read user records from UserTable
                "USER_TABLE":
                user_db.table_name,
                # Invoke the api_delete_user function
                "DELETE_USER_LAMBDA_FUNCTION":
                api_delete_user_function.function_arn,
            },
        )
        delete_participant_users_cr = CustomResourceConstruct(
            self, "DeleteParticpantUsers", props)
        # DependsOn the API Delete User Function
        delete_participant_users_cr.resource.node.add_dependency(
            api_delete_user_function)
        # DependsOn the user pool to delete Cognito users
        delete_participant_users_cr.resource.node.add_dependency(user_pool)
        # DependsOn the DynamoDB UserTable
        delete_participant_users_cr.resource.node.add_dependency(user_db)
        # DependsOn the IoT dispenser and client policies
        delete_participant_users_cr.resource.node.add_dependency(
            iot_policy_dispenser_limited)
        delete_participant_users_cr.resource.node.add_dependency(
            iot_policy_client)
        # DependsOn the IoT IAM user group
        delete_participant_users_cr.resource.node.add_dependency(user_group)
        # Permissions for function to delete users
        policy_statement = iam.PolicyStatement()
        policy_statement.add_actions("dynamodb:*")
        policy_statement.add_resources(
            f"arn:aws:dynamodb:{stack.region}:{stack.account}:table/{user_db.table_name}"
        )
        delete_participant_users_cr.add_policy_to_role(policy_statement)
        policy_statement = iam.PolicyStatement()
        policy_statement.add_actions("lambda:InvokeFunction")
        policy_statement.add_resources(api_delete_user_function.function_arn)
        delete_participant_users_cr.add_policy_to_role(policy_statement)
# ---- Example #29 ----
    def __init__(self, scope: core.Construct, id: str, table: dynamodb.Table, index_name: str, **kwargs) -> None:
        """Build the stock-history REST API.

        Creates two Lambda handlers with read access to *table* and exposes
        them through a regional API Gateway as GET /stock (list, queries the
        GSI named *index_name*) and GET /stock/{ticker} (per-ticker query).
        The finished RestApi is published as ``self.api``.
        """
        super().__init__(scope, id, **kwargs)

        # Shared execution role: CloudWatch logging via the AWS managed
        # basic-execution policy, plus read-only access to the table.
        handler_role = iam.Role(
            self, 'LambdaRole',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
            ],
        )
        table.grant_read_data(handler_role)

        # X-Ray SDK packaged as a layer and shared by both handlers.
        xray_layer = lambda_.LayerVersion(
            self, 'Xray',
            compatible_runtimes=[
                lambda_.Runtime.PYTHON_3_6,
                lambda_.Runtime.PYTHON_3_7,
                lambda_.Runtime.PYTHON_3_8,
            ],
            code=lambda_.Code.from_asset('src/xray'),
        )

        # Settings for the published version: no async retries, and one
        # provisioned-concurrency instance to sidestep cold starts.
        published_version = lambda_.VersionOptions(
            retry_attempts=0,
            provisioned_concurrent_executions=1,
        )

        # Handler for GET /stock — needs both the table and the index name.
        list_handler = lambda_.Function(
            self, 'List',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset('src/list'),
            handler='list.handler',
            role=handler_role,
            tracing=lambda_.Tracing.ACTIVE,
            current_version_options=published_version,
            layers=[xray_layer],
            environment={
                'TABLE_NAME': table.table_name,
                'INDEX_NAME': index_name,
            },
        )

        # Handler for GET /stock/{ticker} — queries the table directly.
        query_handler = lambda_.Function(
            self, 'Query',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset('src/query'),
            handler='query.handler',
            role=handler_role,
            tracing=lambda_.Tracing.ACTIVE,
            current_version_options=published_version,
            layers=[xray_layer],
            environment={
                'TABLE_NAME': table.table_name,
            },
        )

        # Regional REST API with error-level execution logging, CloudWatch
        # metrics, and X-Ray tracing enabled on the deployed stage.
        self.api = apigateway.RestApi(
            self, 'StockHistory',
            endpoint_types=[
                apigateway.EndpointType.REGIONAL,
            ],
            cloud_watch_role=True,
            deploy_options=apigateway.StageOptions(
                logging_level=apigateway.MethodLoggingLevel.ERROR,
                metrics_enabled=True,
                tracing_enabled=True,
            ),
        )

        # /stock and /stock/{ticker}; both methods integrate with the
        # published Lambda version (the one carrying provisioned concurrency).
        stocks_resource = self.api.root.add_resource('stock')
        ticker_resource = stocks_resource.add_resource('{ticker}')

        stocks_resource.add_method(
            http_method='GET',
            integration=apigateway.LambdaIntegration(
                list_handler.current_version,
                proxy=True,
            ),
        )

        ticker_resource.add_method(
            http_method='GET',
            integration=apigateway.LambdaIntegration(
                query_handler.current_version,
                proxy=True,
            ),
            request_parameters={
                # Optional date-range query-string parameters.
                'method.request.querystring.start': False,
                'method.request.querystring.end': False,
            },
        )
# ---- Example #30 ----
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        webhook_function,
        on_demand_function,
        schedule_update_function,
        status_query_function,
        slack_function,
        ingest_allowed_ips,
    ) -> None:
        """Build the REST API fronting the five Lambda handlers.

        Creates, in order: an IAM resource policy that IP-restricts two
        endpoints, a CloudWatch access-log group, the `LambdaRestApi` with a
        single deployed stage, an API key, five resources/methods (via
        ``self.create_resource``, defined elsewhere on this class), and a set
        of ``CfnOutput`` exports (endpoint URLs, resource ids, API id) for
        consumption by other stacks or tooling.

        Parameters (beyond the standard construct ``scope``/``id``):
            webhook_function: Lambda for POST /new_recording; also the API's
                default handler.
            on_demand_function: Lambda for POST /ingest (CORS-enabled).
            schedule_update_function: Lambda for POST /schedule_update.
            status_query_function: Lambda for GET /status.
            slack_function: Lambda for POST /slack.
            ingest_allowed_ips: IP/CIDR values fed to an ``aws:SourceIp``
                condition — presumably a list of CIDR strings; verify against
                the caller.
        """
        super().__init__(scope, id)

        stack_name = core.Stack.of(self).stack_name

        # Resource policy: allow everyone to invoke everything, then DENY
        # POST /ingest and GET /status for callers whose source IP is NOT in
        # ingest_allowed_ips. All other endpoints remain open at the policy
        # level (their own auth, if any, is configured elsewhere).
        policy = iam.PolicyDocument(
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["execute-api:Invoke"],
                    principals=[iam.AnyPrincipal()],
                    # note that the policy is a prop of the api which cannot
                    # reference itself, see the Cloudformation documentation
                    # for api gateway policy attribute
                    resources=[core.Fn.join("", ["execute-api:/", "*"])],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.DENY,
                    actions=["execute-api:Invoke"],
                    principals=[iam.AnyPrincipal()],
                    resources=[
                        core.Fn.join("", ["execute-api:/", "*/POST/ingest"]),
                        core.Fn.join("", ["execute-api:/", "*/GET/status"]),
                    ],
                    conditions={
                        "NotIpAddress": {"aws:SourceIp": ingest_allowed_ips}
                    },
                ),
            ]
        )

        self.rest_api_name = f"{stack_name}-{names.REST_API}"

        # Explicit log group for API access logs so we control retention and
        # teardown (DESTROY removes the logs with the stack).
        log_group = logs.LogGroup(
            self,
            "apilogs",
            log_group_name=f"/aws/apigateway/{self.rest_api_name}/access_logs",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.SIX_MONTHS,
        )

        # proxy=False: routes are declared explicitly below instead of a
        # catch-all {proxy+}; webhook_function only backs methods that don't
        # specify their own integration.
        self.api = apigw.LambdaRestApi(
            self,
            "api",
            handler=webhook_function,  # default handler
            rest_api_name=self.rest_api_name,
            proxy=False,
            deploy=True,
            policy=policy,
            deploy_options=apigw.StageOptions(
                access_log_destination=apigw.LogGroupLogDestination(log_group),
                access_log_format=apigw.AccessLogFormat.clf(),
                # NOTE(review): data_trace_enabled logs full request/response
                # payloads to CloudWatch — confirm that's acceptable for this
                # API's data sensitivity.
                data_trace_enabled=True,
                metrics_enabled=True,
                logging_level=apigw.MethodLoggingLevel.INFO,
                stage_name=names.API_STAGE,
            ),
        )

        # NOTE(review): key is created but no usage plan is attached here —
        # presumably associated elsewhere; verify it is actually enforced.
        self.api.add_api_key("ZoomIngesterApiKey")

        # One resource + method per Lambda; create_resource is defined
        # elsewhere on this class.
        self.new_recording_resource = self.create_resource(
            "new_recording",
            webhook_function,
            "POST",
        )

        # /ingest is browser-facing, hence the CORS preflight configuration.
        self.ingest_resource = self.create_resource(
            "ingest",
            on_demand_function,
            "POST",
            cors_options=apigw.CorsOptions(
                allow_origins=apigw.Cors.ALL_ORIGINS,
                allow_methods=["POST", "OPTIONS"],
                allow_headers=apigw.Cors.DEFAULT_HEADERS
                + ["Accept-Language", "X-Requested-With"],
            ),
        )

        self.schedule_update_resource = self.create_resource(
            "schedule_update",
            schedule_update_function,
            "POST",
        )

        self.status_query_resource = self.create_resource(
            "status",
            status_query_function,
            "GET",
        )

        self.slack_resource = self.create_resource(
            "slack",
            slack_function,
            "POST",
        )

        def endpoint_url(resource_name):
            # Invoke URL of a resource on the deployed stage:
            # https://{api-id}.execute-api.{region}.amazonaws.com/{stage}/{resource}
            return (
                f"https://{self.api.rest_api_id}.execute-api."
                f"{core.Stack.of(self).region}.amazonaws.com/"
                f"{names.API_STAGE}/{resource_name}"
            )

        # The on-demand ingester needs to call the webhook endpoint itself.
        on_demand_function.add_environment(
            "WEBHOOK_ENDPOINT_URL",
            endpoint_url("new_recording"),
        )

        # Exported outputs: one URL per endpoint, one resource id per
        # resource, plus the REST API id — all namespaced by stack name so
        # other stacks can Fn::ImportValue them.
        core.CfnOutput(
            self,
            "WebhookEndpoint",
            export_name=f"{stack_name}-{names.WEBHOOK_ENDPOINT}-url",
            value=endpoint_url("new_recording"),
        )

        core.CfnOutput(
            self,
            "OnDemandEndpoint",
            export_name=f"{stack_name}-{names.ON_DEMAND_ENDPOINT}-url",
            value=endpoint_url("ingest"),
        )

        core.CfnOutput(
            self,
            "ScheduleUpdateEndpoint",
            export_name=f"{stack_name}-{names.SCHEDULE_UPDATE_ENDPOINT}-url",
            value=endpoint_url("schedule_update"),
        )

        core.CfnOutput(
            self,
            "StatusQueryEndpoint",
            export_name=f"{stack_name}-{names.STATUS_ENDPOINT}-url",
            value=endpoint_url("status"),
        )

        core.CfnOutput(
            self,
            "SlackEndpoint",
            export_name=f"{stack_name}-{names.SLACK_ENDPOINT}-url",
            value=endpoint_url("slack"),
        )

        core.CfnOutput(
            self,
            "WebhookResourceId",
            export_name=f"{stack_name}-{names.WEBHOOK_ENDPOINT}-resource-id",
            value=self.new_recording_resource.resource_id,
        )

        core.CfnOutput(
            self,
            "OnDemandResourceId",
            export_name=f"{stack_name}-{names.ON_DEMAND_ENDPOINT}-resource-id",
            value=self.ingest_resource.resource_id,
        )

        core.CfnOutput(
            self,
            "ScheduleUpdateResourceId",
            export_name=f"{stack_name}-{names.SCHEDULE_UPDATE_ENDPOINT}-resource-id",
            value=self.schedule_update_resource.resource_id,
        )

        core.CfnOutput(
            self,
            "StatusQueryResourceId",
            export_name=f"{stack_name}-{names.STATUS_ENDPOINT}-resource-id",
            value=self.status_query_resource.resource_id,
        )

        core.CfnOutput(
            self,
            "SlackResourceId",
            export_name=f"{stack_name}-{names.SLACK_ENDPOINT}-resource-id",
            value=self.slack_resource.resource_id,
        )

        core.CfnOutput(
            self,
            "RestApiId",
            export_name=f"{stack_name}-{names.REST_API}-id",
            value=self.api.rest_api_id,
        )