Example #1
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create new IAM group and user
        group = iam.Group(self, "RekGroup")
        user = iam.User(self, "RekUser")

        # add IAM user to the new group
        user.add_to_group(group)

        # create S3 bucket to hold images
        # give new user access to the bucket
        bucket = s3.Bucket(self, 'Bucket')
        bucket.grant_read_write(user)

        # create DynamoDB table to hold Rekognition results
        table = ddb.Table(self,
                          'Classifications',
                          partition_key={
                              'name': 'image_name',
                              'type': ddb.AttributeType.STRING
                          })

        # create Lambda function
        lambda_function = _lambda.Function(
            self,
            'RekFunction',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='rekfunction.handler',
            code=_lambda.Code.from_asset(
                'rekognition_lambda_s3_trigger/lambda'),
            environment={
                'BUCKET_NAME': bucket.bucket_name,
                'TABLE_NAME': table.table_name
            })

        # add Rekognition permissions for Lambda function
        statement = iam.PolicyStatement()
        statement.add_actions("rekognition:DetectLabels")
        statement.add_resources("*")
        lambda_function.add_to_role_policy(statement)

        # create trigger for Lambda function with image type suffixes
        # (no explicit bind() is needed; add_object_created_notification
        # binds the destination to the bucket itself)
        notification = s3_notifications.LambdaDestination(lambda_function)
        bucket.add_object_created_notification(
            notification, s3.NotificationKeyFilter(suffix='.jpg'))
        bucket.add_object_created_notification(
            notification, s3.NotificationKeyFilter(suffix='.jpeg'))
        bucket.add_object_created_notification(
            notification, s3.NotificationKeyFilter(suffix='.png'))

        # grant permissions for lambda to read/write to DynamoDB table and bucket
        table.grant_read_write_data(lambda_function)
        bucket.grant_read_write(lambda_function)
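
The stack injects BUCKET_NAME and TABLE_NAME into the function's environment and grants rekognition:DetectLabels. A minimal sketch of the rekfunction.py handler this wiring assumes (the event parsing and attribute names are assumptions, not taken from the original):

# rekfunction.py -- handler sketch (assumed, not shown in the original)
import os
import boto3

rekognition = boto3.client('rekognition')
table = boto3.resource('dynamodb').Table(os.environ['TABLE_NAME'])

def handler(event, context):
    for record in event['Records']:
        bucket = record['s3']['bucket']['name']
        key = record['s3']['object']['key']
        # run label detection directly against the uploaded S3 object
        response = rekognition.detect_labels(
            Image={'S3Object': {'Bucket': bucket, 'Name': key}})
        labels = [label['Name'] for label in response['Labels']]
        # store the result under the table's partition key, image_name
        table.put_item(Item={'image_name': key, 'labels': labels})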
Example #2
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        shoe_company_ingestion_bucket_name = 'devassoc-shoe-company-ingestion'
        shoe_company_ingestion_bucket = s3.Bucket(
            self,
            'shoe-company-ingestion-bucket',
            bucket_name=shoe_company_ingestion_bucket_name,
            access_control=s3.BucketAccessControl.PRIVATE,
            removal_policy=core.RemovalPolicy.DESTROY,
            auto_delete_objects=True)
        core.CfnOutput(self,
                       'new-ingestion-bucket',
                       value=shoe_company_ingestion_bucket.bucket_name)

        shoe_company_json_bucket_name = 'devassoc-shoe-company-json'
        shoe_company_json_bucket = s3.Bucket(
            self,
            'shoe-company-json-bucket',
            bucket_name=shoe_company_json_bucket_name,
            access_control=s3.BucketAccessControl.PRIVATE,
            removal_policy=core.RemovalPolicy.DESTROY,
            auto_delete_objects=True)
        core.CfnOutput(self,
                       'new-json-bucket',
                       value=shoe_company_json_bucket.bucket_name)

        lambda_role = iam.Role(
            self,
            'lambda-role',
            role_name='PayrollProcessingLambdaRole',
            description=
            'Provides lambda with access to s3 and cloudwatch to execute payroll processing',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
        lambda_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name('AWSLambdaExecute'))

        this_dir = path.dirname(__file__)
        conversion_function = lam.Function(
            self,
            'conversion-function',
            function_name='PayrollProcessing',
            runtime=lam.Runtime.PYTHON_3_7,
            handler='conversion.lambda_handler',
            code=lam.Code.from_asset(path.join(this_dir, 'lambda')),
            role=lambda_role,
            description=
            'Converts payroll csvs to json and puts results in s3 bucket',
            timeout=core.Duration.minutes(3),
            memory_size=128)
        conversion_function.add_permission(
            'lambdas3permission',
            principal=iam.ServicePrincipal('s3.amazonaws.com'),
            action='lambda:InvokeFunction',
            source_arn=shoe_company_ingestion_bucket.bucket_arn,
            source_account=self.account)  # the account that owns the bucket
        shoe_company_ingestion_bucket.add_object_created_notification(
            s3n.LambdaDestination(conversion_function),
            s3.NotificationKeyFilter(suffix='.csv'))
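
The function's description says it converts payroll CSVs to JSON and writes the results to S3, triggered by .csv uploads to the ingestion bucket. A handler sketch under those assumptions; since the stack passes no environment variables, the destination bucket name is hardcoded here to match shoe_company_json_bucket_name:

# conversion.py -- handler sketch (assumed, not shown in the original)
import csv
import json
import boto3

s3 = boto3.client('s3')
JSON_BUCKET = 'devassoc-shoe-company-json'

def lambda_handler(event, context):
    for record in event['Records']:
        bucket = record['s3']['bucket']['name']
        key = record['s3']['object']['key']
        body = s3.get_object(Bucket=bucket, Key=key)['Body']
        rows = list(csv.DictReader(body.read().decode('utf-8').splitlines()))
        out_key = key.rsplit('.', 1)[0] + '.json'
        s3.put_object(Bucket=JSON_BUCKET, Key=out_key,
                      Body=json.dumps(rows).encode('utf-8'))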
Example #3
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        lambdaFn = _lambda.Function(self, "SampleLambdaFunction", 
            code=_lambda.Code.from_asset('function/'),
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            function_name="sample_lambda_function"
        )

        # add an environment variable
        lambdaFn.add_environment(key="STAGE", value="DEV")

        # create an S3 bucket and configure the notification event
        bucket = _s3.Bucket(self, "SampleBucket", bucket_name="kimi-first-cdk-bucket")
        notification = aws_s3_notifications.LambdaDestination(lambdaFn)
        bucket.add_event_notification(_s3.EventType.OBJECT_CREATED, notification, _s3.NotificationKeyFilter(prefix="hoge", suffix=".csv"))

        # configure a scheduled trigger event
        rule = _events.Rule(self, "SampleEventRule", 
            rule_name="schedule_trigger_event",
            schedule=_events.Schedule.expression("cron(10 * * * ? *)")
        )
        rule.add_target(_targets.LambdaFunction(lambdaFn))
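
Note that cron(10 * * * ? *) fires at minute 10 of every hour, so this one function receives both S3 notification events and scheduled events. A handler sketch (assumed; index.py is not shown in the original) that tells the two apart by event shape:

# index.py -- handler sketch (assumed, not shown in the original)
import os

def lambda_handler(event, context):
    stage = os.environ.get('STAGE', 'DEV')
    if 'Records' in event:
        # S3 object-created notification
        keys = [r['s3']['object']['key'] for r in event['Records']]
        print(f'[{stage}] objects created: {keys}')
    else:
        # scheduled EventBridge invocation
        print(f'[{stage}] scheduled run at {event.get("time")}')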
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        lambda_layer = aws_lambda.LayerVersion(
            self,
            "pillow_layer",
            code=aws_lambda.Code.asset("./python-pillow-6.2.1.zip"),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_7],
            description="Pillow_upload_from_cdk")
        # aws_lambda.LayerVersion.from_layer_version_arn(self, "pillow_layer",
        #                                             layer_version_arn="arn:aws-cn:lambda:cn-northwest-1:313497334385:layer:pillow:4"
        #                                             )

        # the image-conversion Lambda function
        mylambda = aws_lambda.Function(
            self,
            "myfunction_id",
            description="lambda trigger by S3 to convert image",
            # function_name="img-process-cdk-deploy",  # the name will be auto create
            code=aws_lambda.Code.asset("./lambda"),
            handler="lambda_function.lambda_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(60),
            memory_size=256,
            reserved_concurrent_executions=10,
            layers=[lambda_layer],
            dead_letter_queue_enabled=True,  # cdk will create a new SQS for this
            # role = aws_iam.Role.from_role_arn(self, "mylambda_role",
            #         role_arn="arn:aws-cn:iam::313497334385:role/Lambda-access-img-process-S3bucket"
            #         )  # cdk will create a role for this
        )

        with open('./env.js', 'r') as f:
            env = json.load(f)
            for (k, v) in env.items():
                mylambda.add_environment(k, v)

        # create a new bucket; an imported bucket cannot have event triggers added
        s3_bucket = aws_s3.Bucket(
            self,
            "mybucket",
            # bucket name will be auto-generated, or define it here
            # bucket_name="img-process-cdk-created"
        )
        mylambda.add_event_source(
            aws_lambda_event_sources.S3EventSource(
                s3_bucket,
                events=[aws_s3.EventType.OBJECT_CREATED],
                filters=[aws_s3.NotificationKeyFilter(prefix='input/')]))
        s3_bucket.grant_read_write(mylambda)
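
Despite the .js extension, env.js is read with json.load, so it must contain plain JSON whose top-level keys become Lambda environment variables. A hypothetical example of its shape (these keys are illustrative, not from the original):

{
    "BUCKET_REGION": "cn-northwest-1",
    "OUTPUT_PREFIX": "output/"
}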
Example #5
    def __init__(self, scope: core.Construct, id: str, sqsCfn, s3_loc_up,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.PREFIX = id

        self.sqs_queue = sqs.Queue.from_queue_arn(
            self, "QueuefromCfn",
            f"arn:aws:sqs:us-east-1:{core.Aws.ACCOUNT_ID}:{sqsCfn.queue_name}")

        s3_loc_up.add_object_created_notification(
            aws_s3_notifications.SqsDestination(self.sqs_queue),
            _s3.NotificationKeyFilter(
                prefix='stdized-data/comprehend_results/csv/', suffix='.csv'))
Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #This will create the s3 bucket in AWS
        bucket = s3.Bucket(self, "ssl_s3_bucket_raw")

        #This will create the sqs in AWS
        queue = sqs.Queue(self, "ssl_sqs_event_queue")

        #Create S3 notification object which points to SQS.
        notification = aws_s3_notifications.SqsDestination(queue)
        filter1 = s3.NotificationKeyFilter(prefix="home/")

        #Attach notification event to S3 bucket.
        bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                      notification, filter1)
Example #7
    def __init__(
        self,
        scope: core.Construct,
        construct_id: str,
        elastic_domain: aes.Domain,
        **kwargs,
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)
        indexing_lambda = _lambda.Function(
            self,
            "IndexingHandler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset(LAMBDA_PATH),
            handler="lambda_function.lambda_handler",
            environment={
                "EMBEDDER_IP": config.get_embedder_ip(),
                "ES_URL": elastic_domain.domain_endpoint,
                "ES_USER": config.get_es_credentials()[0],
                "ES_PASSWORD": config.get_es_credentials()[1],
                "INDEX_NAME": config.get_es_index(),
            },
        )
        notification = s3n.LambdaDestination(indexing_lambda)

        block_public_access = s3.BlockPublicAccess(
            block_public_acls=True,
            block_public_policy=True,
            ignore_public_acls=True,
            restrict_public_buckets=True)
        bucket = s3.Bucket(self,
                           "DocsDestination",
                           block_public_access=block_public_access,
                           removal_policy=core.RemovalPolicy.DESTROY)
        bucket.grant_read(indexing_lambda)
        bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            notification,
            s3.NotificationKeyFilter(prefix="wikipages/"),
        )

        core.Tags.of(indexing_lambda).add("system-id", config.get_system_id())
        core.Tags.of(bucket).add("system-id", config.get_system_id())

        core.CfnOutput(self, "S3BucketName", value=bucket.bucket_name)
        core.CfnOutput(self,
                       "IndexingLambdaName",
                       value=indexing_lambda.function_name)
Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create Primary S3 Bucket
        main_bucket = aws_s3.Bucket(self, "multipagepdfa2i", removal_policy=core.RemovalPolicy.DESTROY)
        
        # Create sqs queue
        page_sqs = aws_sqs.Queue(
            self, "multipagepdfa2i_page_sqs",
            queue_name = "multipagepdfa2i_page_sqs",
            visibility_timeout=core.Duration.minutes(3)
        )
        
        # Create all of the Lambda Functions
        lambda_functions = self.create_lambda_functions(page_sqs)

        # Create notification that triggers the kickoff lambda when a PDF is uploaded under uploads/
        kickoff_notification = aws_s3_notifications.LambdaDestination(lambda_functions["kickoff"])
        
        lambda_functions["analyzepdf"].add_event_source(aws_lambda_event_sources.SqsEventSource(page_sqs, batch_size=3))

        main_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED,  
            kickoff_notification,
            aws_s3.NotificationKeyFilter(prefix="uploads/", suffix="pdf")
        )

        self.configure_dynamo_table("multia2ipdf_callback", "jobid", "callback_token")
        self.configure_dynamo_table("multipagepdfa2i_upload_ids", "id", "key")       

        self.create_state_machine(lambda_functions, page_sqs)

        human_complete_target = aws_events_targets.LambdaFunction(lambda_functions["humancomplete"])

        human_review_event_pattern = aws_events.EventPattern(
            source=["aws.sagemaker"],
            detail_type=["SageMaker A2I HumanLoop Status Change"]
        )

        aws_events.Rule(self, 
            "multipadepdfa2i_HumanReviewComplete", 
            event_pattern=human_review_event_pattern,
            targets=[human_complete_target]
        )

        
Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Define S3 bucket
        my_bucket = s3.Bucket(self, "ssl-s3-sns-event-raw")

        #Add Filters if required
        filter1 = s3.NotificationKeyFilter(prefix="home/")

        #sns Topic
        my_sns = sns.Topic(self,
                           id="my-sns-topic",
                           display_name="my-sns-topic")

        #Create the s3 notification objects which points to Lambda
        notification = notifications.SnsDestination(my_sns)

        #link s3 and sns
        my_bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                         notification, filter1)

        #create sqs queue
        my_sqs = sqs.Queue(self, id="my-queue")

        #create sqs / sns subcription
        subscription = aws_sns_subscriptions.SqsSubscription(my_sqs)

        #add subscription to sns.
        my_sns.add_subscription(subscription)

        #create lambda function
        my_lambda = _lambda.Function(self,
                                     "HelloHandler",
                                     runtime=_lambda.Runtime.PYTHON_3_7,
                                     handler="hello.handler",
                                     code=_lambda.Code.asset('lambda'))

        #create sns/lambda subscription
        subscription = aws_sns_subscriptions.LambdaSubscription(my_lambda)

        #add lambda subscription to sns
        my_sns.add_subscription(subscription)
Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #Create Role
        S3SqsKmsSampleStack.__Role = IamService.create_role(self)

        #get KMS policy Document
        kms_policy_document = IamService.get_kms_policy_documents(self)

        kms_key = kms.Key(self,
                          id='ssl_s3_sqs_kms_key',
                          alias='sslS3SqsKmsKey',
                          description='This is kms key',
                          enabled=True,
                          enable_key_rotation=True,
                          policy=kms_policy_document)

        #This will create the s3 bucket in AWS
        bucket = s3.Bucket(self,
                           "ssl_s3_bucket_raw_kms",
                           bucket_name="ssl-s3-bucket-kms-raw",
                           encryption=s3.BucketEncryption.KMS,
                           encryption_key=kms_key)

        #This will create the sqs in AWS
        queue = sqs.Queue(self,
                          "ssl_sqs_event_queue",
                          queue_name="ssl-sqs-kms-event-queue",
                          encryption=sqs.QueueEncryption.KMS,
                          encryption_master_key=kms_key)

        #queue.node.add_dependency(kms_key)
        bucket.node.add_dependency(queue, kms_key)
        # Create S3 notification object which points to SQS.
        notification = aws_s3_notifications.SqsDestination(queue)
        filter1 = s3.NotificationKeyFilter(prefix="home/")

        # Attach notification event to S3 bucket.
        bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                      notification, filter1)
Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Define aws lambda function
        my_lambda = _lambda.Function(self,
                                     "HelloHandler",
                                     runtime=_lambda.Runtime.PYTHON_3_7,
                                     handler="hello.handler",
                                     code=_lambda.Code.asset('lambda'))

        # Define S3 bucket
        my_bucket = s3.Bucket(self, "ssl-s3-lambda-event-raw")

        #Create the s3 notification objects which points to Lambda
        notification = aws_s3_notifications.LambdaDestination(my_lambda)

        #Add Filters if required
        filter1 = s3.NotificationKeyFilter(prefix="home/")
        #Add event trigger from s3 to lambda
        my_bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                         notification, filter1)
Example #12
    def create_events(self, services):
        # kickoff_notification = aws_s3_notifications.LambdaDestination(services["lambda"]["kickoff"])
        # S3 suffix filters are case-sensitive, so common casings are listed
        # explicitly (a generator covering every casing is sketched after
        # this example)
        extensions = [
            "pdf", "pDf", "pDF", "pdF", "PDF", "Pdf",
            "png", "pNg", "pNG", "pnG", "PNG", "Png",
            "jpg", "jPg", "jPG", "jpG", "JPG", "Jpg"
        ]
        for extension in extensions:
            services["main_s3_bucket"].add_event_notification(
                aws_s3.EventType.OBJECT_CREATED,  
                aws_s3_notifications.SqsDestination(services["sf_sqs"]),
                aws_s3.NotificationKeyFilter(prefix="uploads/", suffix=extension)
            )    
        
        services["lambda"]["kickoff"].add_event_source(
            aws_lambda_event_sources.SqsEventSource(
                services["sf_sqs"], 
                batch_size=1
            )
        )
        
        services["lambda"]["analyzepdf"].add_event_source(
            aws_lambda_event_sources.SqsEventSource(
                services["textract_sqs"], 
                batch_size=1
            )
        )

        human_complete_target = aws_events_targets.LambdaFunction(services["lambda"]["humancomplete"])

        human_review_event_pattern = aws_events.EventPattern(
            source=["aws.sagemaker"],
            detail_type=["SageMaker A2I HumanLoop Status Change"]
        )

        aws_events.Rule(self, 
            "multipadepdfa2i_HumanReviewComplete", 
            event_pattern=human_review_event_pattern,
            targets=[human_complete_target]
        )
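
Because S3 suffix filters are case-sensitive, the extension list above spells out casing variants by hand (and misses a few: each three-letter extension has eight possible casings). A sketch that would generate every variant:

# generate every upper/lower casing of an extension (a sketch, not in the original)
from itertools import product

def all_casings(ext):
    return {''.join(chars)
            for chars in product(*((c.lower(), c.upper()) for c in ext))}

extensions = sorted(all_casings('pdf') | all_casings('png') | all_casings('jpg'))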
Example #13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create the S3 bucket
        my_bucket = s3.Bucket(self,"my_s3_bucket_raw")

        #create dynamo db table
        self._table = ddb.Table(
            self, 'Employee',
            partition_key={'name': 'Emp_Id', 'type': ddb.AttributeType.NUMBER}
        )
        
        #Create lambda function
        my_lambda = _lambda.Function(
                            self,
                            id="EventFunction",
                            runtime=_lambda.Runtime.PYTHON_3_7,
                            code=_lambda.Code.asset("lambda"),
                            handler="event.handler" ,
                            environment={
                                "Table_Name":self._table.table_name
                            }                           
        )

        #Add Filters
        filter1=s3.NotificationKeyFilter(prefix="home/",suffix=".json")
        
        #Create Notification
        s3_lambda_notification = notification.LambdaDestination(my_lambda)

        #link s3 and lambda
        my_bucket.add_event_notification(s3.EventType.OBJECT_CREATED,s3_lambda_notification,filter1)
        
        #grant lambda read/write access to the DynamoDB table
        self._table.grant_read_write_data(my_lambda)
        # (the notification above already lets S3 invoke the function,
        # so no explicit grant_invoke call is needed)

        #Grant s3 read access to lambda function
        my_bucket.grant_read(my_lambda)
        
Example #14
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # create the STS Lambda access role
        #  action -> statement -> policy -> role -> attach lambda
        actions = [
            "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents",
            "sts:AssumeRole"
        ]

        policyStatement = PolicyStatement(actions=actions, effect=Effect.ALLOW)
        policyStatement.add_all_resources()

        policy_name = "{}-policy".format(Constant.PROJECT_NAME)
        sts_policy = Policy(self, policy_name, policy_name=policy_name)

        sts_policy.add_statements(policyStatement)

        role_name = "{}-role".format(Constant.PROJECT_NAME)
        access_role = Role(self,
                           role_name,
                           role_name=role_name,
                           assumed_by=ServicePrincipal('lambda.amazonaws.com'))

        sts_policy.attach_to_role(access_role)

        # create the thumbnail Lambda access role
        #  action -> statement -> policy -> role
        thum_actions = [
            "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents",
            "s3:PutObject"
        ]

        thum_policyStatement = PolicyStatement(actions=thum_actions,
                                               effect=Effect.ALLOW)
        thum_policyStatement.add_all_resources()

        thum_policy_name = "{}-policy-thum".format(Constant.PROJECT_NAME)
        thum_policy = Policy(self,
                             thum_policy_name,
                             policy_name=thum_policy_name)

        thum_policy.add_statements(thum_policyStatement)

        thum_role_name = "{}-role-thum".format(Constant.PROJECT_NAME)
        thum_access_role = Role(
            self,
            thum_role_name,
            role_name=thum_role_name,
            assumed_by=ServicePrincipal('lambda.amazonaws.com'))

        thum_policy.attach_to_role(thum_access_role)

        # create the role for S3 put access
        #  action -> statement -> policy -> role
        s3_actions = [
            "s3:PutObject",
            "s3:GetObject",
            "s3:ListBucket",
            "s3:PutObjectTagging",
            "s3:PutObjectAcl",
        ]
        s3_policyStatement = PolicyStatement(actions=s3_actions,
                                             effect=Effect.ALLOW)
        s3_policyStatement.add_all_resources()

        s3_policy_name = "{}-policy-s3".format(Constant.PROJECT_NAME)
        s3_policy = Policy(self, s3_policy_name, policy_name=s3_policy_name)

        s3_policy.add_statements(s3_policyStatement)

        s3_role_name = "{}-role-s3".format(Constant.PROJECT_NAME)
        s3_access_role = Role(self,
                              s3_role_name,
                              role_name=s3_role_name,
                              assumed_by=ArnPrincipal(access_role.role_arn))

        s3_policy.attach_to_role(s3_access_role)

        # create the STS Lambda function
        sts_lambda = _lambda.Function(
            self,
            'sts',
            function_name='sts',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='auth.handler',
            timeout=Duration.minutes(1),
            role=access_role,
        )
        sts_lambda.add_environment("role_to_assume_arn",
                                   s3_access_role.role_arn)

        base_api = apigw.RestApi(
            self,
            'Endpoint',
            endpoint_types=[EndpointType.REGIONAL],
        )
        example_entity = base_api.root.add_resource('auth')
        example_entity_lambda_integration = apigw.LambdaIntegration(
            sts_lambda,
            proxy=False,
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin':
                    "'*'",
                }
            }])
        example_entity.add_method(
            'GET',
            example_entity_lambda_integration,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])

        self.add_cors_options(example_entity)
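        # add_cors_options is a helper defined elsewhere on this stack (not
        # shown here); typically such a helper adds an OPTIONS method backed
        # by a mock integration returning the Access-Control-Allow-* headers.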

        # create the thumbnail Lambda function
        layer_cv2 = _lambda.LayerVersion(
            self,
            'cv2',
            code=_lambda.Code.from_bucket(
                s3.Bucket.from_bucket_name(self,
                                           "cdk-data-layer",
                                           bucket_name='nowfox'),
                'cdk-data/cv2.zip'),
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
        )

        lambda_thum = _lambda.Function(
            self,
            'thum',
            function_name='thum',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='thum.handler',
            timeout=Duration.minutes(1),
            role=thum_access_role,
        )
        lambda_thum.add_environment("frame_second", "3")
        lambda_thum.add_layers(layer_cv2)

        # create the bucket that stores uploaded media
        s3_bucket_name = "{}-{}".format("upload", self._get_UUID(4))
        s3_upload = s3.Bucket(
            self,
            id=s3_bucket_name,
            bucket_name=s3_bucket_name,
            # access_control=s3.BucketAccessControl.PUBLIC_READ,  # not recommended: it also grants list permission; public_read_access below does not
            public_read_access=True,
            removal_policy=core.RemovalPolicy.DESTROY,  # TODO: destroy for test
            # removal_policy=core.RemovalPolicy.RETAIN
        )
        notification = aws_s3_notifications.LambdaDestination(lambda_thum)
        s3_filter = s3.NotificationKeyFilter(suffix=".mp4")
        s3_upload.add_event_notification(s3.EventType.OBJECT_CREATED_PUT,
                                         notification, s3_filter)
        s3_upload.add_cors_rule(allowed_methods=[
            HttpMethods.POST, HttpMethods.PUT, HttpMethods.GET
        ],
                                allowed_origins=["*"],
                                allowed_headers=["*"])
        '''
        # create the upload Lambda
        lambda_upload = _lambda.Function(
            self, 'upload',function_name='upload',
            runtime=_lambda.Runtime.JAVA_8,
            code=_lambda.Code.from_bucket(s3.Bucket.from_bucket_name(self, "cdk-data-upload-jar", bucket_name='nowfox'),
                                          'cdk-data/fileupload.jar'),
            handler='fileupload.FileUploadFunctionHandler::handleRequest',
            timeout=Duration.minutes(1),
            memory_size=512,
            role=access_role,
        )
        lambda_upload.add_environment("bucket", s3_bucket_name)
        lambda_upload.add_environment("region", "cn-northwest-1")
        '''
        core.CfnOutput(self,
                       "authUrl",
                       value=base_api.url + "auth",
                       description="authUrl")
        core.CfnOutput(self,
                       "S3BucketName",
                       value=s3_bucket_name,
                       description="S3BucketName")
Example #15
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # ------ Necessary Roles ------
        roles = IamRole(
            self, 'IamRoles'
        )
        

        # ------ S3 Buckets ------
        # Create Athena bucket
        athena_bucket = _s3.Bucket(self, "AthenaBucket",
            removal_policy=core.RemovalPolicy.DESTROY
        )
        # Create Forecast bucket
        forecast_bucket = _s3.Bucket(self, "FoecastBucket",
            removal_policy=core.RemovalPolicy.DESTROY
        )


        # ------ Athena ------ 
        # Config Athena query result output location
        workgroup_prop = _athena.CfnWorkGroup.WorkGroupConfigurationProperty(
            result_configuration=_athena.CfnWorkGroup.ResultConfigurationProperty(
                output_location="s3://"+athena_bucket.bucket_name
            )
        )
        # Create Athena workgroup
        athena_workgroup = _athena.CfnWorkGroup(
            self, 'ForecastGroup',
            name='ForecastGroup', 
            recursive_delete_option=True, 
            state='ENABLED', 
            work_group_configuration=workgroup_prop
        )
            
    
        # ------ SNS Topic ------
        topic = sns.Topic(
            self, 'NotificationTopic',
            display_name='StepsTopic'
        )
        # SNS email subscription. Get the email address from context value(cdk.json)
        topic.add_subscription(subs.EmailSubscription(self.node.try_get_context('my_email')))
         

        # ------ Layers ------
        shared_layer = _lambda.LayerVersion(
            self, 'LambdaLayer',
            layer_version_name='testfolderlayer',
            code=_lambda.AssetCode('shared/')
        )


        # ------ Lambdas for Step Functions ------
        create_dataset_lambda = _lambda.Function(
            self, 'CreateDataset',
            function_name='CreateDataset',
            code=_lambda.Code.asset('lambdas/createdataset/'),
            handler='dataset.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            timeout=core.Duration.seconds(30),
            layers=[shared_layer]
        )

        create_dataset_group_lambda = _lambda.Function(
            self, 'CreateDatasetGroup',
            function_name='CreateDatasetGroup',
            code = _lambda.Code.asset('lambdas/createdatasetgroup/'),
            handler = 'datasetgroup.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )

        import_data_lambda = _lambda.Function(
            self, 'CreateDatasetImportJob',
            function_name='CreateDatasetImportJob',
            code = _lambda.Code.asset('lambdas/createdatasetimportjob/'),
            handler = 'datasetimport.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            environment= {
                'FORECAST_ROLE': roles.forecast_role.role_arn
            },
            layers=[shared_layer]
        )

        create_predictor_lambda = _lambda.Function(
            self, 'CreatePredictor',
            function_name='CreatePredictor',
            code = _lambda.Code.asset('lambdas/createpredictor/'),
            handler = 'predictor.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )

        create_forecast_lambda = _lambda.Function(
            self, 'CreateForecast',
            function_name='CreateForecast',
            code = _lambda.Code.asset('lambdas/createforecast/'),
            handler = 'forecast.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            environment= {
                'EXPORT_ROLE': roles.forecast_role.role_arn
            },
            layers=[shared_layer],
            timeout=core.Duration.seconds(30)
        )

        # Deploy lambda with python dependencies from requirements.txt
        update_resources_lambda = _lambda_python.PythonFunction(
            self, 'UpdateResources',
            function_name='UpdateResources',
            entry='lambdas/updateresources/',
            index='update.py',
            handler='lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.update_role,
            environment= {
                'ATHENA_WORKGROUP': athena_workgroup.name,
                'ATHENA_BUCKET' : athena_bucket.bucket_name
            },
            layers=[shared_layer],
            timeout=core.Duration.seconds(900)
        )
        

        notify_lambda = _lambda.Function(
            self, 'NotifyTopic',
            function_name='NotifyTopic',
            code = _lambda.Code.asset('lambdas/notify/'),
            handler = 'notify.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            environment= {
                'SNS_TOPIC_ARN': topic.topic_arn
            },
            layers=[shared_layer]
        )

        delete_forecast_lambda = _lambda.Function(
            self, 'DeleteForecast',
            function_name='DeleteForecast',
            code = _lambda.Code.asset('lambdas/deleteforecast/'),
            handler = 'deleteforecast.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )

        delete_predictor_lambda = _lambda.Function(
            self, 'DeletePredictor',
            function_name='DeletePredictor',
            code = _lambda.Code.asset('lambdas/deletepredictor/'),
            handler = 'deletepredictor.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )

        delete_importjob_lambda = _lambda.Function(
            self, 'DeleteImportJob',
            function_name='DeleteImportJob',
            code = _lambda.Code.asset('lambdas/deletedatasetimport/'),
            handler = 'deletedataset.lambda_handler',
            runtime = _lambda.Runtime.PYTHON_3_7,
            role=roles.lambda_role,
            layers=[shared_layer]
        )


        # ------ StepFunctions ------
        strategy_choice = sfn.Choice(
            self, 'Strategy-Choice'
        )

        success_state = sfn.Succeed(
            self, 'SuccessState'
        )

        failed = sfn_tasks.LambdaInvoke(
            self, 'Failed',
            lambda_function = notify_lambda,
            result_path=None
        ).next(strategy_choice)

        create_dataset_job = sfn_tasks.LambdaInvoke(
            self, 'Create-Dataset', 
            lambda_function = create_dataset_lambda,
            retry_on_service_exceptions=True,
            payload_response_only=True
        )

        self.add_retry_n_catch(create_dataset_job, failed)

        create_dataset_group_job = sfn_tasks.LambdaInvoke(
            self, 'Create-DatasetGroup', 
            lambda_function = create_dataset_group_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(create_dataset_group_job, failed)


        import_data_job = sfn_tasks.LambdaInvoke(
            self, 'Import-Data',
            lambda_function = import_data_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(import_data_job, failed)

        create_predictor_job = sfn_tasks.LambdaInvoke(
            self, 'Create-Predictor',
            lambda_function = create_predictor_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(create_predictor_job, failed)

        create_forecast_job = sfn_tasks.LambdaInvoke(
            self, 'Create-Forecast',
            lambda_function = create_forecast_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(create_forecast_job, failed)

        update_resources_job = sfn_tasks.LambdaInvoke(
            self, 'Update-Resources',
            lambda_function = update_resources_lambda,
            payload_response_only=True
        )
        self.add_retry_n_catch(update_resources_job, failed)

        notify_success = sfn_tasks.LambdaInvoke(
            self, 'Notify-Success',
            lambda_function = notify_lambda,
            payload_response_only=True
        )

        delete_forecast_job = sfn_tasks.LambdaInvoke(
            self, 'Delete-Forecast',
            lambda_function = delete_forecast_lambda,
            payload_response_only=True
        )
        self.delete_retry(delete_forecast_job)

        delete_predictor_job = sfn_tasks.LambdaInvoke(
            self, 'Delete-Predictor',
            lambda_function = delete_predictor_lambda,
            payload_response_only=True
        )
        self.delete_retry(delete_predictor_job)

        delete_import_job = sfn_tasks.LambdaInvoke(
            self, 'Delete-ImportJob',
            lambda_function = delete_importjob_lambda,
            payload_response_only=True
        )
        self.delete_retry(delete_import_job)
        
        
        definition = create_dataset_job\
            .next(create_dataset_group_job)\
            .next(import_data_job)\
            .next(create_predictor_job)\
            .next(create_forecast_job)\
            .next(update_resources_job)\
            .next(notify_success)\
            .next(strategy_choice.when(sfn.Condition.boolean_equals('$.params.PerformDelete', False), success_state)\
                                .otherwise(delete_forecast_job).afterwards())\
            .next(delete_predictor_job)\
            .next(delete_import_job)
                    
            
        deploy_state_machine = sfn.StateMachine(
            self, 'StateMachine',
            definition = definition
            # role=roles.states_execution_role
        )

        # S3 event trigger lambda
        s3_lambda = _lambda.Function(
            self, 'S3Lambda',
            function_name='S3Lambda',
            code=_lambda.Code.asset('lambdas/s3lambda/'),
            handler='parse.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            role=roles.trigger_role,
            environment= {
                'STEP_FUNCTIONS_ARN': deploy_state_machine.state_machine_arn,
                'PARAMS_FILE': self.node.try_get_context('parameter_file')
            }
        )
        s3_lambda.add_event_source(
            event_src.S3EventSource(
                bucket=forecast_bucket,
                events=[_s3.EventType.OBJECT_CREATED],
                filters=[_s3.NotificationKeyFilter(
                    prefix='train/',
                    suffix='.csv'
                )]
            )
        )

        # CloudFormation output
        core.CfnOutput(
            self, 'StepFunctionsName',
            description='Step Functions Name',
            value=deploy_state_machine.state_machine_name
        )

        core.CfnOutput(
            self, 'ForecastBucketName',
            description='Forecast bucket name to drop your files',
            value=forecast_bucket.bucket_name
        )

        core.CfnOutput(
            self, 'AthenaBucketName',
            description='Athena bucket name to drop your files',
            value=athena_bucket.bucket_name
        )
Example #16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        if self.node.try_get_context('vpc_type'):
            validate_cdk_json(self)

        ES_LOADER_TIMEOUT = 600
        ######################################################################
        # REGION mapping / ELB & Lambda Arch
        ######################################################################
        elb_id_temp = region_info.FactName.ELBV2_ACCOUNT
        elb_map_temp = region_info.RegionInfo.region_map(elb_id_temp)
        region_dict = {}
        for region in elb_map_temp:
            # ELB account ID
            region_dict[region] = {'ElbV2AccountId': elb_map_temp[region]}
            # Lambda Arch
            if region in ('us-east-1', 'us-east-2', 'us-west-2', 'ap-south-1',
                          'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1',
                          'eu-central-1', 'eu-west-1', 'eu-west-2'):
                region_dict[region]['LambdaArch'] = (
                    aws_lambda.Architecture.ARM_64.name)
            else:
                region_dict[region]['LambdaArch'] = (
                    aws_lambda.Architecture.X86_64.name)
        region_mapping = core.CfnMapping(
            scope=self, id='RegionMap', mapping=region_dict)

        ######################################################################
        # get params
        ######################################################################
        allow_source_address = core.CfnParameter(
            self, 'AllowedSourceIpAddresses', allowed_pattern=r'^[0-9./\s]*',
            description='Space-delimited list of CIDR blocks',
            default='10.0.0.0/8 172.16.0.0/12 192.168.0.0/16')
        sns_email = core.CfnParameter(
            self, 'SnsEmail', allowed_pattern=r'^[0-9a-zA-Z@_\-\+\.]*',
            description=('Input your email as SNS topic, where Amazon '
                         'OpenSearch Service will send alerts to'),
            default='*****@*****.**')
        geoip_license_key = core.CfnParameter(
            self, 'GeoLite2LicenseKey', allowed_pattern=r'^[0-9a-zA-Z]{16}$',
            default='xxxxxxxxxxxxxxxx',
            description=("If you wolud like to enrich geoip locaiton such as "
                         "IP address's country, get a license key form MaxMind"
                         " and input the key. If you not, keep "
                         "xxxxxxxxxxxxxxxx"))
        reserved_concurrency = core.CfnParameter(
            self, 'ReservedConcurrency', default=10, type='Number',
            description=('Input reserved concurrency. Increase this value if '
                         'there are steady logs delay despite no errors'))
        aes_domain_name = self.node.try_get_context('aes_domain_name')
        bucket = f'{aes_domain_name}-{core.Aws.ACCOUNT_ID}'
        s3bucket_name_geo = f'{bucket}-geo'
        s3bucket_name_log = f'{bucket}-log'
        s3bucket_name_snapshot = f'{bucket}-snapshot'

        # organizations / multiaccount
        org_id = self.node.try_get_context('organizations').get('org_id')
        org_mgmt_id = self.node.try_get_context(
            'organizations').get('management_id')
        org_member_ids = self.node.try_get_context(
            'organizations').get('member_ids')
        no_org_ids = self.node.try_get_context(
            'no_organizations').get('aws_accounts')

        # Overwrite the default S3 bucket names with names from context
        temp_geo = self.node.try_get_context('s3_bucket_name').get('geo')
        if temp_geo:
            s3bucket_name_geo = temp_geo
        else:
            print('Using default bucket names')
        temp_log = self.node.try_get_context('s3_bucket_name').get('log')
        if temp_log:
            s3bucket_name_log = temp_log
        elif org_id or no_org_ids:
            s3bucket_name_log = f'{aes_domain_name}-{self.account}-log'
        else:
            print('Using default bucket names')
        temp_snap = self.node.try_get_context('s3_bucket_name').get('snapshot')
        if temp_snap:
            s3bucket_name_snapshot = temp_snap
        else:
            print('Using default bucket names')
        kms_cmk_alias = self.node.try_get_context('kms_cmk_alias')
        if not kms_cmk_alias:
            kms_cmk_alias = 'aes-siem-key'
            print('Using default key alias')

        ######################################################################
        # deploy VPC when context is defined as using VPC
        ######################################################################
        # vpc_type is 'new' or 'import' or None
        vpc_type = self.node.try_get_context('vpc_type')

        if vpc_type == 'new':
            is_vpc = True
            vpc_cidr = self.node.try_get_context('new_vpc_nw_cidr_block')
            subnet_cidr_mask = int(
                self.node.try_get_context('new_vpc_subnet_cidr_mask'))
            # VPC
            vpc_aes_siem = aws_ec2.Vpc(
                self, 'VpcAesSiem', cidr=vpc_cidr,
                max_azs=3, nat_gateways=0,
                subnet_configuration=[
                    aws_ec2.SubnetConfiguration(
                        subnet_type=aws_ec2.SubnetType.ISOLATED,
                        name='aes-siem-subnet', cidr_mask=subnet_cidr_mask)])
            subnet1 = vpc_aes_siem.isolated_subnets[0]
            subnets = [{'subnet_type': aws_ec2.SubnetType.ISOLATED}]
            vpc_subnets = aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED)
            vpc_aes_siem_opt = vpc_aes_siem.node.default_child.cfn_options
            vpc_aes_siem_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
            for subnet in vpc_aes_siem.isolated_subnets:
                subnet_opt = subnet.node.default_child.cfn_options
                subnet_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
        elif vpc_type == 'import':
            vpc_id = self.node.try_get_context('imported_vpc_id')
            vpc_aes_siem = aws_ec2.Vpc.from_lookup(
                self, 'VpcAesSiem', vpc_id=vpc_id)

            subnet_ids = get_subnet_ids(self)
            subnets = []
            for number, subnet_id in enumerate(subnet_ids, 1):
                obj_id = 'Subnet' + str(number)
                subnet = aws_ec2.Subnet.from_subnet_id(self, obj_id, subnet_id)
                subnets.append(subnet)
            subnet1 = subnets[0]
            vpc_subnets = aws_ec2.SubnetSelection(subnets=subnets)

        if vpc_type:
            is_vpc = True
            # Security Group
            sg_vpc_noinbound_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcNoinboundSecurityGroup',
                security_group_name='aes-siem-noinbound-vpc-sg',
                vpc=vpc_aes_siem)

            sg_vpc_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcSecurityGroup',
                security_group_name='aes-siem-vpc-sg',
                vpc=vpc_aes_siem)
            sg_vpc_aes_siem.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(vpc_aes_siem.vpc_cidr_block),
                connection=aws_ec2.Port.tcp(443),)
            sg_vpc_opt = sg_vpc_aes_siem.node.default_child.cfn_options
            sg_vpc_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

            # VPC Endpoint
            vpc_aes_siem.add_gateway_endpoint(
                'S3Endpoint', service=aws_ec2.GatewayVpcEndpointAwsService.S3,
                subnets=subnets)
            vpc_aes_siem.add_interface_endpoint(
                'SQSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.SQS,)
            vpc_aes_siem.add_interface_endpoint(
                'KMSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.KMS,)
        else:
            is_vpc = False

        is_vpc = core.CfnCondition(
            self, 'IsVpc', expression=core.Fn.condition_equals(is_vpc, True))
        """
        CloudFormation実行時の条件式の書き方
        ClassのBasesが aws_cdk.core.Resource の時は、
        node.default_child.cfn_options.condition = is_vpc
        ClassのBasesが aws_cdk.core.CfnResource の時は、
        cfn_options.condition = is_vpc
        """

        ######################################################################
        # create cmk of KMS to encrypt S3 bucket
        ######################################################################
        kms_aes_siem = aws_kms.Key(
            self, 'KmsAesSiemLog', description='CMK for SIEM solution',
            removal_policy=core.RemovalPolicy.RETAIN)

        aws_kms.Alias(
            self, 'KmsAesSiemLogAlias', alias_name=kms_cmk_alias,
            target_key=kms_aes_siem,
            removal_policy=core.RemovalPolicy.RETAIN)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow GuardDuty to use the key',
                actions=['kms:GenerateDataKey'],
                principals=[aws_iam.ServicePrincipal(
                    'guardduty.amazonaws.com')],
                resources=['*'],),)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow VPC Flow Logs to use the key',
                actions=['kms:Encrypt', 'kms:Decrypt', 'kms:ReEncrypt*',
                         'kms:GenerateDataKey*', 'kms:DescribeKey'],
                principals=[aws_iam.ServicePrincipal(
                    'delivery.logs.amazonaws.com')],
                resources=['*'],),)
        # basic policy
        key_policy_basic1 = aws_iam.PolicyStatement(
            sid='Allow principals in the account to decrypt log files',
            actions=['kms:DescribeKey', 'kms:ReEncryptFrom'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_basic1)

        # for Athena
        key_policy_athena = aws_iam.PolicyStatement(
            sid='Allow Athena to query s3 objects with this key',
            actions=['kms:Decrypt', 'kms:DescribeKey', 'kms:Encrypt',
                     'kms:GenerateDataKey*', 'kms:ReEncrypt*'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],
            conditions={'ForAnyValue:StringEquals': {
                'aws:CalledVia': 'athena.amazonaws.com'}})
        kms_aes_siem.add_to_resource_policy(key_policy_athena)

        # for CloudTrail
        key_policy_trail1 = aws_iam.PolicyStatement(
            sid='Allow CloudTrail to describe key',
            actions=['kms:DescribeKey'],
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_trail1)

        key_policy_trail2 = aws_iam.PolicyStatement(
            sid=('Allow CloudTrail to encrypt logs'),
            actions=['kms:GenerateDataKey*'],
            principals=[aws_iam.ServicePrincipal(
                'cloudtrail.amazonaws.com')],
            resources=['*'],
            conditions={'StringLike': {
                'kms:EncryptionContext:aws:cloudtrail:arn': [
                    f'arn:aws:cloudtrail:*:{core.Aws.ACCOUNT_ID}:trail/*']}})
        kms_aes_siem.add_to_resource_policy(key_policy_trail2)

        ######################################################################
        # create s3 bucket
        ######################################################################
        block_pub = aws_s3.BlockPublicAccess(
            block_public_acls=True,
            ignore_public_acls=True,
            block_public_policy=True,
            restrict_public_buckets=True
        )
        s3_geo = aws_s3.Bucket(
            self, 'S3BucketForGeoip', block_public_access=block_pub,
            bucket_name=s3bucket_name_geo,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for log collector
        s3_log = aws_s3.Bucket(
            self, 'S3BucketForLog', block_public_access=block_pub,
            bucket_name=s3bucket_name_log, versioned=True,
            encryption=aws_s3.BucketEncryption.S3_MANAGED,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for aes snapshot
        s3_snapshot = aws_s3.Bucket(
            self, 'S3BucketForSnapshot', block_public_access=block_pub,
            bucket_name=s3bucket_name_snapshot,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        ######################################################################
        # IAM Role
        ######################################################################
        # deployment policy for lambda deploy-aes
        arn_prefix = f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
        loggroup_aes = f'log-group:/aws/aes/domains/{aes_domain_name}/*'
        loggroup_opensearch = (
            f'log-group:/aws/OpenSearchService/domains/{aes_domain_name}/*')
        loggroup_lambda = 'log-group:/aws/lambda/aes-siem-*'
        policydoc_create_loggroup = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=[
                        'logs:PutResourcePolicy',
                        'logs:DescribeLogGroups',
                        'logs:DescribeLogStreams'
                    ],
                    resources=[f'{arn_prefix}:*', ]
                ),
                aws_iam.PolicyStatement(
                    actions=[
                        'logs:CreateLogGroup', 'logs:CreateLogStream',
                        'logs:PutLogEvents', 'logs:PutRetentionPolicy'],
                    resources=[
                        f'{arn_prefix}:{loggroup_aes}',
                        f'{arn_prefix}:{loggroup_opensearch}',
                        f'{arn_prefix}:{loggroup_lambda}',
                    ],
                )
            ]
        )

        policydoc_crhelper = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=[
                        'lambda:AddPermission',
                        'lambda:RemovePermission',
                        'events:ListRules',
                        'events:PutRule',
                        'events:DeleteRule',
                        'events:PutTargets',
                        'events:RemoveTargets'],
                    resources=['*']
                )
            ]
        )

        # snapshot rule for AES
        policydoc_snapshot = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['s3:ListBucket'],
                    resources=[s3_snapshot.bucket_arn]
                ),
                aws_iam.PolicyStatement(
                    actions=['s3:GetObject', 's3:PutObject',
                             's3:DeleteObject'],
                    resources=[s3_snapshot.bucket_arn + '/*']
                )
            ]
        )
        aes_siem_snapshot_role = aws_iam.Role(
            self, 'AesSiemSnapshotRole',
            role_name='aes-siem-snapshot-role',
            inline_policies=[policydoc_snapshot, ],
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        policydoc_assume_snapshrole = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['iam:PassRole'],
                    resources=[aes_siem_snapshot_role.role_arn]
                ),
            ]
        )

        aes_siem_deploy_role_for_lambda = aws_iam.Role(
            self, 'AesSiemDeployRoleForLambda',
            role_name='aes-siem-deploy-role-for-lambda',
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonOpenSearchServiceFullAccess'),
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
            ],
            inline_policies=[policydoc_assume_snapshrole, policydoc_snapshot,
                             policydoc_create_loggroup, policydoc_crhelper],
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com')
        )

        if vpc_type:
            aes_siem_deploy_role_for_lambda.add_managed_policy(
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaVPCAccessExecutionRole')
            )

        # for alert from Amazon OpenSearch Service
        aes_siem_sns_role = aws_iam.Role(
            self, 'AesSiemSnsRole',
            role_name='aes-siem-sns-role',
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        # EC2 role
        aes_siem_es_loader_ec2_role = aws_iam.Role(
            self, 'AesSiemEsLoaderEC2Role',
            role_name='aes-siem-es-loader-for-ec2',
            assumed_by=aws_iam.ServicePrincipal('ec2.amazonaws.com'),
        )

        aws_iam.CfnInstanceProfile(
            self, 'AesSiemEsLoaderEC2InstanceProfile',
            instance_profile_name=aes_siem_es_loader_ec2_role.role_name,
            roles=[aes_siem_es_loader_ec2_role.role_name]
        )

        ######################################################################
        # in VPC
        ######################################################################
        aes_role_exist = check_iam_role('/aws-service-role/es.amazonaws.com/')
        if vpc_type and not aes_role_exist:
            slr_aes = aws_iam.CfnServiceLinkedRole(
                self, 'AWSServiceRoleForAmazonOpenSearchService',
                aws_service_name='es.amazonaws.com',
                description='Created by cloudformation of siem stack'
            )
            slr_aes.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # SQS for es-loader's DLQ
        ######################################################################
        sqs_aes_siem_dlq = aws_sqs.Queue(
            self, 'AesSiemDlq', queue_name='aes-siem-dlq',
            retention_period=core.Duration.days(14))

        sqs_aes_siem_splitted_logs = aws_sqs.Queue(
            self, 'AesSiemSqsSplitLogs',
            queue_name='aes-siem-sqs-splitted-logs',
            dead_letter_queue=aws_sqs.DeadLetterQueue(
                max_receive_count=2, queue=sqs_aes_siem_dlq),
            visibility_timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            retention_period=core.Duration.days(14))

        ######################################################################
        # Setup Lambda
        ######################################################################
        # set up the es_loader Lambda function
        lambda_es_loader_vpc_kwargs = {}
        if vpc_type:
            lambda_es_loader_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': vpc_subnets,
            }

        lambda_es_loader = aws_lambda.Function(
            self, 'LambdaEsLoader', **lambda_es_loader_vpc_kwargs,
            function_name='aes-siem-es-loader',
            description=f'{SOLUTION_NAME} / es-loader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            # code=aws_lambda.Code.asset('../lambda/es_loader.zip'),
            code=aws_lambda.Code.asset('../lambda/es_loader'),
            handler='index.lambda_handler',
            memory_size=2048,
            timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            reserved_concurrent_executions=(
                reserved_concurrency.value_as_number),
            dead_letter_queue_enabled=True,
            dead_letter_queue=sqs_aes_siem_dlq,
            environment={
                'GEOIP_BUCKET': s3bucket_name_geo, 'LOG_LEVEL': 'info',
                'POWERTOOLS_LOGGER_LOG_EVENT': 'false',
                'POWERTOOLS_SERVICE_NAME': 'es-loader',
                'POWERTOOLS_METRICS_NAMESPACE': 'SIEM'})
        es_loader_newver = lambda_es_loader.add_version(
            name=__version__, description=__version__)
        es_loader_opt = es_loader_newver.node.default_child.cfn_options
        es_loader_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # send only:
        # sqs_aes_siem_dlq.grant(lambda_es_loader, 'sqs:SendMessage')
        # send and receive: receiving lets the loader re-process messages
        # from the DLQ, though that can loop
        sqs_aes_siem_dlq.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        sqs_aes_siem_splitted_logs.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        lambda_es_loader.add_event_source(
            aws_lambda_event_sources.SqsEventSource(
                sqs_aes_siem_splitted_logs, batch_size=1))

        # es-loader on EC2 role
        sqs_aes_siem_dlq.grant(
            aes_siem_es_loader_ec2_role, 'sqs:GetQueue*', 'sqs:ListQueues*',
            'sqs:ReceiveMessage*', 'sqs:DeleteMessage*')

        lambda_geo = aws_lambda.Function(
            self, 'LambdaGeoipDownloader',
            function_name='aes-siem-geoip-downloader',
            description=f'{SOLUTION_NAME} / geoip-downloader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            code=aws_lambda.Code.asset('../lambda/geoip_downloader'),
            handler='index.lambda_handler',
            memory_size=320,
            timeout=core.Duration.seconds(300),
            environment={
                's3bucket_name': s3bucket_name_geo,
                'license_key': geoip_license_key.value_as_string,
            }
        )
        lambda_geo_newver = lambda_geo.add_version(
            name=__version__, description=__version__)
        lamba_geo_opt = lambda_geo_newver.node.default_child.cfn_options
        lamba_geo_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # set up OpenSearch Service
        ######################################################################
        lambda_deploy_es = aws_lambda.Function(
            self, 'LambdaDeployAES',
            function_name='aes-siem-deploy-aes',
            description=f'{SOLUTION_NAME} / opensearch domain deployment',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            # code=aws_lambda.Code.asset('../lambda/deploy_es.zip'),
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_domain_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        lambda_deploy_es.add_environment(
            's3_snapshot', s3_snapshot.bucket_name)
        if vpc_type:
            lambda_deploy_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_deploy_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_deploy_es.add_environment('vpc_subnet_id', 'None')
            lambda_deploy_es.add_environment('security_group_id', 'None')
        deploy_es_newver = lambda_deploy_es.add_version(
            name=__version__, description=__version__)
        deploy_es_opt = deploy_es_newver.node.default_child.cfn_options
        deploy_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # execute lambda_deploy_es to deploy the Amazon OpenSearch Service domain
        aes_domain = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainDeployedR2',
            service_token=lambda_deploy_es.function_arn,)
        aes_domain.add_override('Properties.ConfigVersion', __version__)
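        # Bumping ConfigVersion changes the resource's properties on each
        # release, so CloudFormation sends an Update event and the handler
        # re-runs against the existing domain.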

        es_endpoint = aes_domain.get_att('es_endpoint').to_string()
        lambda_es_loader.add_environment('ES_ENDPOINT', es_endpoint)
        lambda_es_loader.add_environment(
            'SQS_SPLITTED_LOGS_URL', sqs_aes_siem_splitted_logs.queue_url)

        lambda_configure_es_vpc_kwargs = {}
        if vpc_type:
            lambda_configure_es_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': aws_ec2.SubnetSelection(subnets=[subnet1, ]), }
        lambda_configure_es = aws_lambda.Function(
            self, 'LambdaConfigureAES', **lambda_configure_es_vpc_kwargs,
            function_name='aes-siem-configure-aes',
            description=f'{SOLUTION_NAME} / opensearch configuration',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_config_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
                'es_endpoint': es_endpoint,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        lambda_configure_es.add_environment(
            's3_snapshot', s3_snapshot.bucket_name)
        if vpc_type:
            lambda_configure_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_configure_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_configure_es.add_environment('vpc_subnet_id', 'None')
            lambda_configure_es.add_environment('security_group_id', 'None')
        configure_es_newver = lambda_configure_es.add_version(
            name=__version__, description=__version__)
        configure_es_opt = configure_es_newver.node.default_child.cfn_options
        configure_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        aes_config = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainConfiguredR2',
            service_token=lambda_configure_es.function_arn,)
        aes_config.add_override('Properties.ConfigVersion', __version__)
        aes_config.add_depends_on(aes_domain)
        aes_config.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        es_arn = (f'arn:aws:es:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
                  f':domain/{aes_domain_name}')
        # grant permission to es_loader role
        inline_policy_to_load_entries_into_es = aws_iam.Policy(
            self, 'aes-siem-policy-to-load-entries-to-es',
            policy_name='aes-siem-policy-to-load-entries-to-es',
            statements=[
                aws_iam.PolicyStatement(
                    actions=['es:*'],
                    resources=[es_arn + '/*', ]),
            ]
        )
        lambda_es_loader.role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)
        aes_siem_es_loader_ec2_role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)

        # grant additional permission to es_loader role
        additional_kms_cmks = self.node.try_get_context('additional_kms_cmks')
        if additional_kms_cmks:
            inline_policy_access_to_additional_cmks = aws_iam.Policy(
                self, 'access_to_additional_cmks',
                policy_name='access_to_additional_cmks',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['kms:Decrypt'],
                        resources=sorted(set(additional_kms_cmks))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
        additional_buckets = self.node.try_get_context('additional_s3_buckets')

        if additional_buckets:
            buckets_list = []
            for bucket in additional_buckets:
                buckets_list.append(f'arn:aws:s3:::{bucket}')
                buckets_list.append(f'arn:aws:s3:::{bucket}/*')
            inline_policy_access_to_additional_buckets = aws_iam.Policy(
                self, 'access_to_additional_buckets',
                policy_name='access_to_additional_buckets',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['s3:GetObject*', 's3:GetBucket*', 's3:List*'],
                        resources=sorted(set(buckets_list))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)

        kms_aes_siem.grant_decrypt(lambda_es_loader)
        kms_aes_siem.grant_decrypt(aes_siem_es_loader_ec2_role)

        ######################################################################
        # s3 notification and grant permission
        ######################################################################
        s3_geo.grant_read_write(lambda_geo)
        s3_geo.grant_read(lambda_es_loader)
        s3_geo.grant_read(aes_siem_es_loader_ec2_role)
        s3_log.grant_read(lambda_es_loader)
        s3_log.grant_read(aes_siem_es_loader_ec2_role)

        # create s3 notification for es_loader
        notification = aws_s3_notifications.LambdaDestination(lambda_es_loader)

        # assign notification for the s3 object-created event types;
        # most log systems use PUT, but CLB also uses POST and multipart upload
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='AWSLogs/'))

        # For user logs, not AWS logs
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='UserLogs/'))

        # Download geoip to S3 once by executing lambda_geo
        get_geodb = aws_cloudformation.CfnCustomResource(
            self, 'ExecLambdaGeoipDownloader',
            service_token=lambda_geo.function_arn,)
        get_geodb.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # Refresh the GeoIP database every 12 hours
        rule = aws_events.Rule(
            self, 'CwlRuleLambdaGeoipDownloaderDilly',
            schedule=aws_events.Schedule.rate(core.Duration.hours(12)))
        rule.add_target(aws_events_targets.LambdaFunction(lambda_geo))

        ######################################################################
        # bucket policy
        ######################################################################
        s3_awspath = s3_log.bucket_arn + '/AWSLogs/' + core.Aws.ACCOUNT_ID
        bucket_policy_common1 = aws_iam.PolicyStatement(
            sid='ELB Policy',
            principals=[aws_iam.AccountPrincipal(
                account_id=region_mapping.find_in_map(
                    core.Aws.REGION, 'ElbV2AccountId'))],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],)
        # NLB / ALB / R53resolver / VPC Flow Logs
        bucket_policy_elb1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_elb2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_common1)
        s3_log.add_to_resource_policy(bucket_policy_elb1)
        s3_log.add_to_resource_policy(bucket_policy_elb2)

        # CloudTrail
        bucket_policy_trail1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For Cloudtrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:GetBucketAcl'], resources=[s3_log.bucket_arn],)
        bucket_policy_trail2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For CloudTrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_trail1)
        s3_log.add_to_resource_policy(bucket_policy_trail2)

        # GuardDuty
        bucket_policy_gd1 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to use the getBucketLocation operation',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:GetBucketLocation'], resources=[s3_log.bucket_arn],)
        bucket_policy_gd2 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to upload objects to the bucket',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_log.bucket_arn + '/*'],)
        bucket_policy_gd5 = aws_iam.PolicyStatement(
            sid='Deny non-HTTPS access', effect=aws_iam.Effect.DENY,
            actions=['s3:*'], resources=[s3_log.bucket_arn + '/*'],
            conditions={'Bool': {'aws:SecureTransport': 'false'}})
        bucket_policy_gd5.add_any_principal()
        s3_log.add_to_resource_policy(bucket_policy_gd1)
        s3_log.add_to_resource_policy(bucket_policy_gd2)
        s3_log.add_to_resource_policy(bucket_policy_gd5)

        # Config
        bucket_policy_config1 = aws_iam.PolicyStatement(
            sid='AWSConfig BucketPermissionsCheck and BucketExistenceCheck',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_config2 = aws_iam.PolicyStatement(
            sid='AWSConfigBucketDelivery',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/Config/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_config1)
        s3_log.add_to_resource_policy(bucket_policy_config2)

        # geoip
        bucket_policy_geo1 = aws_iam.PolicyStatement(
            sid='Allow geoip downloader and es-loader to read/write',
            principals=[lambda_es_loader.role, lambda_geo.role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_geo.bucket_arn + '/*'],)
        s3_geo.add_to_resource_policy(bucket_policy_geo1)

        # ES Snapshot
        bucket_policy_snapshot = aws_iam.PolicyStatement(
            sid='Allow ES to store snapshot',
            principals=[aes_siem_snapshot_role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_snapshot.bucket_arn + '/*'],)
        s3_snapshot.add_to_resource_policy(bucket_policy_snapshot)

        ######################################################################
        # for multi-account / organizations
        ######################################################################
        if org_id or no_org_ids:
            ##################################################################
            # KMS key policy for multi-account / organizations
            ##################################################################
            # for CloudTrail
            cond_tail2 = self.make_resource_list(
                path='arn:aws:cloudtrail:*:', tail=':trail/*',
                keys=self.list_without_none(org_mgmt_id, no_org_ids))
            key_policy_mul_trail2 = aws_iam.PolicyStatement(
                sid=('Allow CloudTrail to encrypt logs for multiaccounts'),
                actions=['kms:GenerateDataKey*'],
                principals=[aws_iam.ServicePrincipal(
                    'cloudtrail.amazonaws.com')],
                resources=['*'],
                conditions={'StringLike': {
                    'kms:EncryptionContext:aws:cloudtrail:arn': cond_tail2}})
            kms_aes_siem.add_to_resource_policy(key_policy_mul_trail2)

            # for replication
            key_policy_rep1 = aws_iam.PolicyStatement(
                sid=('Enable cross account encrypt access for S3 Cross Region '
                     'Replication'),
                actions=['kms:Encrypt'],
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                resources=['*'],)
            kms_aes_siem.add_to_resource_policy(key_policy_rep1)

            ##################################################################
            # Bucket policy for multi-account / organizations
            ##################################################################
            s3_log_bucket_arn = 'arn:aws:s3:::' + s3bucket_name_log

            # for CloudTrail
            s3_mulpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_org_trail = aws_iam.PolicyStatement(
                sid='AWSCloudTrailWrite for Multiaccounts / Organizations',
                principals=[
                    aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_mulpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_org_trail)

            # config
            s3_conf_multpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/Config/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_mul_config2 = aws_iam.PolicyStatement(
                sid='AWSConfigBucketDelivery',
                principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_conf_multpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_mul_config2)

            # for replication
            bucket_policy_rep1 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on objects',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:ReplicateDelete', 's3:ReplicateObject',
                         's3:ReplicateTags', 's3:GetObjectVersionTagging',
                         's3:ObjectOwnerOverrideToBucketOwner'],
                resources=[f'{s3_log_bucket_arn}/*'])
            bucket_policy_rep2 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on bucket',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:List*', 's3:GetBucketVersioning',
                         's3:PutBucketVersioning'],
                resources=[f'{s3_log_bucket_arn}'])
            s3_log.add_to_resource_policy(bucket_policy_rep1)
            s3_log.add_to_resource_policy(bucket_policy_rep2)

        ######################################################################
        # SNS topic for Amazon OpenSearch Service Alert
        ######################################################################
        sns_topic = aws_sns.Topic(
            self, 'SnsTopic', topic_name='aes-siem-alert',
            display_name='AES SIEM')

        sns_topic.add_subscription(aws_sns_subscriptions.EmailSubscription(
            email_address=sns_email.value_as_string))
        sns_topic.grant_publish(aes_siem_sns_role)

        ######################################################################
        # output of CFn
        ######################################################################
        kibanaurl = f'https://{es_endpoint}/_dashboards/'
        kibanaadmin = aes_domain.get_att('kibanaadmin').to_string()
        kibanapass = aes_domain.get_att('kibanapass').to_string()

        core.CfnOutput(self, 'RoleDeploy', export_name='role-deploy',
                       value=aes_siem_deploy_role_for_lambda.role_arn)
        core.CfnOutput(self, 'DashboardsUrl', export_name='dashboards-url',
                       value=kibanaurl)
        core.CfnOutput(self, 'DashboardsPassword',
                       export_name='dashboards-pass', value=kibanapass,
                       description=('Please change the password in OpenSearch '
                                    'Dashboards ASAP'))
        core.CfnOutput(self, 'DashboardsAdminID',
                       export_name='dashboards-admin', value=kibanaadmin)
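
The two custom resources above (AesSiemDomainDeployedR2 and AesSiemDomainConfiguredR2) call the deploy Lambda through the standard CloudFormation custom resource contract. A hedged sketch of a handler compatible with that contract follows; the handler name and the attributes (es_endpoint, kibanaadmin, kibanapass) match what the stack reads via get_att, but the body and values are illustrative, not the actual implementation:

import json
import urllib.request

def aes_domain_handler(event, context):
    # CloudFormation sends Create/Update/Delete events to the service token.
    result = {
        'Status': 'SUCCESS',
        'PhysicalResourceId': 'aes-siem-domain',
        'StackId': event['StackId'],
        'RequestId': event['RequestId'],
        'LogicalResourceId': event['LogicalResourceId'],
        # Surfaced to the stack via get_att(); values here are placeholders.
        'Data': {'es_endpoint': 'search-aes-siem.example.com',
                 'kibanaadmin': 'aesadmin',
                 'kibanapass': 'replace-me'},
    }
    # The outcome must be PUT to the pre-signed URL CloudFormation provides.
    req = urllib.request.Request(
        event['ResponseURL'], data=json.dumps(result).encode(),
        headers={'Content-Type': ''}, method='PUT')
    urllib.request.urlopen(req)
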
    def __init__(self, scope: core.Construct, id: str,
                 infra: RtspBaseResourcesConstruct, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create the inventory bucket...
        self.inventories = s3.Bucket(
            self,
            'InventoryBucket',
            bucket_name='homenet-{}.rtsp-inventories.{}.virtual.world'.format(
                infra.landing_zone.zone_name,
                core.Stack.of(self).region).lower(),
            removal_policy=core.RemovalPolicy.DESTROY,
            cors=[
                s3.CorsRule(allowed_methods=[s3.HttpMethods.GET],
                            allowed_origins=['*'])
            ],
            lifecycle_rules=[
                s3.LifecycleRule(
                    id='Transition-to-IA-after-30D',
                    prefix='eufy/',
                    abort_incomplete_multipart_upload_after=core.Duration.days(
                        7),
                    transitions=[
                        s3.Transition(
                            storage_class=s3.StorageClass.INFREQUENT_ACCESS,
                            transition_after=core.Duration.days(30))
                    ])
            ])

        # Create inventory collections for the Eufy Homebases...
        infra.bucket.add_inventory(
            objects_prefix='eufy/',
            inventory_id='{}-InventoryReport'.format('EufyFull'),
            format=s3.InventoryFormat.CSV,
            frequency=s3.InventoryFrequency.DAILY,
            include_object_versions=s3.InventoryObjectVersion.CURRENT,
            destination=s3.InventoryDestination(
                bucket=self.inventories,
                bucket_owner=core.Aws.ACCOUNT_ID,
                prefix=None))

        for base_name in ['Moonbase', 'Starbase']:
            prefix = 'eufy/{}.cameras.real.world/'.format(base_name).lower()
            infra.bucket.add_inventory(
                objects_prefix=prefix,
                inventory_id='{}-InventoryReport'.format(base_name),
                format=s3.InventoryFormat.CSV,
                frequency=s3.InventoryFrequency.DAILY,
                include_object_versions=s3.InventoryObjectVersion.CURRENT,
                destination=s3.InventoryDestination(
                    bucket=self.inventories,
                    bucket_owner=core.Aws.ACCOUNT_ID,
                    prefix=None))

        # Broadcast inventory creation events...
        self.inventoryAvailable = sns.Topic(
            self,
            'InventoryAvailable',
            display_name='HomeNet-{}-Rtsp-InventoryAvailable'.format(
                infra.landing_zone.zone_name),
            topic_name='HomeNet-{}-Rtsp-InventoryAvailable'.format(
                infra.landing_zone.zone_name))

        self.inventories.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            s3n.SnsDestination(topic=self.inventoryAvailable),
            s3.NotificationKeyFilter(suffix='manifest.json'))

        # Attach debug queue to the notification
        self.inventoryAvailable.add_subscription(
            subs.SqsSubscription(
                raw_message_delivery=True,
                queue=sqs.Queue(
                    self,
                    'InventoryDebugQueue',
                    removal_policy=core.RemovalPolicy.DESTROY,
                    retention_period=core.Duration.days(7),
                    queue_name='HomeNet-{}-RtspInventoryAvailable_Debug'.
                    format(infra.landing_zone.zone_name))))

        # Subscribe the GroundTruth Manifest Generator
        groundtruth = RtspGroundTruthManifestGenerationFunction(
            self,
            'GroundTruthManifest',
            infra=infra,
            topic=self.inventoryAvailable)

        self.inventories.grant_read_write(groundtruth.function.role)

        # Create the RtspNormalizeImage S3 Object Lambda
        RtspNormalizeImageAccessPoint(scope=self,
                                      id='NormalizedImage',
                                      infra=infra)
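
The manifest.json suffix filter above means subscribers are notified once per completed inventory report. A hedged sketch of a Lambda subscriber that walks the manifest to find the CSV data files; the field access follows the S3 inventory manifest format, but the function itself is illustrative:

import json

import boto3

s3 = boto3.client('s3')

def handler(event, context):
    for record in event['Records']:
        # For a Lambda subscribed to the topic, the SNS message body is the
        # S3 object-created event for the manifest.json object.
        s3_event = json.loads(record['Sns']['Message'])
        for rec in s3_event.get('Records', []):
            bucket = rec['s3']['bucket']['name']
            key = rec['s3']['object']['key']
            body = s3.get_object(Bucket=bucket, Key=key)['Body'].read()
            manifest = json.loads(body)
            # Each 'files' entry points at a CSV chunk of inventory rows.
            for data_file in manifest.get('files', []):
                print(data_file['key'])
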
Example #18
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        # Image Bucket
        image_bucket = s3.Bucket(self,
                                 IMG_BUCKET_NAME,
                                 removal_policy=cdk.RemovalPolicy.DESTROY)
        cdk.CfnOutput(self, "imageBucket", value=image_bucket.bucket_name)

        image_bucket.add_cors_rule(
            allowed_methods=[s3.HttpMethods.GET, s3.HttpMethods.PUT],
            allowed_origins=["*"],
            allowed_headers=["*"],
            max_age=3000,
        )

        # Thumbnail Bucket
        resized_image_bucket = s3.Bucket(
            self,
            RESIZED_IMG_BUCKET_NAME,
            removal_policy=cdk.RemovalPolicy.DESTROY)
        cdk.CfnOutput(self,
                      "resizedBucket",
                      value=resized_image_bucket.bucket_name)

        resized_image_bucket.add_cors_rule(
            allowed_methods=[s3.HttpMethods.GET, s3.HttpMethods.PUT],
            allowed_origins=["*"],
            allowed_headers=["*"],
            max_age=3000,
        )
        # S3 Static bucket for website code
        web_bucket = s3.Bucket(
            self,
            WEBSITE_BUCKET_NAME,
            website_index_document="index.html",
            website_error_document="index.html",
            removal_policy=cdk.RemovalPolicy.DESTROY,
            # uncomment this and delete the policy statement below to allow
            # public access to our static website
            # public_read_access=True
        )

        web_policy_statement = iam.PolicyStatement(
            actions=["s3:GetObject"],
            resources=[web_bucket.arn_for_objects("*")],
            principals=[iam.AnyPrincipal()],
            conditions={"IpAddress": {
                "aws:SourceIp": ["139.138.203.36"]
            }},
        )

        web_bucket.add_to_resource_policy(web_policy_statement)

        cdk.CfnOutput(self,
                      "bucketURL",
                      value=web_bucket.bucket_website_domain_name)

        # Deploy site contents to S3 Bucket
        s3_dep.BucketDeployment(
            self,
            "DeployWebsite",
            sources=[s3_dep.Source.asset("./public")],
            destination_bucket=web_bucket,
        )

        # DynamoDB to store image labels
        partition_key = dynamodb.Attribute(name="image",
                                           type=dynamodb.AttributeType.STRING)
        table = dynamodb.Table(
            self,
            "ImageLabels",
            partition_key=partition_key,
            removal_policy=cdk.RemovalPolicy.DESTROY,
        )
        cdk.CfnOutput(self, "ddbTable", value=table.table_name)

        # Lambda layer for Pillow library
        layer = lb.LayerVersion(
            self,
            "pil",
            code=lb.Code.from_asset("reklayer"),
            compatible_runtimes=[lb.Runtime.PYTHON_3_7],
            license="Apache-2.0",
            description=
            "A layer to enable the PIL library in our Rekognition Lambda",
        )

        # Lambda function
        rek_fn = lb.Function(
            self,
            "rekognitionFunction",
            code=lb.Code.from_asset("rekognitionFunction"),
            runtime=lb.Runtime.PYTHON_3_7,
            handler="index.handler",
            timeout=cdk.Duration.seconds(30),
            memory_size=1024,
            layers=[layer],
            environment={
                "TABLE": table.table_name,
                "BUCKET": image_bucket.bucket_name,
                "THUMBBUCKET": resized_image_bucket.bucket_name,
            },
        )

        image_bucket.grant_read(rek_fn)
        resized_image_bucket.grant_write(rek_fn)
        table.grant_write_data(rek_fn)

        rek_fn.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=["rekognition:DetectLabels"],
                                resources=["*"]))

        # Lambda for Synchronous front end
        serviceFn = lb.Function(
            self,
            "serviceFunction",
            code=lb.Code.from_asset("servicelambda"),
            runtime=lb.Runtime.PYTHON_3_7,
            handler="index.handler",
            environment={
                "TABLE": table.table_name,
                "BUCKET": image_bucket.bucket_name,
                "RESIZEDBUCKET": resized_image_bucket.bucket_name,
            },
        )

        image_bucket.grant_write(serviceFn)
        resized_image_bucket.grant_write(serviceFn)
        table.grant_read_write_data(serviceFn)

        # Cognito User Pool Auth
        auto_verified_attrs = cognito.AutoVerifiedAttrs(email=True)
        sign_in_aliases = cognito.SignInAliases(email=True, username=True)
        user_pool = cognito.UserPool(
            self,
            "UserPool",
            self_sign_up_enabled=True,
            auto_verify=auto_verified_attrs,
            sign_in_aliases=sign_in_aliases,
        )

        user_pool_client = cognito.UserPoolClient(self,
                                                  "UserPoolClient",
                                                  user_pool=user_pool,
                                                  generate_secret=False)

        identity_pool = cognito.CfnIdentityPool(
            self,
            "ImageRekognitionIdentityPool",
            allow_unauthenticated_identities=False,
            cognito_identity_providers=[{
                "clientId":
                user_pool_client.user_pool_client_id,
                "providerName":
                user_pool.user_pool_provider_name,
            }],
        )

        # API Gateway
        cors_options = apigw.CorsOptions(allow_origins=apigw.Cors.ALL_ORIGINS,
                                         allow_methods=apigw.Cors.ALL_METHODS)
        api = apigw.LambdaRestApi(
            self,
            "imageAPI",
            default_cors_preflight_options=cors_options,
            handler=serviceFn,
            proxy=False,
        )

        auth = apigw.CfnAuthorizer(
            self,
            "ApiGatewayAuthorizer",
            name="customer-authorizer",
            identity_source="method.request.header.Authorization",
            provider_arns=[user_pool.user_pool_arn],
            rest_api_id=api.rest_api_id,
            # type=apigw.AuthorizationType.COGNITO,
            type="COGNITO_USER_POOLS",
        )

        assumed_by = iam.FederatedPrincipal(
            "cognito-identity.amazonaws.com",
            conditions={
                "StringEquals": {
                    "cognito-identity.amazonaws.com:aud": identity_pool.ref
                },
                "ForAnyValue:StringLike": {
                    "cognito-identity.amazonaws.com:amr": "authenticated"
                },
            },
            assume_role_action="sts:AssumeRoleWithWebIdentity",
        )
        authenticated_role = iam.Role(
            self,
            "ImageRekognitionAuthenticatedRole",
            assumed_by=assumed_by,
        )
        # IAM policy granting users permission to get and put their pictures
        policy_statement = iam.PolicyStatement(
            actions=["s3:GetObject", "s3:PutObject"],
            effect=iam.Effect.ALLOW,
            resources=[
                image_bucket.bucket_arn +
                "/private/${cognito-identity.amazonaws.com:sub}/*",
                image_bucket.bucket_arn +
                "/private/${cognito-identity.amazonaws.com:sub}/",
                resized_image_bucket.bucket_arn +
                "/private/${cognito-identity.amazonaws.com:sub}/*",
                resized_image_bucket.bucket_arn +
                "/private/${cognito-identity.amazonaws.com:sub}/",
            ],
        )

        # IAM policy granting users permission to list their pictures
        list_policy_statement = iam.PolicyStatement(
            actions=["s3:ListBucket"],
            effect=iam.Effect.ALLOW,
            resources=[
                image_bucket.bucket_arn, resized_image_bucket.bucket_arn
            ],
            conditions={
                "StringLike": {
                    "s3:prefix":
                    ["private/${cognito-identity.amazonaws.com:sub}/*"]
                }
            },
        )

        authenticated_role.add_to_policy(policy_statement)
        authenticated_role.add_to_policy(list_policy_statement)

        # Attach role to our Identity Pool
        cognito.CfnIdentityPoolRoleAttachment(
            self,
            "IdentityPoolRoleAttachment",
            identity_pool_id=identity_pool.ref,
            roles={"authenticated": authenticated_role.role_arn},
        )

        # Get some outputs from cognito
        cdk.CfnOutput(self, "UserPoolId", value=user_pool.user_pool_id)
        cdk.CfnOutput(self,
                      "AppClientId",
                      value=user_pool_client.user_pool_client_id)
        cdk.CfnOutput(self, "IdentityPoolId", value=identity_pool.ref)

        # New Amazon API Gateway with AWS Lambda Integration
        success_response = apigw.IntegrationResponse(
            status_code="200",
            response_parameters={
                "method.response.header.Access-Control-Allow-Origin": "'*'"
            },
        )
        error_response = apigw.IntegrationResponse(
            selection_pattern=r"(\n|.)+",
            status_code="500",
            response_parameters={
                "method.response.header.Access-Control-Allow-Origin": "'*'"
            },
        )

        request_template = json.dumps({
            "action":
            "$util.escapeJavaScript($input.params('action'))",
            "key":
            "$util.escapeJavaScript($input.params('key'))",
        })
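
        # With the template above, a request such as
        # GET /images?action=getLabels&key=pic.jpg is reshaped into
        # {"action": "getLabels", "key": "pic.jpg"} before the Lambda is
        # invoked; the example action/key values here are illustrative.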

        lambda_integration = apigw.LambdaIntegration(
            serviceFn,
            proxy=False,
            request_parameters={
                "integration.request.querystring.action":
                "method.request.querystring.action",
                "integration.request.querystring.key":
                "method.request.querystring.key",
            },
            request_templates={"application/json": request_template},
            passthrough_behavior=apigw.PassthroughBehavior.WHEN_NO_TEMPLATES,
            integration_responses=[success_response, error_response],
        )

        imageAPI = api.root.add_resource("images")

        success_resp = apigw.MethodResponse(
            status_code="200",
            response_parameters={
                "method.response.header.Access-Control-Allow-Origin": True
            },
        )
        error_resp = apigw.MethodResponse(
            status_code="500",
            response_parameters={
                "method.response.header.Access-Control-Allow-Origin": True
            },
        )

        # GET /images
        get_method = imageAPI.add_method(
            "GET",
            lambda_integration,
            authorization_type=apigw.AuthorizationType.COGNITO,
            request_parameters={
                "method.request.querystring.action": True,
                "method.request.querystring.key": True,
            },
            method_responses=[success_resp, error_resp],
        )
        # DELETE /images
        delete_method = imageAPI.add_method(
            "DELETE",
            lambda_integration,
            authorization_type=apigw.AuthorizationType.COGNITO,
            request_parameters={
                "method.request.querystring.action": True,
                "method.request.querystring.key": True,
            },
            method_responses=[success_resp, error_resp],
        )

        # Override the authorizer id because it doesn't work when defining it
        # as a param in add_method
        get_method_resource = get_method.node.find_child("Resource")
        get_method_resource.add_property_override("AuthorizerId", auth.ref)
        delete_method_resource = delete_method.node.find_child("Resource")
        delete_method_resource.add_property_override("AuthorizerId", auth.ref)

        # Building SQS queue and DeadLetter Queue
        dl_queue = sqs.Queue(
            self,
            "ImageDLQueue",
            queue_name="ImageDLQueue",
        )

        dl_queue_opts = sqs.DeadLetterQueue(max_receive_count=2,
                                            queue=dl_queue)

        queue = sqs.Queue(
            self,
            "ImageQueue",
            queue_name="ImageQueue",
            visibility_timeout=cdk.Duration.seconds(30),
            receive_message_wait_time=cdk.Duration.seconds(20),
            dead_letter_queue=dl_queue_opts,
        )

        # S3 Bucket Create Notification to SQS
        # Whenever an image is uploaded, add it to the queue

        image_bucket.add_object_created_notification(
            s3n.SqsDestination(queue),
            s3.NotificationKeyFilter(prefix="private/"))
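
A hedged sketch of how a worker outside this stack might drain ImageQueue with long polling; the queue names come from the code above, but the consumer loop is illustrative:

import boto3

sqs = boto3.client('sqs')
queue_url = sqs.get_queue_url(QueueName='ImageQueue')['QueueUrl']

while True:
    resp = sqs.receive_message(QueueUrl=queue_url,
                               MaxNumberOfMessages=10,
                               WaitTimeSeconds=20)
    for msg in resp.get('Messages', []):
        # msg['Body'] holds the S3 object-created event; process it, then
        # delete the message. After 2 failed receives it moves to
        # ImageDLQueue per the dead-letter policy above.
        sqs.delete_message(QueueUrl=queue_url,
                           ReceiptHandle=msg['ReceiptHandle'])
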
Example #19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = aws_ec2.Vpc(
            self,
            "OctemberVPC",
            max_azs=2,
            #      subnet_configuration=[{
            #          "cidrMask": 24,
            #          "name": "Public",
            #          "subnetType": aws_ec2.SubnetType.PUBLIC,
            #        },
            #        {
            #          "cidrMask": 24,
            #          "name": "Private",
            #          "subnetType": aws_ec2.SubnetType.PRIVATE
            #        },
            #        {
            #          "cidrMask": 28,
            #          "name": "Isolated",
            #          "subnetType": aws_ec2.SubnetType.ISOLATED,
            #          "reserved": True
            #        }
            #      ],
            gateway_endpoints={
                "S3":
                aws_ec2.GatewayVpcEndpointOptions(
                    service=aws_ec2.GatewayVpcEndpointAwsService.S3)
            })

        dynamo_db_endpoint = vpc.add_gateway_endpoint(
            "DynamoDbEndpoint",
            service=aws_ec2.GatewayVpcEndpointAwsService.DYNAMODB)

        s3_bucket = s3.Bucket(
            self,
            "s3bucket",
            bucket_name="octember-bizcard-{region}-{account}".format(
                region=core.Aws.REGION, account=core.Aws.ACCOUNT_ID))

        api = apigw.RestApi(
            self,
            "BizcardImageUploader",
            rest_api_name="BizcardImageUploader",
            description="This service handles uploading bizcard images into S3.",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            binary_media_types=["image/png", "image/jpg"],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        rest_api_role = aws_iam.Role(
            self,
            "ApiGatewayRoleForS3",
            role_name="ApiGatewayRoleForS3FullAccess",
            assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonS3FullAccess")
            ])

        list_objects_responses = [
            apigw.IntegrationResponse(
                status_code="200",
                #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html#aws_cdk.aws_apigateway.IntegrationResponse.response_parameters
                # The response parameters from the backend response that API Gateway sends to the method response.
                # Use the destination as the key and the source as the value:
                #  - The destination must be an existing response parameter in the MethodResponse property.
                #  - The source must be an existing method request parameter or a static value.
                response_parameters={
                    'method.response.header.Timestamp':
                    'integration.response.header.Date',
                    'method.response.header.Content-Length':
                    'integration.response.header.Content-Length',
                    'method.response.header.Content-Type':
                    'integration.response.header.Content-Type'
                }),
            apigw.IntegrationResponse(status_code="400",
                                      selection_pattern=r"4\d{2}"),
            apigw.IntegrationResponse(status_code="500",
                                      selection_pattern=r"5\d{2}")
        ]

        list_objects_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses)

        get_s3_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path='/',
            options=list_objects_integration_options)

        api.root.add_method(
            "GET",
            get_s3_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={'method.request.header.Content-Type': False})

        get_s3_folder_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses,
            #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html#aws_cdk.aws_apigateway.IntegrationOptions.request_parameters
            # Specify request parameters as key-value pairs (string-to-string mappings), with a destination as the key and a source as the value.
            # The source must be an existing method request parameter or a static value.
            request_parameters={
                "integration.request.path.bucket": "method.request.path.folder"
            })

        get_s3_folder_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path="{bucket}",
            options=get_s3_folder_integration_options)

        s3_folder = api.root.add_resource('{folder}')
        s3_folder.add_method(
            "GET",
            get_s3_folder_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True
            })

        get_s3_item_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=list_objects_responses,
            request_parameters={
                "integration.request.path.bucket":
                "method.request.path.folder",
                "integration.request.path.object": "method.request.path.item"
            })

        get_s3_item_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="GET",
            path="{bucket}/{object}",
            options=get_s3_item_integration_options)

        s3_item = s3_folder.add_resource('{item}')
        s3_item.add_method(
            "GET",
            get_s3_item_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Timestamp': False,
                        'method.response.header.Content-Length': False,
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True,
                'method.request.path.item': True
            })

        put_s3_item_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[
                apigw.IntegrationResponse(status_code="200"),
                apigw.IntegrationResponse(status_code="400",
                                          selection_pattern=r"4\d{2}"),
                apigw.IntegrationResponse(status_code="500",
                                          selection_pattern=r"5\d{2}")
            ],
            request_parameters={
                "integration.request.header.Content-Type":
                "method.request.header.Content-Type",
                "integration.request.path.bucket":
                "method.request.path.folder",
                "integration.request.path.object": "method.request.path.item"
            })

        put_s3_item_integration = apigw.AwsIntegration(
            service="s3",
            integration_http_method="PUT",
            path="{bucket}/{object}",
            options=put_s3_item_integration_options)

        s3_item.add_method(
            "PUT",
            put_s3_item_integration,
            authorization_type=apigw.AuthorizationType.IAM,
            api_key_required=False,
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_parameters={
                        'method.response.header.Content-Type': False
                    },
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.folder': True,
                'method.request.path.item': True
            })

        ddb_table = dynamodb.Table(
            self,
            "BizcardImageMetaInfoDdbTable",
            table_name="OctemberBizcardImgMeta",
            partition_key=dynamodb.Attribute(
                name="image_id", type=dynamodb.AttributeType.STRING),
            billing_mode=dynamodb.BillingMode.PROVISIONED,
            read_capacity=15,
            write_capacity=5)

        img_kinesis_stream = kinesis.Stream(
            self, "BizcardImagePath", stream_name="octember-bizcard-image")

        # create lambda function
        trigger_textract_lambda_fn = _lambda.Function(
            self,
            "TriggerTextExtractorFromImage",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="TriggerTextExtractorFromImage",
            handler="trigger_text_extract_from_s3_image.lambda_handler",
            description="Trigger to extract text from an image in S3",
            code=_lambda.Code.asset(
                "./src/main/python/TriggerTextExtractFromS3Image"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'DDB_TABLE_NAME': ddb_table.table_name,
                'KINESIS_STREAM_NAME': img_kinesis_stream.stream_name
            },
            timeout=core.Duration.minutes(5))

        ddb_table_rw_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            resources=[ddb_table.table_arn],
            actions=[
                "dynamodb:BatchGetItem", "dynamodb:Describe*",
                "dynamodb:List*", "dynamodb:GetItem", "dynamodb:Query",
                "dynamodb:Scan", "dynamodb:BatchWriteItem",
                "dynamodb:DeleteItem", "dynamodb:PutItem",
                "dynamodb:UpdateItem", "dax:Describe*", "dax:List*",
                "dax:GetItem", "dax:BatchGetItem", "dax:Query", "dax:Scan",
                "dax:BatchWriteItem", "dax:DeleteItem", "dax:PutItem",
                "dax:UpdateItem"
            ])

        trigger_textract_lambda_fn.add_to_role_policy(
            ddb_table_rw_policy_statement)
        trigger_textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[img_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:Get*", "kinesis:List*",
                                        "kinesis:Describe*",
                                        "kinesis:PutRecord",
                                        "kinesis:PutRecords"
                                    ]))

        # assign notification for the s3 event type (e.g. OBJECT_CREATED)
        s3_event_filter = s3.NotificationKeyFilter(prefix="bizcard-raw-img/",
                                                   suffix=".jpg")
        s3_event_source = S3EventSource(s3_bucket,
                                        events=[s3.EventType.OBJECT_CREATED],
                                        filters=[s3_event_filter])
        trigger_textract_lambda_fn.add_event_source(s3_event_source)

        #XXX: https://github.com/aws/aws-cdk/issues/2240
        # To avoid creating extra Lambda functions with names like LogRetentionaae0aa3c5b4d4f87b02d85b201efdd8a
        # if log_retention=aws_logs.RetentionDays.THREE_DAYS is added to the constructor props
        log_group = aws_logs.LogGroup(
            self,
            "TriggerTextractLogGroup",
            log_group_name="/aws/lambda/TriggerTextExtractorFromImage",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(trigger_textract_lambda_fn)

        text_kinesis_stream = kinesis.Stream(
            self, "BizcardTextData", stream_name="octember-bizcard-txt")

        textract_lambda_fn = _lambda.Function(
            self,
            "GetTextFromImage",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="GetTextFromImage",
            handler="get_text_from_s3_image.lambda_handler",
            description="extract text from an image in S3",
            code=_lambda.Code.asset("./src/main/python/GetTextFromS3Image"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'DDB_TABLE_NAME': ddb_table.table_name,
                'KINESIS_STREAM_NAME': text_kinesis_stream.stream_name
            },
            timeout=core.Duration.minutes(5))

        textract_lambda_fn.add_to_role_policy(ddb_table_rw_policy_statement)
        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[text_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:Get*", "kinesis:List*",
                                        "kinesis:Describe*",
                                        "kinesis:PutRecord",
                                        "kinesis:PutRecords"
                                    ]))

        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        s3_bucket.bucket_arn, "{}/*".format(
                            s3_bucket.bucket_arn)
                    ],
                    "actions": [
                        "s3:AbortMultipartUpload", "s3:GetBucketLocation",
                        "s3:GetObject", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads", "s3:PutObject"
                    ]
                }))

        textract_lambda_fn.add_to_role_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=["textract:*"]))

        img_kinesis_event_source = KinesisEventSource(
            img_kinesis_stream,
            batch_size=100,
            starting_position=_lambda.StartingPosition.LATEST)
        textract_lambda_fn.add_event_source(img_kinesis_event_source)
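
        # A minimal sketch of the consumer side (assumed; not the actual
        # get_text_from_s3_image.lambda_handler). Kinesis delivers up to
        # `batch_size` base64-encoded records per invocation:
        #
        #   import base64
        #   def lambda_handler(event, context):
        #       for record in event['Records']:
        #           payload = base64.b64decode(record['kinesis']['data'])  # assumed JSON
        #           ...  # run Textract, then fan out the results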

        log_group = aws_logs.LogGroup(
            self,
            "GetTextFromImageLogGroup",
            log_group_name="/aws/lambda/GetTextFromImage",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(textract_lambda_fn)

        sg_use_bizcard_es = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard elasticsearch client',
            security_group_name='use-octember-bizcard-es')
        core.Tags.of(sg_use_bizcard_es).add('Name', 'use-octember-bizcard-es')

        sg_bizcard_es = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard elasticsearch',
            security_group_name='octember-bizcard-es')
        core.Tags.of(sg_bizcard_es).add('Name', 'octember-bizcard-es')

        sg_bizcard_es.add_ingress_rule(peer=sg_bizcard_es,
                                       connection=aws_ec2.Port.all_tcp(),
                                       description='octember-bizcard-es')
        sg_bizcard_es.add_ingress_rule(peer=sg_use_bizcard_es,
                                       connection=aws_ec2.Port.all_tcp(),
                                       description='use-octember-bizcard-es')

        sg_ssh_access = aws_ec2.SecurityGroup(
            self,
            "BastionHostSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for bastion host',
            security_group_name='octember-bastion-host-sg')
        core.Tags.of(sg_ssh_access).add('Name', 'octember-bastion-host')
        sg_ssh_access.add_ingress_rule(peer=aws_ec2.Peer.any_ipv4(),
                                       connection=aws_ec2.Port.tcp(22),
                                       description='ssh access')

        bastion_host = aws_ec2.BastionHostLinux(
            self,
            "BastionHost",
            vpc=vpc,
            instance_type=aws_ec2.InstanceType('t3.nano'),
            security_group=sg_ssh_access,
            subnet_selection=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC))
        bastion_host.instance.add_security_group(sg_use_bizcard_es)

        #XXX: aws cdk elasticsearch example - https://github.com/aws/aws-cdk/issues/2873
        es_cfn_domain = aws_elasticsearch.CfnDomain(
            self,
            'BizcardSearch',
            elasticsearch_cluster_config={
                "dedicatedMasterCount": 3,
                "dedicatedMasterEnabled": True,
                "dedicatedMasterType": "t2.medium.elasticsearch",
                "instanceCount": 2,
                "instanceType": "t2.medium.elasticsearch",
                "zoneAwarenessEnabled": True
            },
            ebs_options={
                "ebsEnabled": True,
                "volumeSize": 10,
                "volumeType": "gp2"
            },
            domain_name="octember-bizcard",
            elasticsearch_version="7.9",
            encryption_at_rest_options={"enabled": False},
            access_policies={
                "Version":
                "2012-10-17",
                "Statement": [{
                    "Effect":
                    "Allow",
                    "Principal": {
                        "AWS": "*"
                    },
                    "Action":
                    ["es:Describe*", "es:List*", "es:Get*", "es:ESHttp*"],
                    "Resource":
                    self.format_arn(service="es",
                                    resource="domain",
                                    resource_name="octember-bizcard/*")
                }]
            },
            snapshot_options={"automatedSnapshotStartHour": 17},
            vpc_options={
                "securityGroupIds": [sg_bizcard_es.security_group_id],
                "subnetIds":
                vpc.select_subnets(
                    subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids
            })
        core.Tags.of(es_cfn_domain).add('Name', 'octember-bizcard-es')

        s3_lib_bucket_name = self.node.try_get_context("lib_bucket_name")

        #XXX: https://github.com/aws/aws-cdk/issues/1342
        s3_lib_bucket = s3.Bucket.from_bucket_name(self, id,
                                                   s3_lib_bucket_name)
        es_lib_layer = _lambda.LayerVersion(
            self,
            "ESLib",
            layer_version_name="es-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(s3_lib_bucket,
                                          "var/octember-es-lib.zip"))

        redis_lib_layer = _lambda.LayerVersion(
            self,
            "RedisLib",
            layer_version_name="redis-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(s3_lib_bucket,
                                          "var/octember-redis-lib.zip"))

        #XXX: Deploy lambda in VPC - https://github.com/aws/aws-cdk/issues/1342
        upsert_to_es_lambda_fn = _lambda.Function(
            self,
            "UpsertBizcardToES",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="UpsertBizcardToElasticSearch",
            handler="upsert_bizcard_to_es.lambda_handler",
            description="Upsert bizcard text into elasticsearch",
            code=_lambda.Code.from_asset("./src/main/python/UpsertBizcardToES"),
            environment={
                'ES_HOST': es_cfn_domain.attr_domain_endpoint,
                'ES_INDEX': 'octember_bizcard',
                'ES_TYPE': 'bizcard'
            },
            timeout=core.Duration.minutes(5),
            layers=[es_lib_layer],
            security_groups=[sg_use_bizcard_es],
            vpc=vpc)

        text_kinesis_event_source = KinesisEventSource(
            text_kinesis_stream,
            batch_size=99,
            starting_position=_lambda.StartingPosition.LATEST)
        upsert_to_es_lambda_fn.add_event_source(text_kinesis_event_source)

        log_group = aws_logs.LogGroup(
            self,
            "UpsertBizcardToESLogGroup",
            log_group_name="/aws/lambda/UpsertBizcardToElasticSearch",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(upsert_to_es_lambda_fn)

        firehose_role_policy_doc = aws_iam.PolicyDocument()
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        s3_bucket.bucket_arn, "{}/*".format(
                            s3_bucket.bucket_arn)
                    ],
                    "actions": [
                        "s3:AbortMultipartUpload", "s3:GetBucketLocation",
                        "s3:GetObject", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads", "s3:PutObject"
                    ]
                }))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=[
                                        "glue:GetTable",
                                        "glue:GetTableVersion",
                                        "glue:GetTableVersions"
                                    ]))

        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=[text_kinesis_stream.stream_arn],
                                    actions=[
                                        "kinesis:DescribeStream",
                                        "kinesis:GetShardIterator",
                                        "kinesis:GetRecords"
                                    ]))

        firehose_log_group_name = "/aws/kinesisfirehose/octember-bizcard-txt-to-s3"
        firehose_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                #XXX: The ARN will be formatted as follows:
                # arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
                resources=[
                    self.format_arn(service="logs",
                                    resource="log-group",
                                    resource_name="{}:log-stream:*".format(
                                        firehose_log_group_name),
                                    sep=":")
                ],
                actions=["logs:PutLogEvents"]))

        firehose_role = aws_iam.Role(
            self,
            "FirehoseDeliveryRole",
            role_name="FirehoseDeliveryRole",
            assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"),
            #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
            inline_policies={"firehose_role_policy": firehose_role_policy_doc})

        bizcard_text_to_s3_delivery_stream = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "BizcardTextToS3",
            delivery_stream_name="octember-bizcard-txt-to-s3",
            delivery_stream_type="KinesisStreamAsSource",
            kinesis_stream_source_configuration={
                "kinesisStreamArn": text_kinesis_stream.stream_arn,
                "roleArn": firehose_role.role_arn
            },
            extended_s3_destination_configuration={
                "bucketArn": s3_bucket.bucket_arn,
                "bufferingHints": {
                    "intervalInSeconds": 60,
                    "sizeInMBs": 1
                },
                "cloudWatchLoggingOptions": {
                    "enabled": True,
                    "logGroupName": firehose_log_group_name,
                    "logStreamName": "S3Delivery"
                },
                "compressionFormat": "GZIP",
                "prefix": "bizcard-text/",
                "roleArn": firehose_role.role_arn
            })
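
        # With these buffering hints, Firehose flushes to
        # s3://<bucket>/bizcard-text/ whenever 1 MiB accumulates or 60 seconds
        # elapse, whichever comes first, writing each batch as a gzip object.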

        sg_use_bizcard_es_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchCacheClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard search query cache client',
            security_group_name='use-octember-bizcard-es-cache')
        core.Tags.of(sg_use_bizcard_es_cache).add(
            'Name', 'use-octember-bizcard-es-cache')

        sg_bizcard_es_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardSearchCacheSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard search query cache',
            security_group_name='octember-bizcard-es-cache')
        core.Tags.of(sg_bizcard_es_cache).add('Name',
                                              'octember-bizcard-es-cache')

        sg_bizcard_es_cache.add_ingress_rule(
            peer=sg_use_bizcard_es_cache,
            connection=aws_ec2.Port.tcp(6379),
            description='use-octember-bizcard-es-cache')

        es_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup(
            self,
            "QueryCacheSubnetGroup",
            description="subnet group for octember-bizcard-es-cache",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            cache_subnet_group_name='octember-bizcard-es-cache')

        es_query_cache = aws_elasticache.CfnCacheCluster(
            self,
            "BizcardSearchQueryCache",
            cache_node_type="cache.t3.small",
            num_cache_nodes=1,
            engine="redis",
            engine_version="5.0.5",
            auto_minor_version_upgrade=False,
            cluster_name="octember-bizcard-es-cache",
            snapshot_retention_limit=3,
            snapshot_window="17:00-19:00",
            preferred_maintenance_window="mon:19:00-mon:20:30",
            #XXX: Do not use a reference for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098
            #cache_subnet_group_name=es_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC
            cache_subnet_group_name='octember-bizcard-es-cache',
            vpc_security_group_ids=[sg_bizcard_es_cache.security_group_id])

        #XXX: If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster.
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-cache-cluster.html#cfn-elasticache-cachecluster-cachesubnetgroupname
        es_query_cache.add_depends_on(es_query_cache_subnet_group)

        #XXX: add more than 2 security groups
        # https://github.com/aws/aws-cdk/blob/ea10f0d141a48819ec0000cd7905feda993870a9/packages/%40aws-cdk/aws-lambda/lib/function.ts#L387
        # https://github.com/aws/aws-cdk/issues/1555
        # https://github.com/aws/aws-cdk/pull/5049
        bizcard_search_lambda_fn = _lambda.Function(
            self,
            "BizcardSearchServer",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="BizcardSearchProxy",
            handler="es_search_bizcard.lambda_handler",
            description="Proxy server to search bizcard text",
            code=_lambda.Code.from_asset("./src/main/python/SearchBizcard"),
            environment={
                'ES_HOST': es_cfn_domain.attr_domain_endpoint,
                'ES_INDEX': 'octember_bizcard',
                'ES_TYPE': 'bizcard',
                'ELASTICACHE_HOST': es_query_cache.attr_redis_endpoint_address
            },
            timeout=core.Duration.minutes(1),
            layers=[es_lib_layer, redis_lib_layer],
            security_groups=[sg_use_bizcard_es, sg_use_bizcard_es_cache],
            vpc=vpc)

        #XXX: create API Gateway + LambdaProxy
        search_api = apigw.LambdaRestApi(
            self,
            "BizcardSearchAPI",
            handler=bizcard_search_lambda_fn,
            proxy=False,
            rest_api_name="BizcardSearch",
            description="This service serves searching bizcard text.",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        bizcard_search = search_api.root.add_resource('search')
        bizcard_search.add_method(
            "GET",
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ])
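
        # Surface the invoke URL of the deployed "v1" stage (sketch; the
        # output name is illustrative):
        core.CfnOutput(self, 'BizcardSearchApiUrl', value=search_api.url)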

        sg_use_bizcard_graph_db = aws_ec2.SecurityGroup(
            self,
            "BizcardGraphDbClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard graph db client',
            security_group_name='use-octember-bizcard-neptune')
        core.Tags.of(sg_use_bizcard_graph_db).add(
            'Name', 'use-octember-bizcard-neptune')

        sg_bizcard_graph_db = aws_ec2.SecurityGroup(
            self,
            "BizcardGraphDbSG",
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for octember bizcard graph db',
            security_group_name='octember-bizcard-neptune')
        core.Tags.of(sg_bizcard_graph_db).add('Name',
                                              'octember-bizcard-neptune')

        sg_bizcard_graph_db.add_ingress_rule(
            peer=sg_bizcard_graph_db,
            connection=aws_ec2.Port.tcp(8182),
            description='octember-bizcard-neptune')
        sg_bizcard_graph_db.add_ingress_rule(
            peer=sg_use_bizcard_graph_db,
            connection=aws_ec2.Port.tcp(8182),
            description='use-octember-bizcard-neptune')

        bizcard_graph_db_subnet_group = aws_neptune.CfnDBSubnetGroup(
            self,
            "NeptuneSubnetGroup",
            db_subnet_group_description=
            "subnet group for octember-bizcard-neptune",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            db_subnet_group_name='octember-bizcard-neptune')

        bizcard_graph_db = aws_neptune.CfnDBCluster(
            self,
            "BizcardGraphDB",
            availability_zones=vpc.availability_zones,
            db_subnet_group_name=bizcard_graph_db_subnet_group.
            db_subnet_group_name,
            db_cluster_identifier="octember-bizcard",
            backup_retention_period=1,
            preferred_backup_window="08:45-09:15",
            preferred_maintenance_window="sun:18:00-sun:18:30",
            vpc_security_group_ids=[sg_bizcard_graph_db.security_group_id])
        bizcard_graph_db.add_depends_on(bizcard_graph_db_subnet_group)

        bizcard_graph_db_instance = aws_neptune.CfnDBInstance(
            self,
            "BizcardGraphDBInstance",
            db_instance_class="db.r5.large",
            allow_major_version_upgrade=False,
            auto_minor_version_upgrade=False,
            availability_zone=vpc.availability_zones[0],
            db_cluster_identifier=bizcard_graph_db.db_cluster_identifier,
            db_instance_identifier="octember-bizcard",
            preferred_maintenance_window="sun:18:00-sun:18:30")
        bizcard_graph_db_instance.add_depends_on(bizcard_graph_db)

        bizcard_graph_db_replica_instance = aws_neptune.CfnDBInstance(
            self,
            "BizcardGraphDBReplicaInstance",
            db_instance_class="db.r5.large",
            allow_major_version_upgrade=False,
            auto_minor_version_upgrade=False,
            availability_zone=vpc.availability_zones[-1],
            db_cluster_identifier=bizcard_graph_db.db_cluster_identifier,
            db_instance_identifier="octember-bizcard-replica",
            preferred_maintenance_window="sun:18:00-sun:18:30")
        bizcard_graph_db_replica_instance.add_depends_on(bizcard_graph_db)
        bizcard_graph_db_replica_instance.add_depends_on(
            bizcard_graph_db_instance)

        gremlinpython_lib_layer = _lambda.LayerVersion(
            self,
            "GremlinPythonLib",
            layer_version_name="gremlinpython-lib",
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            code=_lambda.Code.from_bucket(
                s3_lib_bucket, "var/octember-gremlinpython-lib.zip"))

        #XXX: https://github.com/aws/aws-cdk/issues/1342
        upsert_to_neptune_lambda_fn = _lambda.Function(
            self,
            "UpsertBizcardToGraphDB",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="UpsertBizcardToNeptune",
            handler="upsert_bizcard_to_graph_db.lambda_handler",
            description="Upsert bizcard into neptune",
            code=_lambda.Code.from_asset(
                "./src/main/python/UpsertBizcardToGraphDB"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_endpoint,
                'NEPTUNE_PORT': bizcard_graph_db.attr_port
            },
            timeout=core.Duration.minutes(5),
            layers=[gremlinpython_lib_layer],
            security_groups=[sg_use_bizcard_graph_db],
            vpc=vpc)

        upsert_to_neptune_lambda_fn.add_event_source(text_kinesis_event_source)

        log_group = aws_logs.LogGroup(
            self,
            "UpsertBizcardToGraphDBLogGroup",
            log_group_name="/aws/lambda/UpsertBizcardToNeptune",
            retention=aws_logs.RetentionDays.THREE_DAYS)
        log_group.grant_write(upsert_to_neptune_lambda_fn)

        sg_use_bizcard_neptune_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardNeptuneCacheClientSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard recommendation query cache client',
            security_group_name='use-octember-bizcard-neptune-cache')
        core.Tags.of(sg_use_bizcard_neptune_cache).add(
            'Name', 'use-octember-bizcard-neptune-cache')

        sg_bizcard_neptune_cache = aws_ec2.SecurityGroup(
            self,
            "BizcardNeptuneCacheSG",
            vpc=vpc,
            allow_all_outbound=True,
            description=
            'security group for octember bizcard recommendation query cache',
            security_group_name='octember-bizcard-neptune-cache')
        core.Tags.of(sg_bizcard_neptune_cache).add(
            'Name', 'octember-bizcard-neptune-cache')

        sg_bizcard_neptune_cache.add_ingress_rule(
            peer=sg_use_bizcard_neptune_cache,
            connection=aws_ec2.Port.tcp(6379),
            description='use-octember-bizcard-neptune-cache')

        recomm_query_cache_subnet_group = aws_elasticache.CfnSubnetGroup(
            self,
            "RecommQueryCacheSubnetGroup",
            description="subnet group for octember-bizcard-neptune-cache",
            subnet_ids=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids,
            cache_subnet_group_name='octember-bizcard-neptune-cache')

        recomm_query_cache = aws_elasticache.CfnCacheCluster(
            self,
            "BizcardRecommQueryCache",
            cache_node_type="cache.t3.small",
            num_cache_nodes=1,
            engine="redis",
            engine_version="5.0.5",
            auto_minor_version_upgrade=False,
            cluster_name="octember-bizcard-neptune-cache",
            snapshot_retention_limit=3,
            snapshot_window="17:00-19:00",
            preferred_maintenance_window="mon:19:00-mon:20:30",
            #XXX: Do not use a reference for "cache_subnet_group_name" - https://github.com/aws/aws-cdk/issues/3098
            #cache_subnet_group_name=recomm_query_cache_subnet_group.cache_subnet_group_name, # Redis cluster goes to wrong VPC
            cache_subnet_group_name='octember-bizcard-neptune-cache',
            vpc_security_group_ids=[
                sg_bizcard_neptune_cache.security_group_id
            ])

        recomm_query_cache.add_depends_on(recomm_query_cache_subnet_group)

        bizcard_recomm_lambda_fn = _lambda.Function(
            self,
            "BizcardRecommender",
            runtime=_lambda.Runtime.PYTHON_3_7,
            function_name="BizcardRecommender",
            handler="neptune_recommend_bizcard.lambda_handler",
            description="This service serves PYMK(People You May Know).",
            code=_lambda.Code.from_asset("./src/main/python/RecommendBizcard"),
            environment={
                'REGION_NAME': core.Aws.REGION,
                'NEPTUNE_ENDPOINT': bizcard_graph_db.attr_read_endpoint,
                'NEPTUNE_PORT': bizcard_graph_db.attr_port,
                'ELASTICACHE_HOST':
                recomm_query_cache.attr_redis_endpoint_address
            },
            timeout=core.Duration.minutes(1),
            layers=[gremlinpython_lib_layer, redis_lib_layer],
            security_groups=[
                sg_use_bizcard_graph_db, sg_use_bizcard_neptune_cache
            ],
            vpc=vpc)

        #XXX: create API Gateway + LambdaProxy
        recomm_api = apigw.LambdaRestApi(
            self,
            "BizcardRecommendAPI",
            handler=bizcard_recomm_lambda_fn,
            proxy=False,
            rest_api_name="BizcardRecommend",
            description="This service serves PYMK(People You May Know).",
            endpoint_types=[apigw.EndpointType.REGIONAL],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"))

        bizcard_recomm = recomm_api.root.add_resource('pymk')
        bizcard_recomm.add_method(
            "GET",
            method_responses=[
                apigw.MethodResponse(
                    status_code="200",
                    response_models={'application/json': apigw.EmptyModel()}),
                apigw.MethodResponse(status_code="400"),
                apigw.MethodResponse(status_code="500")
            ])

        sagemaker_notebook_role_policy_doc = aws_iam.PolicyDocument()
        sagemaker_notebook_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        "arn:aws:s3:::aws-neptune-notebook",
                        "arn:aws:s3:::aws-neptune-notebook/*"
                    ],
                    "actions": ["s3:GetObject", "s3:ListBucket"]
                }))

        sagemaker_notebook_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect":
                    aws_iam.Effect.ALLOW,
                    "resources": [
                        "arn:aws:neptune-db:{region}:{account}:{cluster_id}/*".
                        format(region=core.Aws.REGION,
                               account=core.Aws.ACCOUNT_ID,
                               cluster_id=bizcard_graph_db.
                               attr_cluster_resource_id)
                    ],
                    "actions": ["neptune-db:connect"]
                }))

        sagemaker_notebook_role = aws_iam.Role(
            self,
            'SageMakerNotebookForNeptuneWorkbenchRole',
            role_name='AWSNeptuneNotebookRole-OctemberBizcard',
            assumed_by=aws_iam.ServicePrincipal('sagemaker.amazonaws.com'),
            #XXX: use inline_policies to work around https://github.com/aws/aws-cdk/issues/5221
            inline_policies={
                'AWSNeptuneNotebook': sagemaker_notebook_role_policy_doc
            })

        neptune_wb_lifecycle_content = '''#!/bin/bash
sudo -u ec2-user -i <<'EOF'
echo "export GRAPH_NOTEBOOK_AUTH_MODE=DEFAULT" >> ~/.bashrc
echo "export GRAPH_NOTEBOOK_HOST={NeptuneClusterEndpoint}" >> ~/.bashrc
echo "export GRAPH_NOTEBOOK_PORT={NeptuneClusterPort}" >> ~/.bashrc
echo "export NEPTUNE_LOAD_FROM_S3_ROLE_ARN=''" >> ~/.bashrc
echo "export AWS_REGION={AWS_Region}" >> ~/.bashrc
aws s3 cp s3://aws-neptune-notebook/graph_notebook.tar.gz /tmp/graph_notebook.tar.gz
rm -rf /tmp/graph_notebook
tar -zxvf /tmp/graph_notebook.tar.gz -C /tmp
/tmp/graph_notebook/install.sh
EOF
'''.format(NeptuneClusterEndpoint=bizcard_graph_db.attr_endpoint,
           NeptuneClusterPort=bizcard_graph_db.attr_port,
           AWS_Region=core.Aws.REGION)

        neptune_wb_lifecycle_config_prop = aws_sagemaker.CfnNotebookInstanceLifecycleConfig.NotebookInstanceLifecycleHookProperty(
            content=core.Fn.base64(neptune_wb_lifecycle_content))
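
        # The on_start hook requires base64-encoded content; the script above
        # then runs as ec2-user every time the notebook instance starts.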

        neptune_wb_lifecycle_config = aws_sagemaker.CfnNotebookInstanceLifecycleConfig(
            self,
            'NeptuneWorkbenchLifeCycleConfig',
            notebook_instance_lifecycle_config_name=
            'AWSNeptuneWorkbenchOctemberBizcardLCConfig',
            on_start=[neptune_wb_lifecycle_config_prop])

        neptune_workbench = aws_sagemaker.CfnNotebookInstance(
            self,
            'NeptuneWorkbench',
            instance_type='ml.t2.medium',
            role_arn=sagemaker_notebook_role.role_arn,
            lifecycle_config_name=neptune_wb_lifecycle_config.
            notebook_instance_lifecycle_config_name,
            notebook_instance_name='OctemberBizcard-NeptuneWorkbench',
            root_access='Disabled',
            security_group_ids=[sg_use_bizcard_graph_db.security_group_id],
            subnet_id=bizcard_graph_db_subnet_group.subnet_ids[0])
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """
        initialize function for CDK
        """
        super().__init__(scope, construct_id, **kwargs)

        # -------------------------------
        # S3 Bucket for Manifests
        # -------------------------------

        qs_gov_bucket = s3.Bucket(
            self,
            id=f"{cf.PROJECT}-Bucket",
        )
        bucket_name = qs_gov_bucket.bucket_name

        # -------------------------------
        # IAM
        # -------------------------------

        list_roles_policy = iam.ManagedPolicy(
            self,
            id=f"{cf.PROJECT}-ListRolesPolicy",
            managed_policy_name=f"{cf.PROJECT}-ListRolesPolicy",
            description=None,
            path="/",
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=["iam:ListRoles", "iam:ListAccountAliases"],
                )
            ],
        )

        federated_quicksight_policy = iam.ManagedPolicy(
            self,
            id=f"{cf.PROJECT}-FederatedQuickSightPolicy",
            managed_policy_name=f"{cf.PROJECT}-FederatedQuickSightPolicy",
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:iam::{cf.ACCOUNT}:saml-provider/{cf.OKTA_IDP_NAME}"
                    ],
                    actions=["sts:AssumeRoleWithSAML"],
                    conditions={
                        "StringEquals": {
                            "saml:aud": "https://signin.aws.amazon.com/saml"
                        }
                    },
                )
            ],
        )

        okta_federated_principal = iam.FederatedPrincipal(
            federated=
            f"arn:aws:iam::{cf.ACCOUNT}:saml-provider/{cf.OKTA_IDP_NAME}",
            assume_role_action="sts:AssumeRoleWithSAML",
            conditions={
                "StringEquals": {
                    "SAML:aud": "https://signin.aws.amazon.com/saml"
                }
            },
        )

        federated_quicksight_role = iam.Role(
            self,
            id=f"{cf.PROJECT}-{cf.QS_FEDERATED_ROLE_NAME}",
            role_name=f"{cf.QS_FEDERATED_ROLE_NAME}",
            assumed_by=okta_federated_principal,
            description=
            "Allow Okta to Federate Login & User Creation to QuickSight",
            managed_policies=[federated_quicksight_policy],
        )

        iam.User(
            self,
            id=f"{cf.PROJECT}-OktaSSOUser",
            user_name=f"{cf.PROJECT}-OktaSSOUser",
            managed_policies=[list_roles_policy],
        )

        # -------------------------------
        # Lambda Layers
        # -------------------------------

        path_src = os.path.join(cf.PATH_SRC, "")

        sp.call(["make", "bundle"], cwd=path_src)

        requests_layer = _lambda.LayerVersion(
            self,
            f"{cf.PROJECT}-requests-layer",
            code=_lambda.Code.from_asset(os.path.join(path_src,
                                                      "requests.zip")))

        sp.call(["make", "clean"], cwd=path_src)

        # -------------------------------
        # Lambda Functions
        # -------------------------------

        # iam role for Lambdas

        qs_governance_policy = iam.ManagedPolicy(
            self,
            id=f"{cf.PROJECT}-LambdaPolicy",
            managed_policy_name=f"{cf.PROJECT}-LambdaPolicy",
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=["ses:SendEmail", "ses:SendRawEmail"],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:secretsmanager:{cf.REGION}:{cf.ACCOUNT}:secret:{cf.OKTA_SECRET_ID}*",
                        f"arn:aws:secretsmanager:{cf.REGION}:{cf.ACCOUNT}:secret:{cf.SLACK_SECRET_ID}*"
                    ],
                    actions=[
                        "secretsmanager:GetSecretValue",
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:iam::{cf.ACCOUNT}:policy/{cf.QS_PREFIX}*"
                    ],
                    actions=[
                        "iam:CreatePolicy",
                        "iam:GetPolicy",
                        "iam:DeletePolicy",
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=["iam:ListPolicies"],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=[
                        "glue:GetDatabase",
                        "glue:GetDatabases",
                        "glue:GetTable",
                        "glue:GetPartitions",
                        "glue:GetPartition",
                        "glue:GetTables",
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=["quicksight:*", "ds:*"],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:s3:::{bucket_name}/*",
                        f"arn:aws:s3:::{bucket_name}*"
                    ],
                    actions=["s3:*"],
                ),
            ],
        )

        qs_governance_role = iam.Role(
            self,
            id=f"{cf.PROJECT}-QuickSightPermissionMappingRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
                qs_governance_policy,
            ],
        )

        # Lambdas

        get_okta_information_lambda = _lambda.Function(
            self,
            id=f"{cf.PROJECT}-GetOktaInfo",
            function_name=f"{cf.PROJECT}-GetOktaInformation",
            handler="get_okta_information.handler",
            role=qs_governance_role,
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(os.path.join(cf.PATH_SRC, "pkg")),
            environment={
                "OKTA_SECRET_ID": cf.OKTA_SECRET_ID,
                "QS_PREFIX": cf.QS_PREFIX,
                "QS_GOVERNANCE_BUCKET": bucket_name,
                "QS_FEDERATED_ROLE_NAME": f"{cf.QS_FEDERATED_ROLE_NAME}",
                "S3_PREFIX_USERS": cf.S3_PREFIX_USERS,
                "SLACK_SECRET_ID": cf.SLACK_SECRET_ID
            },
            memory_size=256,
            timeout=core.Duration.seconds(180),
            layers=[requests_layer],
        )

        # Lambda Okta-to-QuickSight mappers

        qs_user_governance_lambda = _lambda.Function(
            self,
            id=f"{cf.PROJECT}-QSUserGovernance",
            function_name=f"{cf.PROJECT}-QSUserGovernance",
            handler="qs_user_governance.handler",
            role=qs_governance_role,
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(os.path.join(cf.PATH_SRC, "pkg")),
            environment={
                "QS_FEDERATED_ROLE_NAME": f"{cf.QS_FEDERATED_ROLE_NAME}",
                "QS_PREFIX": cf.QS_PREFIX,
                "QS_ADMIN_OKTA_GROUP": cf.QS_ADMIN_OKTA_GROUP,
                "QS_AUTHOR_OKTA_GROUP": cf.QS_AUTHOR_OKTA_GROUP,
                "QS_READER_OKTA_GROUP": cf.QS_READER_OKTA_GROUP,
                "SLACK_SECRET_ID": cf.SLACK_SECRET_ID
            },
            memory_size=256,
            timeout=core.Duration.seconds(180),
            layers=[requests_layer],
        )

        qs_asset_governance_lambda = _lambda.Function(
            self,
            id=f"{cf.PROJECT}-QSAssetGovernance",
            function_name=f"{cf.PROJECT}-QSAssetGovernance",
            handler="qs_asset_governance.handler",
            role=qs_governance_role,
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(os.path.join(cf.PATH_SRC, "pkg")),
            memory_size=256,
            timeout=core.Duration.seconds(180),
            layers=[requests_layer],
            environment={
                "QS_GOVERNANCE_BUCKET": f"{bucket_name}",
                "S3_PREFIX_USERS": cf.S3_PREFIX_USERS,
                "S3_PREFIX_POLICIES": cf.S3_PREFIX_POLICIES,
                "STAGE": cf.DEPLOYMENT_STAGE,
                "SLACK_SECRET_ID": cf.SLACK_SECRET_ID,
                "VERIFIED_EMAIL": cf.VERIFIED_EMAIL
            })

        qs_policy_governance_lambda = _lambda.Function(
            self,
            id=f"{cf.PROJECT}-QSPolicyGovernance",
            function_name=f"{cf.PROJECT}-QSPolicyGovernance",
            handler="qs_policy_governance.handler",
            role=qs_governance_role,
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(os.path.join(cf.PATH_SRC, "pkg")),
            environment={
                "QS_ADMIN_OKTA_GROUP": cf.QS_ADMIN_OKTA_GROUP,
                "QS_AUTHOR_OKTA_GROUP": cf.QS_AUTHOR_OKTA_GROUP,
                "QS_AUTHOR_BASE_POLICY": cf.QS_AUTHOR_BASE_POLICY,
                "QS_PREFIX": cf.QS_PREFIX,
                "QS_GOVERNANCE_BUCKET": f"{bucket_name}",
                "S3_PREFIX_POLICIES": cf.S3_PREFIX_POLICIES,
                "QS_SUPERUSER": cf.QS_SUPERUSER,
                "DEPLOYMENT_STAGE": cf.DEPLOYMENT_STAGE,
                "SLACK_SECRET_ID": cf.SLACK_SECRET_ID
            },
            memory_size=256,
            timeout=core.Duration.seconds(180),
            layers=[requests_layer])

        qs_resource_cleanup_lambda = _lambda.Function(
            self,
            id=f"{cf.PROJECT}-QSResourceCleanup",
            function_name=f"{cf.PROJECT}-QSResourceCleanup",
            handler="qs_resource_cleanup.handler",
            role=qs_governance_role,
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(os.path.join(cf.PATH_SRC, "pkg")),
            memory_size=256,
            timeout=core.Duration.seconds(180),
            layers=[requests_layer],
            environment={
                "QS_PREFIX": cf.QS_PREFIX,
                "QS_SUPERUSER": cf.QS_SUPERUSER,
                "QS_GOVERNANCE_BUCKET": f"{bucket_name}",
                "S3_PREFIX_ASSETS": cf.S3_PREFIX_ASSETS,
                "S3_PREFIX_USERS": cf.S3_PREFIX_USERS,
                "S3_PREFIX_POLICIES": cf.S3_PREFIX_POLICIES,
                "SLACK_SECRET_ID": cf.SLACK_SECRET_ID
            })

        # -------------------------------
        # S3 Event Triggers
        # -------------------------------

        qs_user_governance_lambda.add_event_source(
            lambda_event_sources.S3EventSource(
                bucket=qs_gov_bucket,
                events=[s3.EventType.OBJECT_CREATED],
                filters=[s3.NotificationKeyFilter(prefix=cf.S3_PREFIX_USERS)],
            ))

        qs_policy_governance_lambda.add_event_source(
            lambda_event_sources.S3EventSource(
                bucket=qs_gov_bucket,
                events=[s3.EventType.OBJECT_CREATED],
                filters=[
                    s3.NotificationKeyFilter(prefix=cf.S3_PREFIX_POLICIES)
                ],
            ))

        # -------------------------------
        # CloudWatch Event Rules (30 minutes)
        # -------------------------------

        lambda_schedule = events.Schedule.rate(core.Duration.minutes(30))
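
        # A single Schedule object is shared by the three rules below, each
        # firing its Lambda function every 30 minutes.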
        get_okta_info_target = events_targets.LambdaFunction(
            handler=get_okta_information_lambda)
        events.Rule(
            self,
            id=f"{cf.PROJECT}-GetOktaInfoScheduledEvent",
            description="The scheduled CloudWatch event trigger for the Lambda",
            enabled=True,
            schedule=lambda_schedule,
            targets=[get_okta_info_target],
        )

        qs_asset_gov_target = events_targets.LambdaFunction(
            handler=qs_asset_governance_lambda)
        events.Rule(
            self,
            id=f"{cf.PROJECT}-QSAssetGovScheduledEvent",
            description="The scheduled CloudWatch event trigger for the Lambda",
            enabled=True,
            schedule=lambda_schedule,
            targets=[qs_asset_gov_target],
        )

        qs_cleanup_target = events_targets.LambdaFunction(
            handler=qs_resource_cleanup_lambda)
        events.Rule(
            self,
            id=f"{cf.PROJECT}-QSCleanupScheduledEvent",
            description="The scheduled CloudWatch event trigger for the Lambda",
            enabled=True,
            schedule=lambda_schedule,
            targets=[qs_cleanup_target],
        )

        # -------------------------------
        # Athena WorkGroup
        # -------------------------------

        workgroup_encryption = athena.CfnWorkGroup.EncryptionConfigurationProperty(
            encryption_option='SSE_S3')

        workgroup_output = athena.CfnWorkGroup.ResultConfigurationProperty(
            output_location="s3://" + cf.ATHENA_OUTPUT_LOC,
            encryption_configuration=workgroup_encryption)

        quicksight_athena_workgroup = athena.CfnWorkGroup(
            self,
            id=f"{cf.PROJECT}-workgroup",
            name=f"{cf.QS_PREFIX}-workgroup",
            description="workgroup for QuickSight Data Source operations",
            recursive_delete_option=True,
            work_group_configuration=athena.CfnWorkGroup.
            WorkGroupConfigurationProperty(
                result_configuration=workgroup_output))
        workgroup_name = quicksight_athena_workgroup.name

        # -------------------------------
        # S3 Object Deployments
        # -------------------------------

        policy_manifest_deploy = s3_deploy.BucketDeployment(
            self,
            id=f"{cf.PROJECT}-PolicyManifestDeploy",
            sources=[
                s3_deploy.Source.asset(
                    os.path.join(cf.PATH_ROOT, "qs_config/policies"))
            ],
            destination_bucket=qs_gov_bucket,
            destination_key_prefix=cf.S3_PREFIX_POLICIES)
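
        # Because the manifests land under S3_PREFIX_POLICIES, each deployment
        # also (re)triggers qs_policy_governance_lambda through the S3 event
        # source configured above.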

        # -------------------------------
        # QuickSight IAM Policy Assignments
        # -------------------------------

        qs_author_base_policy = iam.ManagedPolicy(
            self,
            id=f"{cf.PROJECT}-{cf.QS_AUTHOR_BASE_POLICY}",
            managed_policy_name=f"{cf.QS_AUTHOR_BASE_POLICY}",
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:athena:{cf.REGION}:{cf.ACCOUNT}:workgroup/{workgroup_name}"
                    ],
                    actions=[
                        "athena:GetNamedQuery", "athena:CancelQueryExecution",
                        "athena:CreateNamedQuery", "athena:DeleteNamedQuery",
                        "athena:StartQueryExecution",
                        "athena:StopQueryExecution", "athena:GetWorkGroup",
                        "athena:GetNamedQuery", "athena:GetQueryResults",
                        "athena:GetQueryExecution",
                        "athena:BatchGetQueryExecution",
                        "athena:BatchGetNamedQuery", "athena:ListNamedQueries",
                        "athena:ListQueryExecutions",
                        "athena:GetQueryResultsStream"
                    ]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=[
                        "athena:ListWorkGroups", "athena:ListDataCatalogs",
                        "athena:StartQueryExecution",
                        "athena:GetQueryExecution",
                        "athena:GetQueryResultsStream",
                        "athena:ListTableMetadata", "athena:GetTableMetadata"
                    ]),
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    resources=[
                                        "arn:aws:s3:::ast-datalake-*",
                                        "arn:aws:s3:::aws-athena-query-*",
                                        f"arn:aws:s3:::{bucket_name}*",
                                        f"arn:aws:s3:::{cf.ATHENA_OUTPUT_LOC}*"
                                    ],
                                    actions=[
                                        "s3:List*",
                                        "s3:Get*",
                                    ]),
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=["s3:ListAllMyBuckets"]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        "arn:aws:s3:::aws-athena-query-*",
                        f"arn:aws:s3:::{bucket_name}/{cf.QS_PREFIX}/athena-results*"
                    ],
                    actions=["s3:Put*"]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=[
                        f"arn:aws:athena:{cf.REGION}:{cf.ACCOUNT}:datacatalog/AwsDataCatalog"
                    ],
                    actions=["athena:ListDatabases"]),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                    actions=["kms:Decrypt", "kms:GenerateDataKey"]),
            ],
        )
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        # "lambda_dir" is a custom kwarg; pop it off before forwarding the
        # remaining kwargs to the base class.
        lambda_dir = kwargs.pop("lambda_dir")
        super().__init__(scope, construct_id, **kwargs)

        # Note: A dead-letter queue is optional but it helps capture any failed messages
        dlq = sqs.Queue(self,
                        id="dead_letter_queue_id",
                        retention_period=Duration.days(7))
        dead_letter_queue = sqs.DeadLetterQueue(max_receive_count=1, queue=dlq)

        upload_queue = sqs.Queue(self,
                                 id="sample_queue_id",
                                 visibility_timeout=Duration.seconds(30),
                                 dead_letter_queue=dead_letter_queue)

        sqs_subscription = sns_subs.SqsSubscription(upload_queue,
                                                    raw_message_delivery=True)

        upload_event_topic = sns.Topic(self, id="sample_sns_topic_id")

        # This binds the SNS Topic to the SQS Queue
        upload_event_topic.add_subscription(sqs_subscription)
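
        # With raw_message_delivery=True the S3 event JSON reaches the queue
        # as-is, without the SNS envelope, so consumers can parse the S3
        # notification directly.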

        # Note: Lifecycle Rules are optional but are included here to keep costs
        #       low by cleaning up old files or moving them to lower cost storage options
        s3_bucket = s3.Bucket(
            self,
            id="sample_bucket_id",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            versioned=True,
            lifecycle_rules=[
                s3.LifecycleRule(
                    enabled=True,
                    expiration=Duration.days(365),
                    transitions=[
                        s3.Transition(
                            storage_class=s3.StorageClass.INFREQUENT_ACCESS,
                            transition_after=Duration.days(30)),
                        s3.Transition(storage_class=s3.StorageClass.GLACIER,
                                      transition_after=Duration.days(90)),
                    ])
            ])

        # Note: If you don't specify a filter, every upload will trigger an event.
        #       Other event types can be added to handle additional object operations.
        # This binds the S3 bucket to the SNS Topic
        s3_bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED_PUT,
            s3n.SnsDestination(upload_event_topic),
            s3.NotificationKeyFilter(prefix="uploads", suffix=".csv"))

        function = _lambda.Function(
            self,
            "lambda_function",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="lambda_function.handler",
            code=_lambda.Code.from_asset(path=lambda_dir))

        # This binds the lambda to the SQS Queue
        invoke_event_source = lambda_events.SqsEventSource(upload_queue)
        function.add_event_source(invoke_event_source)
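
        # A minimal sketch of the handler in `lambda_dir` (assumed; the actual
        # code is not shown here). Each SQS record body is the raw S3 event,
        # thanks to raw_message_delivery above:
        #
        #   import json
        #   def handler(event, context):
        #       for record in event['Records']:
        #           s3_event = json.loads(record['body'])
        #           for rec in s3_event.get('Records', []):
        #               print(rec['s3']['bucket']['name'],
        #                     rec['s3']['object']['key'])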

        # Examples of CloudFormation outputs
        CfnOutput(
            self,
            "UploadFileToS3Example",
            value="aws s3 cp <local-path-to-file> s3://{}/".format(
                s3_bucket.bucket_name),
            description=
            "Upload a file to S3 (using AWS CLI) to trigger the SQS chain",
        )
        CfnOutput(
            self,
            "UploadSqsQueueUrl",
            value=upload_queue.queue_url,
            description="Link to the SQS Queue triggered on S3 uploads",
        )
        CfnOutput(
            self,
            "LambdaFunctionName",
            value=function.function_name,
        )
        CfnOutput(
            self,
            "LambdaFunctionLogGroupName",
            value=function.log_group.log_group_name,
        )
    def __init__(self, scope: core.Construct, id: str, instance_id: str,
                 contact_flow_id: str, source_phone_number: str, timeout: int,
                 **kwargs):
        super().__init__(scope, id, **kwargs)

        web_bucket = _s3.Bucket(self,
                                "StaticWebBucket",
                                website_index_document="index.html",
                                website_error_document="index.html",
                                removal_policy=core.RemovalPolicy.DESTROY,
                                public_read_access=True)

        core.CfnOutput(self,
                       'WebBucketUrl',
                       value=web_bucket.bucket_domain_name)

        web_distribution = _clf.CloudFrontWebDistribution(
            self,
            'StaticWebDistribution',
            origin_configs=[
                _clf.SourceConfiguration(
                    s3_origin_source=_clf.S3OriginConfig(
                        s3_bucket_source=web_bucket),
                    behaviors=[_clf.Behavior(is_default_behavior=True)])
            ],
            viewer_protocol_policy=_clf.ViewerProtocolPolicy.REDIRECT_TO_HTTPS)

        _s3d.BucketDeployment(
            self,
            "S3StaticWebContentDeploymentWithInvalidation",
            sources=[
                _s3d.Source.asset(
                    f"{pathlib.Path(__file__).parent.absolute()}/site-content/build"
                )
            ],
            destination_bucket=web_bucket,
            distribution=web_distribution,
            distribution_paths=["/*"])

        file_bucket = _s3.Bucket(self,
                                 "FileBucket",
                                 removal_policy=core.RemovalPolicy.DESTROY)

        call_dead_letter_queue = _sqs.Queue(self,
                                            "CallDeadLetterQueue",
                                            fifo=True,
                                            content_based_deduplication=True)

        call_sqs_queue = _sqs.Queue(
            self,
            "CallSqsQueue",
            fifo=True,
            content_based_deduplication=True,
            visibility_timeout=core.Duration.seconds(120),
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=1, queue=call_dead_letter_queue))

        async_call_dead_letter_queue = _sqs.Queue(
            self,
            "AsyncCallDeadLetterQueue",
            fifo=True,
            content_based_deduplication=True)

        async_callout_queue = _sqs.Queue(
            self,
            "AsyncCalloutQueue",
            fifo=True,
            content_based_deduplication=True,
            visibility_timeout=core.Duration.seconds(120),
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=1, queue=async_call_dead_letter_queue))

        call_job_complete_sns_topic = _sns.Topic(
            self, "CallJobCompleteSnsTopic", display_name="CallJobCompletion")

        call_result_table = _dynamodb.Table(
            self,
            "CallResultDynamodbTable",
            billing_mode=_dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=_dynamodb.Attribute(
                name="task_id", type=_dynamodb.AttributeType.STRING),
            sort_key=_dynamodb.Attribute(name="receiver_id",
                                         type=_dynamodb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)

        callout_record_table = _dynamodb.Table(
            self,
            "CallTaskDynamodbTable",
            billing_mode=_dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=_dynamodb.Attribute(
                name="task_id", type=_dynamodb.AttributeType.STRING),
            sort_key=_dynamodb.Attribute(name="created_at",
                                         type=_dynamodb.AttributeType.NUMBER),
            removal_policy=core.RemovalPolicy.DESTROY)
        callout_record_table.add_global_secondary_index(
            partition_key=_dynamodb.Attribute(
                name='call_type', type=_dynamodb.AttributeType.STRING),
            sort_key=_dynamodb.Attribute(name='created_at',
                                         type=_dynamodb.AttributeType.NUMBER),
            index_name='CallTypeCreatedAtGlobalIndex',
            projection_type=_dynamodb.ProjectionType.ALL)
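
        # The GSI supports listing call records of a given call_type in
        # created_at order, e.g. (sketch; boto3 and the 'promotion' value are
        # illustrative):
        #
        #   from boto3.dynamodb.conditions import Key
        #   table.query(IndexName='CallTypeCreatedAtGlobalIndex',
        #               KeyConditionExpression=Key('call_type').eq('promotion'))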

        python_function_layer = _lambda.LayerVersion(
            self,
            "LambdaPythonFunctionLayer",
            code=_lambda.Code.from_asset("aws_callouts_cdk/layer/_python"),
            compatible_runtimes=[
                _lambda.Runtime.PYTHON_3_7, _lambda.Runtime.PYTHON_3_8
            ],
            license="Available under the MIT-0 license")

        nodejs_function_layer = _lambda.LayerVersion(
            self,
            "LambdaNodeJsFunctionLayer",
            code=_lambda.Code.from_asset("aws_callouts_cdk/layer/_nodejs"),
            compatible_runtimes=[
                _lambda.Runtime.NODEJS_10_X, _lambda.Runtime.NODEJS_12_X
            ],
            license="Available under the MIT-0 license")

        global_python_function_arguments = {
            "code": _lambda.Code.asset("aws_callouts_cdk/src/python"),
            "layers": [python_function_layer],
            "runtime": _lambda.Runtime.PYTHON_3_7
        }

        global_nodejs_function_arguments = {
            "code": _lambda.Code.asset("aws_callouts_cdk/src/nodejs"),
            "layers": [nodejs_function_layer],
            "runtime": _lambda.Runtime.NODEJS_12_X
        }

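        # One Lambda function per step of the callout workflow.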
        get_callout_job_function = _lambda.Function(
            self,
            "GetCalloutJobFunction",
            handler="get_call_job.lambda_handler",
            **global_python_function_arguments)
        get_callout_job_function.add_environment(key="S3Bucket",
                                                 value=file_bucket.bucket_name)
        file_bucket.grant_read(get_callout_job_function)

        callout_function = _lambda.Function(self,
                                            "CalloutFunction",
                                            handler="send_call.lambda_handler",
                                            **global_python_function_arguments)
        callout_function.add_environment(
            key="ContactFlowArn",
            value=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}/contact-flow/{contact_flow_id}"
        )
        callout_function.add_environment(key="SourcePhoneNumber",
                                         value=source_phone_number)
        callout_function.add_environment(key="ExcelFileBucket",
                                         value=file_bucket.bucket_name)
        callout_function.add_environment(key="AsynCalloutQueueUrl",
                                         value=async_callout_queue.queue_url)
        callout_function.add_to_role_policy(statement=_iam.PolicyStatement(
            resources=[
                f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}/*"
            ],
            actions=["connect:StartOutboundVoiceContact"]))
        callout_function.add_event_source(source=_les.SqsEventSource(
            queue=async_callout_queue, batch_size=1))
        file_bucket.grant_read_write(callout_function)

        response_handler_function = _lambda.Function(
            self,
            "ResponseHandlerFunction",
            handler="response_handler.lambda_handler",
            **global_python_function_arguments)
        response_handler_function.add_permission(
            id="ResponseHandlerFunctionLambdaInvokePermission",
            principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_account=self.account,
            source_arn=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
        )

        send_task_success_function = _lambda.Function(
            self,
            "SendTaskSuccessFunction",
            handler="send_task_success.lambda_handler",
            **global_python_function_arguments)
        send_task_success_function.add_permission(
            id="SendTaskSuccessFunctionLambdaInvokePermission",
            principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_account=self.account,
            source_arn=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
        )

        get_call_result_function = _lambda.Function(
            self,
            "GetCallResultFunction",
            handler="get_call_result.lambda_handler",
            memory_size=512,
            **global_python_function_arguments)
        get_call_result_function.add_environment(
            key="CallResultDynamoDBTable", value=call_result_table.table_name)
        get_call_result_function.add_environment(key="S3Bucket",
                                                 value=file_bucket.bucket_name)
        call_result_table.grant_read_data(grantee=get_call_result_function)
        file_bucket.grant_read_write(get_call_result_function)

        iterator_function = _lambda.Function(
            self,
            "IteratorFunction",
            handler="iterator.lambda_handler",
            **global_python_function_arguments)
        iterator_function.add_permission(
            id="IteratorFunctionLambdaInvokePermission",
            principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_account=self.account,
            source_arn=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
        )

        create_appsync_call_task_function = _lambda.Function(
            self,
            "CreateAppSyncCallTaskFunction",
            handler="create_appsync_call_task.lambda_handler",
            **global_nodejs_function_arguments)
        create_appsync_call_task_function.add_environment(
            key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
        create_appsync_call_task_function.add_environment(
            key="CallRecordTableName", value=callout_record_table.table_name)
        call_sqs_queue.grant_send_messages(create_appsync_call_task_function)
        callout_record_table.grant_write_data(
            create_appsync_call_task_function)

        create_call_report_record_function = _lambda.Function(
            self,
            "CreateCallReportRecordFunction",
            handler="create_call_report_record.lambda_handler",
            **global_nodejs_function_arguments)

        create_excel_call_task_function = _lambda.Function(
            self,
            "CreateExcelCallTaskFunction",
            handler="create_excel_call_task.lambda_handler",
            **global_python_function_arguments)
        create_excel_call_task_function.add_environment(
            key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
        call_sqs_queue.grant_send_messages(create_excel_call_task_function)

        create_excel_call_task_function.add_event_source(
            source=_les.S3EventSource(bucket=file_bucket,
                                      events=[_s3.EventType.OBJECT_CREATED],
                                      filters=[
                                          _s3.NotificationKeyFilter(
                                              prefix="call_task",
                                              suffix=".xlsx")
                                      ]))

        start_callout_flow_function = _lambda.Function(
            self,
            "StartCalloutFlowFunction",
            handler="start_call_out_flow.lambda_handler",
            reserved_concurrent_executions=1,
            **global_python_function_arguments)
        start_callout_flow_function.add_environment(
            key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
        start_callout_flow_function.add_environment(
            key="ResponseHandlerFunctionArn",
            value=response_handler_function.function_arn)
        start_callout_flow_function.add_environment(
            key="IteratorFunctionArn", value=iterator_function.function_arn)
        start_callout_flow_function.add_environment(
            key="SendTaskSuccessFunctionArn",
            value=send_task_success_function.function_arn)
        start_callout_flow_function.add_environment(
            key="S3Bucket", value=file_bucket.bucket_name)
        start_callout_flow_function.add_event_source(
            source=_les.SqsEventSource(queue=call_sqs_queue, batch_size=1))
        file_bucket.grant_read_write(start_callout_flow_function)

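        # Amazon States Language definition for the callout workflow. The
        # "Callout with Amazon Connect" state uses the
        # sqs:sendMessage.waitForTaskToken service integration, so each
        # iteration pauses after queueing the call until
        # send_task_success_function reports back with the task token, or
        # the TimeoutSeconds limit triggers the States.Timeout catch.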
        call_state_machine_definition = {
            "Comment":
            "Reading messages from an SQS queue and iteratively processing each message.",
            "StartAt": "Start",
            "States": {
                "Start": {
                    "Type": "Pass",
                    "Next": "Process Call Messages"
                },
                "Process Call Messages": {
                    "Type": "Map",
                    "Next": "Get Call Result",
                    "InputPath": "$",
                    "ItemsPath": "$",
                    "OutputPath": "$.[0]",
                    "Iterator": {
                        "StartAt": "Get Call out job",
                        "States": {
                            "Get Call out job": {
                                "Type": "Task",
                                "Resource":
                                get_callout_job_function.function_arn,
                                "Next": "Callout with AWS Connect"
                            },
                            "Callout with AWS Connect": {
                                "Type":
                                "Task",
                                "Resource":
                                "arn:aws:states:::sqs:sendMessage.waitForTaskToken",
                                "TimeoutSeconds":
                                timeout,
                                "Parameters": {
                                    "QueueUrl": async_callout_queue.queue_url,
                                    "MessageGroupId": "1",
                                    "MessageBody": {
                                        "Message.$": "$",
                                        "TaskToken.$": "$$.Task.Token"
                                    }
                                },
                                "Catch": [{
                                    "ErrorEquals": ["States.Timeout"],
                                    "ResultPath": None,
                                    "Next": "Call Timeout"
                                }],
                                "Next":
                                "Save call result"
                            },
                            "Call Timeout": {
                                "Type": "Pass",
                                "ResultPath": None,
                                "Next": "Save call result"
                            },
                            "Save call result": {
                                "Type": "Task",
                                "Resource":
                                "arn:aws:states:::dynamodb:putItem",
                                "Parameters": {
                                    "TableName": call_result_table.table_name,
                                    "Item": {
                                        "receiver_id": {
                                            "S.$": "$.receiver_id"
                                        },
                                        "task_id": {
                                            "S.$": "$.task_id"
                                        },
                                        "username": {
                                            "S.$": "$.username"
                                        },
                                        "phone_number": {
                                            "S.$": "$.phone_number"
                                        },
                                        "status": {
                                            "S.$": "$.status"
                                        },
                                        "answers": {
                                            "S.$": "$.answers"
                                        },
                                        "error": {
                                            "S.$": "$.error"
                                        },
                                        "call_at": {
                                            "S.$": "$.call_at"
                                        }
                                    }
                                },
                                "ResultPath": "$.Result",
                                "OutputPath": "$.task_id",
                                "End": True
                            }
                        }
                    }
                },
                "Get Call Result": {
                    "Type": "Task",
                    "Resource": get_call_result_function.function_arn,
                    "Next": "Create Call Report Record"
                },
                "Create Call Report Record": {
                    "Type": "Task",
                    "Resource":
                    create_call_report_record_function.function_arn,
                    "Next": "Send Completion message to SNS"
                },
                "Send Completion message to SNS": {
                    "Type": "Task",
                    "Resource": "arn:aws:states:::sns:publish",
                    "Parameters": {
                        "TopicArn": call_job_complete_sns_topic.topic_arn,
                        "Message.$": "$"
                    },
                    "Next": "Finish"
                },
                "Finish": {
                    "Type": "Succeed"
                }
            }
        }
        callout_state_machine_role = _iam.Role(
            self,
            "CalloutStatesExecutionRole",
            assumed_by=_iam.ServicePrincipal(
                f"states.{self.region}.amazonaws.com"))
        callout_state_machine_role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    "sqs:SendMessage", "dynamodb:PutItem",
                    "lambda:InvokeFunction", "SNS:Publish"
                ],
                resources=[
                    async_callout_queue.queue_arn, call_result_table.table_arn,
                    get_callout_job_function.function_arn,
                    get_call_result_function.function_arn,
                    call_job_complete_sns_topic.topic_arn,
                    create_appsync_call_task_function.function_arn,
                    create_call_report_record_function.function_arn
                ]))
        callout_state_machine = _sfn.CfnStateMachine(
            self,
            "CalloutStateMachine",
            role_arn=callout_state_machine_role.role_arn,
            definition_string=json.dumps(call_state_machine_definition))
        send_task_success_function.add_to_role_policy(
            _iam.PolicyStatement(actions=["states:SendTaskSuccess"],
                                 resources=[callout_state_machine.ref]))

        start_callout_flow_function.add_environment(
            key="CalloutStateMachineArn", value=callout_state_machine.ref)
        start_callout_flow_function.add_to_role_policy(
            _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                                 resources=[callout_state_machine.ref],
                                 actions=['states:StartExecution']))

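        # Cognito user pool and app client used to authenticate requests
        # to the AppSync API.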
        user_pool = _cognito.UserPool(
            self, "UserPool", sign_in_type=_cognito.SignInType.USERNAME)

        user_pool_client = _cognito.UserPoolClient(self,
                                                   "UserPoolClient",
                                                   user_pool=user_pool)

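        # AppSync GraphQL API: the query resolvers read the callout record
        # table through CallTypeCreatedAtGlobalIndex, and the
        # createCallTask mutation is backed by the call task Lambda.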
        appsync_api = _appsync.GraphQLApi(
            self,
            "AppSyncApi",
            name="AWSCalloutApi",
            user_pool_config=_appsync.UserPoolConfig(
                user_pool=user_pool,
                default_action=_appsync.UserPoolDefaultAction.ALLOW),
            log_config=_appsync.LogConfig(
                field_log_level=_appsync.FieldLogLevel.ALL),
            schema_definition_file=
            f"{pathlib.Path(__file__).parent.absolute()}/schema.graphql")

        callout_record_ddb_ds = appsync_api.add_dynamo_db_data_source(
            name="CalloutRecordDdb",
            description="Callout Record DynamoDB Data Source",
            table=callout_record_table)
        callout_record_ddb_ds.create_resolver(
            type_name="Query",
            field_name="getLatestCallTaskRecords",
            request_mapping_template=_appsync.MappingTemplate.from_string(
                '{"version":"2017-02-28","operation":"Query","index":"CallTypeCreatedAtGlobalIndex","query":{"expression":"call_type = :call_type","expressionValues":{":call_type":{"S":"TASK"}}},"scanIndexForward":false,"limit":${ctx.args.limit}}'
            ),
            response_mapping_template=_appsync.MappingTemplate.
            dynamo_db_result_list())
        callout_record_ddb_ds.create_resolver(
            type_name="Query",
            field_name="getLatestCallReportRecords",
            request_mapping_template=_appsync.MappingTemplate.from_string(
                '{"version":"2017-02-28","operation":"Query","index":"CallTypeCreatedAtGlobalIndex","query":{"expression":"call_type = :call_type","expressionValues":{":call_type":{"S":"REPORT"}}},"scanIndexForward":false,"limit":${ctx.args.limit}}'
            ),
            response_mapping_template=_appsync.MappingTemplate.
            dynamo_db_result_list())
        callout_record_ddb_ds.create_resolver(
            type_name="Mutation",
            field_name="createCallReport",
            request_mapping_template=_appsync.MappingTemplate.from_string(
                '{"version":"2017-02-28","operation":"PutItem","key":{"task_id":{"S":"${ctx.args.report.task_id}"},"created_at":{"N":"${ctx.args.report.created_at}"}},"attributeValues":$util.dynamodb.toMapValuesJson($ctx.args.report)}'
            ),
            response_mapping_template=_appsync.MappingTemplate.
            dynamo_db_result_item())

        call_task_lambda_ds = appsync_api.add_lambda_data_source(
            name="CallTaskLambda",
            description="Call Task Lambda Data Source",
            lambda_function=create_appsync_call_task_function)
        call_task_lambda_ds.create_resolver(
            type_name="Mutation",
            field_name="createCallTask",
            request_mapping_template=_appsync.MappingTemplate.lambda_request(
                "$utils.toJson($ctx.args)"),
            response_mapping_template=_appsync.MappingTemplate.lambda_result())

        create_call_report_record_function.add_environment(
            value=appsync_api.graph_ql_url, key="AppSyncGraphQlApiUrl")

        create_call_report_record_function.add_to_role_policy(
            statement=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=['appsync:GraphQL'],
                resources=[
                    f"{appsync_api.arn}/types/Mutation/fields/createCallReport"
                ]))

        core.CfnOutput(self,
                       id="OutputCallSqsQueue",
                       value=call_sqs_queue.queue_arn)
        core.CfnOutput(self,
                       id="OutputCallJobCompletionSNSTopic",
                       value=call_job_complete_sns_topic.topic_arn)
        core.CfnOutput(self,
                       id="OutputExcelFileS3Bucket",
                       value=file_bucket.bucket_name)
        core.CfnOutput(self,
                       id="OutputStaticWebS3Bucket",
                       value=web_bucket.bucket_name)
        core.CfnOutput(self,
                       id="OutputStaticWebUrl",
                       value=web_bucket.bucket_website_url)

        identity_pool = _cognito.CfnIdentityPool(
            self,
            "IdentityPool",
            allow_unauthenticated_identities=True,
            cognito_identity_providers=[
                _cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
                    provider_name=user_pool.user_pool_provider_name,
                    client_id=user_pool_client.user_pool_client_id)
            ])
        identity_pool_unauthorized_role = _iam.Role(
            self,
            'IdentityPoolUnAuthorizedRole',
            assumed_by=_iam.FederatedPrincipal(
                federated="cognito-identity.amazonaws.com",
                assume_role_action="sts:AssumeRoleWithWebIdentity",
                conditions={
                    "StringEquals": {
                        "cognito-identity.amazonaws.com:aud": identity_pool.ref
                    },
                    "ForAnyValue:StringLike": {
                        "cognito-identity.amazonaws.com:amr": "unauthenticated"
                    }
                }))
        identity_pool_unauthorized_role.add_to_policy(
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["appsync:GraphQL"],
                resources=[
                    f"{appsync_api.arn}/types/*",
                    # f"{appsync_api.arn}/types/Query/fields/getLatestCallTaskRecords",
                    # f"{appsync_api.arn}/types/Query/fields/getLatestCallReportRecords",
                    # f"{appsync_api.arn}/types/Mutation/fields/createCallRecord",
                    # f"{appsync_api.arn}/types/Subscription/fields/createCallTask",
                    # f"{appsync_api.arn}/types/Subscription/fields/createCallReport"
                ]))

        _cognito.CfnIdentityPoolRoleAttachment(
            self,
            "CognitoIdentityPoolRoleAttachment",
            identity_pool_id=identity_pool.ref,
            roles={
                "unauthenticated": identity_pool_unauthorized_role.role_arn
            })

        core.CfnOutput(self, id="UserPoolId", value=user_pool.user_pool_id)
        core.CfnOutput(self,
                       id="UserPoolClientId",
                       value=user_pool_client.user_pool_client_id)
        core.CfnOutput(self, id="IdentityPoolId", value=identity_pool.ref)
Example No. 23
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        if kwargs['env'].account == '828661178764':
            gbc_environment = 'devl'
        else:
            raise ValueError('Account not mapped!')

        # Define location of lambda deployment packages:
        bucket_source = s3.Bucket.from_bucket_name(self,
                                                   'bucket_source_id',
                                                   bucket_name='gbc-lambda')
        generic_loader = _lambda.S3Code(
            bucket_source, key='generic-loader/generic-loader-test2.zip')

        # Define attributes for the networking part:
        _vpc = ec2.Vpc.from_vpc_attributes(
            self,
            'myVPC',
            vpc_id='vpc-042f6b22897562107',
            availability_zones=['ca-central-1a', 'ca-central-1b'])

        _subnets = [
            ec2.Subnet.from_subnet_id(self,
                                      'subnet1',
                                      subnet_id='subnet-0579afb06d9cec8ed'),
            ec2.Subnet.from_subnet_id(self,
                                      'subnet2',
                                      subnet_id='subnet-07a0e458dc7ea0228')
        ]

        _security_group = [
            ec2.SecurityGroup.from_security_group_id(
                self, 'mySG', security_group_id='sg-0264ea677ccfef4ff')
        ]

        my_lambda = _lambda.Function(
            self,
            'redshift-generic-loader',
            runtime=_lambda.Runtime.PYTHON_3_6,
            code=generic_loader,
            timeout=core.Duration.seconds(30),
            handler='lambda_function.lambda_handler',
            role=iam.Role.from_role_arn(
                self,
                'myRole',
                role_arn='arn:aws:iam::828661178764:role/lambda-data-analytics',
                mutable=False),
            vpc=_vpc,
            vpc_subnets=ec2.SubnetSelection(subnets=_subnets),
            security_groups=_security_group)

        # Create main bucket for pipelines and register notifications:
        main_bucket = s3.Bucket(
            self,
            'new_bucket_id',
            bucket_name=f'gbc-analytics-prototype-1-cdk-{gbc_environment}',
            versioned=True,
            encryption=s3.BucketEncryption.S3_MANAGED)

        target_lambda = s3_notif.LambdaDestination(my_lambda)
        key_filter_1 = s3.NotificationKeyFilter(prefix='cdk-test/')
        main_bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                           target_lambda, key_filter_1)
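
        # A minimal deployment sketch (hypothetical: the stack class name
        # below is an assumption, not part of this example). The env must
        # carry a mapped account, or the gbc_environment lookup above
        # raises ValueError.
        #
        # app = core.App()
        # GenericLoaderStack(app, 'generic-loader-stack',
        #                    env=core.Environment(account='828661178764',
        #                                         region='ca-central-1'))
        # app.synth()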
Example No. 24
    def __init__(self, scope: core.Construct, id: str, log_bucket: _s3.Bucket,
                 config_table: _dynamodb.Table, tshirt_size: str,
                 sink_bucket: _s3.Bucket, web_sale_stream: str,
                 web_customer_stream: str, web_customer_address_stream: str,
                 kinesis_key: _kms.Key, vpc: _ec2.Vpc, **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        stack = core.Stack.of(self)

        stream_source_bucket = AutoEmptyBucket(
            self,
            'StreamSource',
            bucket_name='ara-stream-source-' + core.Aws.ACCOUNT_ID,
            uuid='95505f50-0276-11eb-adc1-0242ac120002')

        service_role = _iam.Role(
            self,
            'StreamEmrServiceRole',
            assumed_by=_iam.ServicePrincipal('elasticmapreduce.amazonaws.com'))

        service_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonElasticMapReduceRole'))

        cluster_role = _iam.Role(
            self,
            'StreamEmrClusterRole',
            assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com"))

        _iam.Policy(
            self,
            'StreamEmrClusterPolicy',
            statements=[
                _iam.PolicyStatement(actions=[
                    "glue:CreateDatabase",
                    "glue:UpdateDatabase",
                    "glue:DeleteDatabase",
                    "glue:GetDatabase",
                    "glue:GetDatabases",
                    "glue:CreateTable",
                    "glue:UpdateTable",
                    "glue:DeleteTable",
                    "glue:GetTable",
                    "glue:GetTables",
                    "glue:GetTableVersions",
                    "glue:CreatePartition",
                    "glue:BatchCreatePartition",
                    "glue:UpdatePartition",
                    "glue:DeletePartition",
                    "glue:BatchDeletePartition",
                    "glue:GetPartition",
                    "glue:GetPartitions",
                    "glue:BatchGetPartition",
                    "glue:CreateUserDefinedFunction",
                    "glue:UpdateUserDefinedFunction",
                    "glue:DeleteUserDefinedFunction",
                    "glue:GetUserDefinedFunction",
                    "glue:GetUserDefinedFunctions",
                    "cloudwatch:PutMetricData",
                    "dynamodb:ListTables",
                    "s3:HeadBucket",
                    "ec2:Describe*",
                ],
                                     resources=['*']),
                _iam.PolicyStatement(
                    actions=['s3:GetObject'],
                    resources=[
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES +
                        DataGenConfig.DSDGEN_INSTALL_SCRIPT, 'arn:aws:s3:::' +
                        ARA_BUCKET_NAME + BINARIES + DataGenConfig.JAR_FILE
                    ]),
                _iam.PolicyStatement(
                    actions=['s3:PutObject'],
                    resources=[log_bucket.bucket_arn + "/data-generator/*"]),
                _iam.PolicyStatement(
                    actions=[
                        "s3:AbortMultipartUpload", "s3:CreateBucket",
                        "s3:DeleteObject", "s3:GetBucketVersioning",
                        "s3:GetObject", "s3:GetObjectTagging",
                        "s3:GetObjectVersion", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads",
                        "s3:ListBucketVersions", "s3:ListMultipartUploadParts",
                        "s3:PutBucketVersioning", "s3:PutObject",
                        "s3:PutObjectTagging"
                    ],
                    resources=[
                        sink_bucket.bucket_arn + '/*', sink_bucket.bucket_arn,
                        stream_source_bucket.bucket.bucket_arn + '/*',
                        stream_source_bucket.bucket.bucket_arn
                    ])
            ],
            roles=[cluster_role])

        cluster_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))

        _iam.CfnInstanceProfile(self,
                                'StreamEmrClusterInstanceProfile',
                                roles=[cluster_role.role_name],
                                instance_profile_name=cluster_role.role_name)

        # Security Groups for the EMR cluster (private subnet)
        # https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-man-sec-groups.html#emr-sg-elasticmapreduce-master-private
        master_sg = _ec2.SecurityGroup(self,
                                       'ElasticMapReduce-Master-Private',
                                       vpc=vpc)
        slave_sg = _ec2.SecurityGroup(self,
                                      'ElasticMapReduce-Slave-Private',
                                      vpc=vpc)
        service_sg = _ec2.SecurityGroup(self,
                                        'ElasticMapReduce-ServiceAccess',
                                        vpc=vpc,
                                        allow_all_outbound=False)

        # Service SG used by the proxy instance
        service_sg.add_ingress_rule(master_sg, _ec2.Port.tcp(9443))
        service_sg.add_egress_rule(master_sg, _ec2.Port.tcp(8443))
        service_sg.add_egress_rule(slave_sg, _ec2.Port.tcp(8443))

        # EMR Master
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        # EMR Slave
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        with open('common/common_cdk/lambda/datagen_config.py', 'r') as f:
            lambda_source = f.read()

        configure_datagen_function = _lambda.SingletonFunction(
            self,
            'StreamConfigureDatagenLambda',
            uuid="a9904dec-01cf-11eb-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='stream-datagen-config',
            environment={
                'TABLE_NAME': config_table.table_name,
                'JAR_LOCATION': BINARIES_LOCATION + DataGenConfig.JAR_FILE,
            },
            timeout=core.Duration.seconds(10))

        configure_datagen_function.role.add_to_policy(
            _iam.PolicyStatement(actions=[
                'dynamodb:GetItem',
                'dynamodb:PutItem',
            ],
                                 resources=[config_table.table_arn]))

        emr_cluster = _emr.CfnCluster(
            self,
            'StreamEmrCluster',
            name="StreamDatagenCluster",
            job_flow_role=cluster_role.role_name,
            service_role=service_role.role_name,
            release_label='emr-5.30.1',
            visible_to_all_users=True,
            log_uri=log_bucket.s3_url_for_object() + "/data-generator",
            applications=[
                _emr.CfnCluster.ApplicationProperty(name='hadoop'),
                _emr.CfnCluster.ApplicationProperty(name='spark')
            ],
            bootstrap_actions=[
                _emr.CfnCluster.BootstrapActionConfigProperty(
                    name="dsdgen-install",
                    script_bootstrap_action=_emr.CfnCluster.
                    ScriptBootstrapActionConfigProperty(
                        path=BINARIES_LOCATION +
                        DataGenConfig.DSDGEN_INSTALL_SCRIPT))
            ],
            instances=_emr.CfnCluster.JobFlowInstancesConfigProperty(
                emr_managed_master_security_group=master_sg.security_group_id,
                emr_managed_slave_security_group=slave_sg.security_group_id,
                service_access_security_group=service_sg.security_group_id,
                ec2_subnet_id=vpc.private_subnets[0].subnet_id,
                core_instance_group=_emr.CfnCluster.
                InstanceGroupConfigProperty(instance_count=DataGenConfig.
                                            BATCH_CLUSTER_SIZE[tshirt_size],
                                            instance_type='m5.xlarge'),
                master_instance_group=_emr.CfnCluster.
                InstanceGroupConfigProperty(instance_count=1,
                                            instance_type='m4.large')))

        configure_datagen = _sfn_tasks.LambdaInvoke(
            self,
            "ConfigureDatagenTask",
            lambda_function=configure_datagen_function,
            payload=_sfn.TaskInput.from_text(
                '{'
                '"Param": "stream_iterator",'
                '"Module": "stream",'
                '"SinkBucket": "' + sink_bucket.s3_url_for_object() + '",'
                '"Parallelism": "' +
                str(int(DataGenConfig.STREAM_DATA_SIZE[tshirt_size]) * 2) +
                '",'
                '"DataSize": "' + DataGenConfig.STREAM_DATA_SIZE[tshirt_size] +
                '",'
                '"TmpBucket": "' +
                str(stream_source_bucket.bucket.s3_url_for_object()) + '"'
                '}'),
            result_path='$.Config')

        add_datagen_step = _sfn.CustomState(
            self,
            'StreamAddDataGenStep',
            state_json={
                "Type": "Task",
                "Resource": "arn:aws:states:::elasticmapreduce:addStep.sync",
                "Parameters": {
                    "ClusterId.$": "$.Emr.Cluster.Id",
                    "Step": {
                        "Name": "DatagenStep",
                        "ActionOnFailure": "CONTINUE",
                        "HadoopJarStep": {
                            "Jar": "command-runner.jar",
                            "Args.$": "$.Config.Payload.StepParam"
                        }
                    }
                },
                "ResultPath": "$.Step",
                "Next": "StreamUpdateIterator"
            })

        update_iterator = _sfn_tasks.DynamoUpdateItem(
            self,
            'StreamUpdateIterator',
            table=config_table,
            key={
                'param':
                _sfn_tasks.DynamoAttributeValue.from_string('stream_iterator')
            },
            update_expression=
            'SET iterator = if_not_exists(iterator, :start) + :inc',
            expression_attribute_values={
                ":inc": _sfn_tasks.DynamoAttributeValue.from_number(1),
                ":start": _sfn_tasks.DynamoAttributeValue.from_number(0)
            },
            result_path=_sfn.JsonPath.DISCARD)

        definition = configure_datagen \
            .next(add_datagen_step) \
            .next(update_iterator)

        datagen_stepfunctions = _sfn.StateMachine(
            self,
            "StreamDataGenStepFunctions",
            definition=definition,
            timeout=core.Duration.minutes(30))

        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(actions=[
                'elasticmapreduce:AddJobFlowSteps',
                'elasticmapreduce:DescribeStep'
            ],
                                 resources=['*']))

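        # EventBridge rule that fires the data-generator state machine
        # every 10 minutes.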
        step_trigger = _events.Rule(self,
                                    'StreamStepTrigger',
                                    schedule=_events.Schedule.cron(
                                        minute='0/10',
                                        hour='*',
                                        month='*',
                                        week_day='*',
                                        year='*'))

        step_trigger.add_target(
            _events_targets.SfnStateMachine(
                machine=datagen_stepfunctions,
                input=_events.RuleTargetInput.from_object({
                    "Emr": {
                        "Cluster": {
                            "Id": core.Fn.ref(emr_cluster.logical_id)
                        }
                    }
                })))

        with open('common/common_cdk/lambda/stepfunctions_trigger.py',
                  'r') as f:
            lambda_source = f.read()

        stepfunctions_trigger_lambda = _lambda.SingletonFunction(
            self,
            'StreamStepFunctionsTriggerLambda',
            uuid="cf042246-01d0-11eb-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='stepfunctions-stream-datagen-trigger')

        stepfunctions_trigger_lambda.role.add_to_policy(
            _iam.PolicyStatement(actions=["states:StartExecution"],
                                 resources=['*']))

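        # Provider-backed custom resource: at deploy time it invokes the
        # trigger Lambda with the state machine ARN so a first execution
        # starts without waiting for the schedule.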
        trigger_step_lambda_provider = _custom_resources.Provider(
            self,
            'StreamStepFunctionsTriggerLambdaProvider',
            on_event_handler=stepfunctions_trigger_lambda)

        core.CustomResource(
            self,
            'StreamStepFunctionsTrigger',
            service_token=trigger_step_lambda_provider.service_token,
            properties={"stepArn": datagen_stepfunctions.state_machine_arn})

        with open('common/common_cdk/lambda/stream_generator.py', 'r') as f:
            lambda_source = f.read()

        sale_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebSaleStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_sale_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(sale_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='sale', suffix='csv'))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=["kinesis:PutRecords"],
                                 resources=[
                                     stack.format_arn(
                                         service='kinesis',
                                         resource='stream',
                                         resource_name=web_sale_stream)
                                 ]))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))

        customer_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebCustomerStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_customer_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(
                customer_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='customer', suffix='csv'))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=["kinesis:PutRecords"],
                                 resources=[
                                     stack.format_arn(
                                         service='kinesis',
                                         resource='stream',
                                         resource_name=web_customer_stream)
                                 ]))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))

        address_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebCustomerAddressStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_customer_address_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(
                address_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='address', suffix='csv'))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=["kinesis:PutRecords"],
                resources=[
                    stack.format_arn(service='kinesis',
                                     resource='stream',
                                     resource_name=web_customer_address_stream)
                ]))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))
Example No. 25
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Tag all constructs with the project for easy billing drilldown,
        # filtering, and organization.
        core.Tags.of(self).add('project', 'MediaTranscription')

        # Media files bucket
        media_bucket = s3.Bucket(
            self,
            'media-transcription-bucket',
            encryption=s3.BucketEncryption.S3_MANAGED,
        )

        # SQS queue for media files bucket event notifications
        media_bucket_event_queue = sqs.Queue(
            self,
            'media-transcription-event-notification-queue',
            queue_name='media-transcription-event-notification-queue',
            visibility_timeout=core.Duration.seconds(60),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3,
                queue=sqs.Queue(
                    self,
                    'media-transcription-event-notifications-dlq',
                    queue_name='media-transcription-event-notifications-dlq',
                )),
        )

        # S3 object created notifications sent to SQS queue
        media_bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            s3n.SqsDestination(media_bucket_event_queue),
            *[s3.NotificationKeyFilter(prefix='media-input/')],
        )

        # Lambda function to create/submit Transcribe jobs
        transcribe_job_init_fn = lambda_.Function(
            self,
            'transcribe-job-init-fn',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset(
                '../lambdas/transcribe-job-init-fn',
                # The Lambda runtime does not ship the latest boto3 by
                # default, and this function needs a newer version (see
                # https://github.com/boto/boto3/issues/2630), so the
                # dependencies are pip-installed and bundled locally with
                # Docker. The bundling image can be deleted afterwards to
                # avoid storage cost.
                # TODO: drop this workaround once the runtime's boto3
                # catches up.
                bundling={
                    'image':
                    lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                    'command': [
                        'bash', '-c',
                        '\n        pip install -r requirements.txt -t /asset-output &&\n        cp -au . /asset-output\n        '
                    ]
                }),
            handler='fn.handler',
            reserved_concurrent_executions=1,  # Effectively single-threaded
        )
        # Triggered by SQS messages created for media file puts
        transcribe_job_init_fn.add_event_source(
            les.SqsEventSource(
                queue=media_bucket_event_queue,
                batch_size=5,
                enabled=True,
            ))
        # Grant access to start transcription jobs
        transcribe_job_init_fn.add_to_role_policy(
            statement=iam.PolicyStatement(
                actions=[
                    'transcribe:StartTranscriptionJob',
                ],
                resources=['*'],
                effect=iam.Effect.ALLOW,
            ))

        # Grant the Lambda role read/write on the input and output portions
        # of the S3 bucket. The permissions go to the Lambda rather than to
        # the Transcribe service principal for two reasons:
        #   i) https://amzn.to/321Nx5I
        #  ii) scoping access to this Lambda means other Transcribe jobs
        #      across the account cannot use this bucket (least privilege).
        media_bucket.grant_read(
            identity=transcribe_job_init_fn.grant_principal,
            objects_key_pattern='media-input/*')
        # Transcribe checks write access by creating a probe object named
        # .write_access_check_file.temp, so alongside the output prefix the
        # function also needs write access to that single key; granting
        # write on the whole bucket is not required.
        media_bucket.grant_write(
            identity=transcribe_job_init_fn.grant_principal,
            objects_key_pattern='transcribe-output-raw/*')
        media_bucket.grant_write(
            identity=transcribe_job_init_fn.grant_principal,
            objects_key_pattern='.write_access_check_file.temp')

        # DynamoDB table for Jobs metadata
        jobs_metadata_table = ddb.Table(
            self,
            'MediaTranscription-TranscriptionJobs',
            table_name='MediaTranscription-TranscriptionJobs',
            partition_key=ddb.Attribute(
                name='Bucket-Key-ETag',
                type=ddb.AttributeType.STRING,
            ),
            billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
        )
        jobs_metadata_table.grant(transcribe_job_init_fn.grant_principal, *[
            'dynamodb:GetItem',
            'dynamodb:PutItem',
        ])

        # Create IAM Group with read/write permissions to S3 bucket
        # TODO: Make this more federated and robust
        console_users_group = iam.Group(self, 'MediaTranscriptionConsoleUsers')
        console_users_group.attach_inline_policy(policy=iam.Policy(
            self,
            'MediaTranscriptionConsoleUserS3Access',
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        's3:ListBucket',
                    ],
                    resources=[
                        media_bucket.bucket_arn,
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        's3:GetObject',
                        's3:PutObject',
                    ],
                    resources=[
                        media_bucket.arn_for_objects('media-input/*'),
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        's3:GetObject',
                    ],
                    resources=[
                        media_bucket.arn_for_objects(
                            'transcribe-output-raw/*'),
                    ],
                ),
            ],
        ))
Example No. 26
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        with open("stack/config.yml", 'r') as stream:
            configs = yaml.safe_load(stream)

        ### S3 core
        images_S3_bucket = _s3.Bucket(self, "ICS_IMAGES")

        images_S3_bucket.add_cors_rule(
            allowed_methods=[_s3.HttpMethods.POST],
            allowed_origins=["*"] # add API gateway web resource URL
        )

        ### SQS core
        image_deadletter_queue = _sqs.Queue(self, "ICS_IMAGES_DEADLETTER_QUEUE")
        image_queue = _sqs.Queue(self, "ICS_IMAGES_QUEUE",
            dead_letter_queue={
                "max_receive_count": configs["DeadLetterQueue"]["MaxReceiveCount"],
                "queue": image_deadletter_queue
            })

        ### api gateway core
        api_gateway = RestApi(self, 'ICS_API_GATEWAY', rest_api_name='ImageContentSearchApiGateway')
        api_gateway_resource = api_gateway.root.add_resource(configs["ProjectName"])
        api_gateway_landing_page_resource = api_gateway_resource.add_resource('web')
        api_gateway_get_signedurl_resource = api_gateway_resource.add_resource('signedUrl')
        api_gateway_image_search_resource = api_gateway_resource.add_resource('search')

        ### landing page function
        get_landing_page_function = Function(self, "ICS_GET_LANDING_PAGE",
            function_name="ICS_GET_LANDING_PAGE",
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.asset("./src/landingPage"))

        get_landing_page_integration = LambdaIntegration(
            get_landing_page_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_landing_page_resource.add_method('GET', get_landing_page_integration,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])

        ### cognito
        required_attribute = _cognito.StandardAttribute(required=True)

        users_pool = _cognito.UserPool(self, "ICS_USERS_POOL",
            auto_verify=_cognito.AutoVerifiedAttrs(email=True), #required for self sign-up
            standard_attributes=_cognito.StandardAttributes(email=required_attribute), #required for self sign-up
            self_sign_up_enabled=configs["Cognito"]["SelfSignUp"])

        user_pool_app_client = _cognito.CfnUserPoolClient(self, "ICS_USERS_POOL_APP_CLIENT", 
            supported_identity_providers=["COGNITO"],
            allowed_o_auth_flows=["implicit"],
            allowed_o_auth_scopes=configs["Cognito"]["AllowedOAuthScopes"],
            user_pool_id=users_pool.user_pool_id,
            callback_ur_ls=[api_gateway_landing_page_resource.url],
            allowed_o_auth_flows_user_pool_client=True,
            explicit_auth_flows=["ALLOW_REFRESH_TOKEN_AUTH"])

        user_pool_domain = _cognito.UserPoolDomain(self, "ICS_USERS_POOL_DOMAIN", 
            user_pool=users_pool, 
            cognito_domain=_cognito.CognitoDomainOptions(domain_prefix=configs["Cognito"]["DomainPrefix"]))

        ### get signed URL function
        get_signedurl_function = Function(self, "ICS_GET_SIGNED_URL",
            function_name="ICS_GET_SIGNED_URL",
            environment={
                "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
                "DEFAULT_SIGNEDURL_EXPIRY_SECONDS": configs["Functions"]["DefaultSignedUrlExpirySeconds"]
            },
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.asset("./src/getSignedUrl"))

        get_signedurl_integration = LambdaIntegration(
            get_signedurl_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_get_signedurl_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            rest_api_id=api_gateway_get_signedurl_resource.rest_api.rest_api_id,
            name="ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            type="COGNITO_USER_POOLS",
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_get_signedurl_resource.add_method('GET', get_signedurl_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]
            ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_get_signedurl_authorizer.ref)

        images_S3_bucket.grant_put(get_signedurl_function, objects_key_pattern="new/*")

        ### image massage function
        image_massage_function = Function(self, "ICS_IMAGE_MASSAGE",
            function_name="ICS_IMAGE_MASSAGE",
            timeout=core.Duration.seconds(6),
            runtime=Runtime.PYTHON_3_7,
            environment={"ICS_IMAGE_MASSAGE": image_queue.queue_name},
            handler="main.handler",
            code=Code.asset("./src/imageMassage"))

        images_S3_bucket.grant_write(image_massage_function, "processed/*")
        images_S3_bucket.grant_delete(image_massage_function, "new/*")
        images_S3_bucket.grant_read(image_massage_function, "new/*")
        
        new_image_added_notification = _s3notification.LambdaDestination(image_massage_function)

        images_S3_bucket.add_event_notification(_s3.EventType.OBJECT_CREATED, 
            new_image_added_notification, 
            _s3.NotificationKeyFilter(prefix="new/")
            )

        image_queue.grant_send_messages(image_massage_function)

        ### image analyzer function
        image_analyzer_function = Function(self, "ICS_IMAGE_ANALYSIS",
            function_name="ICS_IMAGE_ANALYSIS",
            runtime=Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(10),
            environment={
                "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
                "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
                "REGION": core.Aws.REGION,
                },
            handler="main.handler",
            code=Code.asset("./src/imageAnalysis")) 

        image_analyzer_function.add_event_source(_lambda_event_source.SqsEventSource(queue=image_queue, batch_size=10))
        # the analyzer consumes from the queue; the massage function only sends to it
        image_queue.grant_consume_messages(image_analyzer_function)

        lambda_rekognition_access = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW, 
            actions=["rekognition:DetectLabels", "rekognition:DetectModerationLabels"],
            resources=["*"]                    
        )

        image_analyzer_function.add_to_role_policy(lambda_rekognition_access)
        images_S3_bucket.grant_read(image_analyzer_function, "processed/*")

        ### API gateway finalizing
        self.add_cors_options(api_gateway_get_signedurl_resource)
        self.add_cors_options(api_gateway_landing_page_resource)
        self.add_cors_options(api_gateway_image_search_resource)

        ### database 
        database_secret = _secrets_manager.Secret(self, "ICS_DATABASE_SECRET",
            secret_name="rds-db-credentials/image-content-search-rds-secret",
            generate_secret_string=_secrets_manager.SecretStringGenerator(
                generate_string_key='password',
                secret_string_template='{"username": "******"}',
                exclude_punctuation=True,
                exclude_characters='/@\" \\\'',
                require_each_included_type=True
            )
        )

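        # enable_http_endpoint turns on the RDS Data API, which is how the image
        # data function below reaches the cluster (via CLUSTER_ARN + CREDENTIALS_ARN)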
        database = _rds.CfnDBCluster(self, "ICS_DATABASE",
            engine=_rds.DatabaseClusterEngine.aurora_mysql(version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
            engine_mode="serverless",
            database_name=configs["Database"]["Name"],
            enable_http_endpoint=True,
            deletion_protection=configs["Database"]["DeletionProtection"],
            master_username=database_secret.secret_value_from_json("username").to_string(),
            master_user_password=database_secret.secret_value_from_json("password").to_string(),
            scaling_configuration=_rds.CfnDBCluster.ScalingConfigurationProperty(
                auto_pause=configs["Database"]["Scaling"]["AutoPause"],
                min_capacity=configs["Database"]["Scaling"]["Min"],
                max_capacity=configs["Database"]["Scaling"]["Max"],
                seconds_until_auto_pause=configs["Database"]["Scaling"]["SecondsToAutoPause"]
            ),
        )

        database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(core.Aws.REGION, core.Aws.ACCOUNT_ID, database.ref)
   
        secret_target = _secrets_manager.CfnSecretTargetAttachment(self,"ICS_DATABASE_SECRET_TARGET",
            target_type="AWS::RDS::DBCluster",
            target_id=database.ref,
            secret_id=database_secret.secret_arn
        )

        secret_target.node.add_dependency(database)

        ### database function
        image_data_function_role = _iam.Role(self, "ICS_IMAGE_DATA_FUNCTION_ROLE",
            role_name="ICS_IMAGE_DATA_FUNCTION_ROLE",
            assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaVPCAccessExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSDataFullAccess")
            ]
        )
        
        image_data_function = Function(self, "ICS_IMAGE_DATA",
            function_name="ICS_IMAGE_DATA",
            runtime=Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(5),
            role=image_data_function_role,
            environment={
                "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
                "CLUSTER_ARN": database_cluster_arn,
                "CREDENTIALS_ARN": database_secret.secret_arn,
                "DB_NAME": database.database_name,
                "REGION": core.Aws.REGION
                },
            handler="main.handler",
            code=Code.asset("./src/imageData")
        ) 

        image_search_integration = LambdaIntegration(
            image_data_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_image_search_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            rest_api_id=api_gateway_image_search_resource.rest_api.rest_api_id,
            name="ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            type="COGNITO_USER_POOLS", 
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_image_search_resource.add_method('POST', image_search_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]
            ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_image_search_authorizer.ref)


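        # the image data function also needs Amazon Translate for the search flow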
        lambda_access_search = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW, 
            actions=["translate:TranslateText"],
            resources=["*"]            
        ) 

        image_data_function.add_to_role_policy(lambda_access_search)

        ### custom resource
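        # a Provider-backed custom resource (Custom::SchemaCreation) invokes the
        # image data function once at deploy time so it can create the database schema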
        lambda_provider = Provider(self, 'ICS_IMAGE_DATA_PROVIDER', 
            on_event_handler=image_data_function
        )

        core.CustomResource(self, 'ICS_IMAGE_DATA_RESOURCE', 
            service_token=lambda_provider.service_token,
            pascal_case_properties=False,
            resource_type="Custom::SchemaCreation",
            properties={
                "source": "Cloudformation"
            }
        )

        ### event bridge
        event_bus = _events.EventBus(self, "ICS_IMAGE_CONTENT_BUS")

        event_rule = _events.Rule(self, "ICS_IMAGE_CONTENT_RULE",
            rule_name="ICS_IMAGE_CONTENT_RULE",
            description="The event from image analyzer to store the data",
            event_bus=event_bus,
            event_pattern=_events.EventPattern(resources=[image_analyzer_function.function_arn]),
        )

        event_rule.add_target(_event_targets.LambdaFunction(image_data_function))

        event_bus.grant_put_events(image_analyzer_function)
        image_analyzer_function.add_environment("EVENT_BUS", event_bus.event_bus_name)
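        # note: the rule above matches on the event's "resources" field, so the
        # analyzer must publish with its own function ARN in each entry, e.g.
        # events.put_events(Entries=[{..., "Resources": [function_arn], ...}])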

        ### outputs
        core.CfnOutput(self, 'CognitoHostedUILogin',
            value='https://{}.auth.{}.amazoncognito.com/login?client_id={}&response_type=token&scope={}&redirect_uri={}'.format(user_pool_domain.domain_name, core.Aws.REGION, user_pool_app_client.ref, '+'.join(user_pool_app_client.allowed_o_auth_scopes), api_gateway_landing_page_resource.url),
            description='The Cognito Hosted UI Login Page'
        )
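The handler code under ./src/getSignedUrl is not included in the example above. A minimal sketch of what its main.handler could look like, assuming it returns a presigned S3 PUT URL under the new/ prefix (the query-string contract and response shape are illustrative assumptions, not the project's actual code):

# hypothetical ./src/getSignedUrl/main.py -- illustrative sketch only
import json
import os

import boto3

s3 = boto3.client("s3")


def handler(event, context):
    # environment variables injected by the ICS_GET_SIGNED_URL function above
    bucket = os.environ["ICS_IMAGES_BUCKET"]
    expiry = int(os.environ["DEFAULT_SIGNEDURL_EXPIRY_SECONDS"])

    # assumed contract: the client passes the target file name as ?file=...
    file_name = (event.get("queryStringParameters") or {}).get("file", "upload.jpg")

    # the stack grants this function s3:PutObject on new/* only
    url = s3.generate_presigned_url(
        "put_object",
        Params={"Bucket": bucket, "Key": "new/{}".format(file_name)},
        ExpiresIn=expiry,
    )
    return {
        "statusCode": 200,
        "headers": {"Access-Control-Allow-Origin": "*"},
        "body": json.dumps({"uploadURL": url}),
    }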
Ejemplo n.º 27
0
    def __init__(self, scope: core.Construct, id: str, config_dict,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        """ Get VPC details """
        Ivpc = ec2.Vpc.from_lookup(self, "VPC", vpc_id=config_dict['vpc_id'])
        """ Get sunet seclection context created """
        subnet_1 = ec2.Subnet.from_subnet_attributes(
            self,
            "subnet_1",
            subnet_id=config_dict['SubnetIds'].split(",")[0],
            availability_zone=config_dict['AvailabilityZones'].split(",")[0])
        subnet_2 = ec2.Subnet.from_subnet_attributes(
            self,
            "subnet_2",
            subnet_id=config_dict['SubnetIds'].split(",")[1],
            availability_zone=config_dict['AvailabilityZones'].split(",")[1])
        """ Create Security Group for Lambda Functions """
        lambda_security_group = "datalake-lambda-sg"

        createLambdaSecurityGroup = ec2.SecurityGroup(
            self,
            "createLambdaSecurityGroup",
            vpc=Ivpc,
            allow_all_outbound=True,
            description="This security group will be used for Lambda Funcs",
            security_group_name=lambda_security_group)
        """ Create the Datalake Bucket """
        createDatalakeBucket = s3.Bucket(
            self,
            "createCompRegBucket",
            bucket_name=config_dict['datalake_bucket_name'],
            block_public_access=s3.BlockPublicAccess(
                block_public_acls=True,
                block_public_policy=True,
                ignore_public_acls=True,
                restrict_public_buckets=True))

        core.CfnOutput(self,
                       "createCompRegBucketName",
                       value=createDatalakeBucket.bucket_name)
        """ Create Comp Reg Lambda Function """
        createCompRegLambda = _lambda.Function(
            self,
            "createCompRegLambda",
            function_name="datalake-comp-reg-trigger",
            description=
            "This lambda function will trigger the compound reg pipeline.",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="trigger_compound_reg_pipeline.lambda_handler",
            code=_lambda.Code.asset('lambdas'),
            timeout=core.Duration.seconds(90),
            vpc=Ivpc,
            vpc_subnets=ec2.SubnetSelection(subnets=[subnet_1, subnet_2]),
            security_group=createLambdaSecurityGroup,
            initial_policy=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=["s3:*", "batch:*"],
                                    resources=["*"])
            ])
        """ Add s3 event trigger to above lambda function """
        createCompRegLambda.add_event_source(
            S3EventSource(createDatalakeBucket,
                          events=[s3.EventType.OBJECT_CREATED],
                          filters=[
                              s3.NotificationKeyFilter(
                                  prefix="compound_reg/triggers/",
                                  suffix=".trigger")
                          ]))
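The trigger function's code in lambdas/ is not shown. Given the batch:* permissions and the .trigger-file event source above, a plausible sketch of trigger_compound_reg_pipeline.lambda_handler (the job queue and job definition names are invented placeholders):

# hypothetical lambdas/trigger_compound_reg_pipeline.py -- illustrative sketch only
import boto3

batch = boto3.client("batch")


def lambda_handler(event, context):
    # one record per .trigger object dropped under compound_reg/triggers/
    for record in event["Records"]:
        bucket = record["s3"]["bucket"]["name"]
        key = record["s3"]["object"]["key"]

        # queue/definition names are placeholders, not taken from the stack
        batch.submit_job(
            jobName="compound-reg-pipeline",
            jobQueue="datalake-job-queue",
            jobDefinition="compound-reg-job-def",
            parameters={"bucket": bucket, "trigger_key": key},
        )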
Ejemplo n.º 28
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Get the DynamoDB environment variables from context
        DYNAMODB_ENV = self.node.try_get_context('dynamodb')

        # Create lambda function. It gets the code from the ./assets folder
        from_s3_to_dynamo_db_function = _lambda.Function(
            self,
            "FromS3ToDynamoDBFunction",
            function_name="from-s3-to-dynamodb",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="s3_trigger.lambda_handler",
            code=_lambda.Code.asset("./cdk/assets"),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                'LOG_LEVEL': 'INFO',
                'TABLE_NAME': DYNAMODB_ENV['TABLE_NAME']
            })

        # create log group to manage it with cdk
        _logs.LogGroup(
            self,
            "FromS3ToDynamoDBLogGroup",
            log_group_name=
            f"/aws/lambda/{from_s3_to_dynamo_db_function.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY)

        # create s3 bucket with encryption, versioning and blocked public access
        s3_to_dynamo_db_bucket = _s3.Bucket(
            self,
            "S3ToDynamoDBBucket",
            bucket_name="s3-to-dynamo-db-workshop",
            encryption=_s3.BucketEncryption.S3_MANAGED,
            versioned=True,
            block_public_access=_s3.BlockPublicAccess.BLOCK_ALL)

        # Grant read access to Lambda Function to access S3 Bucket
        s3_to_dynamo_db_bucket.grant_read(from_s3_to_dynamo_db_function)

        # Create s3 notification for lambda function
        notification = _s3_notifications.LambdaDestination(
            from_s3_to_dynamo_db_function)

        # Assign notification for the s3 event type
        s3_to_dynamo_db_bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED, notification,
            _s3.NotificationKeyFilter(suffix='.csv'))

        # Create DynamoDb table
        dynamo_db = _dynamodb.Table(
            self,
            'FromS3ToDynamoDBTable',
            table_name=DYNAMODB_ENV['TABLE_NAME'],
            billing_mode=_dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=_dynamodb.Attribute(
                name=DYNAMODB_ENV['PARTITION_KEY_NAME'],
                type=_dynamodb.AttributeType.STRING))

        # Grant full access to Lambda Function to access DynamoDb
        dynamo_db.grant_full_access(from_s3_to_dynamo_db_function)
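The s3_trigger handler in ./cdk/assets is not shown. A minimal sketch of what it could do, assuming the CSV header row names the DynamoDB attributes, including the partition key:

# hypothetical ./cdk/assets/s3_trigger.py -- illustrative sketch only
import csv
import io
import os

import boto3

s3 = boto3.client("s3")
table = boto3.resource("dynamodb").Table(os.environ["TABLE_NAME"])


def lambda_handler(event, context):
    for record in event["Records"]:
        bucket = record["s3"]["bucket"]["name"]
        key = record["s3"]["object"]["key"]

        body = s3.get_object(Bucket=bucket, Key=key)["Body"].read().decode("utf-8")

        # batch_writer buffers writes and retries unprocessed items automatically
        with table.batch_writer() as writer:
            for row in csv.DictReader(io.StringIO(body)):
                writer.put_item(Item=row)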
Ejemplo n.º 29
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ################################################
        # DynamoDB
        ################################################
        ddb_sims_tbl = ddb.Table(self,
                                 'RecommendationSIMS',
                                 table_name='RecommendationSIMS',
                                 removal_policy=core.RemovalPolicy.DESTROY,
                                 partition_key={
                                     'name': 'itemid',
                                     'type': ddb.AttributeType.STRING
                                 },
                                 billing_mode=ddb.BillingMode.PROVISIONED,
                                 read_capacity=5,
                                 write_capacity=50)

        ddb_sims_tbl_rw_policy_statement = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[ddb_sims_tbl.table_arn],
            actions=[
                "dynamodb:BatchGetItem", "dynamodb:Describe*",
                "dynamodb:List*", "dynamodb:GetItem", "dynamodb:Query",
                "dynamodb:Scan", "dynamodb:BatchWriteItem",
                "dynamodb:DeleteItem", "dynamodb:PutItem",
                "dynamodb:UpdateItem"
            ])

        ################################################
        # Lambda + S3
        ################################################
        ddb_lambda_fn = _lambda.Function(
            self,
            'UpsertManhwakyungToDynmodbDB',
            function_name='UpsertManhwakyungToDynmodbDB',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('src/lambda/UpsertManhwakyungToDynmodbDB'),
            handler='manhwakyung_s3_to_dynamodb.lambda_handler',
            description=
            'Store recommendation results in dynamoDB (Recipe: SIMS)',
            environment={
                'REGION_NAME': kwargs['env'].region,
                'DDB_SIMS_TABLE': ddb_sims_tbl.table_name
            },
            timeout=core.Duration.minutes(1))

        s3_bucket = s3.Bucket(
            self,
            "data",
            #removal_policy=core.RemovalPolicy.DESTROY
        )

        # https://stackoverflow.com/questions/60087302/how-to-add-resource-policy-to-existing-s3-bucket-with-cdk-in-javascript
        # https://stackoverflow.com/questions/60282173/lookup-s3-bucket-and-add-a-trigger-to-invoke-a-lambda
        #s3_bucket = s3.Bucket.from_bucket_name(self, "ExistingBucket",
        #    bucket_name="team3-recommendation-system-personalize-data-jhhwang-cdk")

        s3_event_filter = s3.NotificationKeyFilter(
            prefix="results/by-title-id/", suffix=".out")
        s3_event_source = S3EventSource(
            s3_bucket,
            events=[s3.EventType.OBJECT_CREATED_PUT],
            filters=[s3_event_filter])
        ddb_lambda_fn.add_event_source(s3_event_source)

        s3_read_statement = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            #resources = [s3_bucket.bucket_arn, "{}/*".format(s3_bucket.bucket_arn)],
            resources=["*"],
            actions=["s3:Get*", "s3:List*"])

        ddb_lambda_fn.add_to_role_policy(s3_read_statement)
        ddb_lambda_fn.add_to_role_policy(ddb_sims_tbl_rw_policy_statement)

        ##############################################################
        # APIGW + Lambda
        ##############################################################
        sims_title_lambda_fn = _lambda.Function(
            self,
            'RecommendSimsByTitleAPI',
            function_name='RecommendSimsByTitleAPI',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('src/lambda/RecommendSimsByTitleAPI'),
            handler='recommend_sims_by_title.lambda_handler',
            description='API to provide recommended results (Recipe: SIMS)',
            environment={
                'REGION_NAME': kwargs['env'].region,
                'DDB_SIMS_TABLE': ddb_sims_tbl.table_name,
                'DEF_ITEM_LIMIT': "10",
            },
            timeout=core.Duration.minutes(1))
        ddb_sims_tbl.grant_read_write_data(sims_title_lambda_fn)

        sims_title_api = apigw.LambdaRestApi(
            self,
            "RecommendSimsByTitle",
            handler=sims_title_lambda_fn,
            proxy=False,
            rest_api_name="RecommendSimsByTitle",
            description=
            'Webtoon is recommended based on webtoon id. (Recipe: SIMS)',
            endpoint_types=[apigw.EndpointType.REGIONAL],
            deploy=True,
            deploy_options=apigw.StageOptions(stage_name="v1"),
        )

        recomm_titles = sims_title_api.root.add_resource('recommended-titles')
        by_title = recomm_titles.add_resource('by-title')
        by_title.add_method("GET")
        by_title_id = by_title.add_resource('{id}')
        by_title_id.add_method("GET")
        by_title_id.add_cors_preflight(allow_origins=["*"],
                                       allow_methods=["*"])
Ejemplo n.º 30
0
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.output_bucket = aws_s3.Bucket(
            self, "BucketTwitterStreamOutput",
            bucket_name=self.stack_name,
        )

        self.bucket_url = self.output_bucket.bucket_regional_domain_name

        # Because kinesis firehose bindings are to direct CF, we have to create IAM policy/role and attach on our own
        self.iam_role = aws_iam.Role(
            self, "IAMRoleTwitterStreamKinesisFHToS3",
            role_name="KinesisFirehoseToS3-{}".format(self.stack_name),
            assumed_by=aws_iam.ServicePrincipal(service='firehose.amazonaws.com'),
        )

        # S3 bucket actions
        self.s3_iam_policy_statement = aws_iam.PolicyStatement()
        actions = ["s3:GetBucketLocation", "s3:GetObject", "s3:ListBucket", "s3:ListBucketMultipartUploads", "s3:PutObject"]
        for action in actions:
            self.s3_iam_policy_statement.add_actions(action)
        self.s3_iam_policy_statement.add_resources(self.output_bucket.bucket_arn)
        self.s3_iam_policy_statement.add_resources(self.output_bucket.bucket_arn + "/*")

        # CW error log setup
        self.s3_error_logs_group = aws_logs.LogGroup(
            self, "S3ErrorLogsGroup",
            log_group_name="{}-s3-errors".format(self.stack_name)
        )

        self.s3_error_logs_stream = aws_logs.LogStream(
            self, "S3ErrorLogsStream",
            log_group=self.s3_error_logs_group,
            log_stream_name='s3Backup'
        )

        self.firehose = aws_kinesisfirehose.CfnDeliveryStream(
            self, "FirehoseTwitterStream",
            delivery_stream_name="{}-raw".format(self.stack_name),
            delivery_stream_type="DirectPut",
            s3_destination_configuration={
                'bucketArn': self.output_bucket.bucket_arn,
                'bufferingHints': {
                    'intervalInSeconds': 120,
                    'sizeInMBs': 10
                },
                'compressionFormat': 'UNCOMPRESSED',
                'roleArn': self.iam_role.role_arn,
                'cloudWatchLoggingOptions': {
                    'enabled': True,
                    'logGroupName': "{}-raw".format(self.stack_name),
                    'logStreamName': 's3BackupRaw'
                },
                'prefix': 'twitter-raw/'
            },
        )

        # TODO: Only attach what's needed for this policy; right now I'm lazily attaching everything
        self.iam_policy = aws_iam.Policy(
            self, "IAMPolicyTwitterStreamKinesisFHToS3",
            policy_name="KinesisFirehoseToS3-{}".format(self.stack_name),
            statements=[self.s3_iam_policy_statement],
        )

        self.iam_policy.attach_to_role(self.iam_role)

        # Because kinesis firehose bindings are to direct CF, we have to create IAM policy/role and attach on our own
        self.curator_firehose = aws_kinesisfirehose.CfnDeliveryStream(
            self, "CuratorFirehoseStream",
            delivery_stream_name="{}-curator".format(self.stack_name),
            delivery_stream_type="DirectPut",
            s3_destination_configuration={
                'bucketArn': self.output_bucket.bucket_arn,
                'bufferingHints': {
                    'intervalInSeconds': 120,
                    'sizeInMBs': 10
                },
                'compressionFormat': 'UNCOMPRESSED',
                'roleArn': self.iam_role.role_arn,
                'cloudWatchLoggingOptions': {
                    'enabled': True,
                    'logGroupName': "{}-curator".format(self.stack_name),
                    'logStreamName': 's3BackupCurator'
                },
                'prefix': 'twitter-curated/'
            },
        )

        def zip_package():
            # package the Lambda source in ./src into curator-lambda.zip
            # next to the CDK app (shells out to `zip` via the sh module)
            cwd = os.getcwd()
            file_name = 'curator-lambda.zip'
            zip_file = cwd + '/' + file_name

            os.chdir('src/')
            sh.zip('-r9', zip_file, '.')
            os.chdir(cwd)

            return file_name, zip_file

        _, zip_file = zip_package()

        self.twitter_stream_curator_lambda_function = aws_lambda.Function(
            self, "TwitterStreamCuratorLambdaFunction",
            function_name="{}-curator".format(self.stack_name),
            code=aws_lambda.AssetCode(zip_file),
            handler="sentiment_analysis.lambda_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            tracing=aws_lambda.Tracing.ACTIVE,
            description="Triggers from S3 PUT event for twitter stream data and transorms it to clean json syntax with sentiment analysis attached",
            environment={
                "STACK_NAME": self.stack_name,
                "FIREHOSE_STREAM": self.curator_firehose.delivery_stream_name
            },
            memory_size=128,
            timeout=core.Duration.seconds(120),
            log_retention=aws_logs.RetentionDays.ONE_WEEK,
        )

        # Permission to talk to comprehend for sentiment analysis
        self.comprehend_iam_policy_statement = aws_iam.PolicyStatement()
        self.comprehend_iam_policy_statement.add_actions('comprehend:*')
        self.comprehend_iam_policy_statement.add_all_resources()
        self.twitter_stream_curator_lambda_function.add_to_role_policy(self.comprehend_iam_policy_statement)

        # Permission to put in kinesis firehose
        self.curator_firehose_iam_policy_statement = aws_iam.PolicyStatement()
        self.curator_firehose_iam_policy_statement.add_actions('firehose:Put*')
        self.curator_firehose_iam_policy_statement.add_resources(self.curator_firehose.attr_arn)
        self.twitter_stream_curator_lambda_function.add_to_role_policy(self.curator_firehose_iam_policy_statement)

        # Permission for the curator lambda to read raw objects from the bucket
        self.output_bucket.grant_read(self.twitter_stream_curator_lambda_function)

        self.twitter_stream_curator_lambda_function.add_event_source(
            aws_lambda_event_sources.S3EventSource(
                bucket=self.output_bucket,
                events=[
                    aws_s3.EventType.OBJECT_CREATED
                ],
                filters=[
                    aws_s3.NotificationKeyFilter(
                        prefix="twitter-raw/"
                    )
                ]
            )
        )
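The curator's sentiment_analysis handler ships inside the zipped src/ directory and is not shown. A hedged sketch, assuming one JSON tweet per line in each twitter-raw/ object and English text (both assumptions, not confirmed by the stack):

# hypothetical src/sentiment_analysis.py -- illustrative sketch only
import json
import os

import boto3

s3 = boto3.client("s3")
comprehend = boto3.client("comprehend")
firehose = boto3.client("firehose")


def lambda_handler(event, context):
    stream = os.environ["FIREHOSE_STREAM"]
    for record in event["Records"]:
        bucket = record["s3"]["bucket"]["name"]
        key = record["s3"]["object"]["key"]

        body = s3.get_object(Bucket=bucket, Key=key)["Body"].read().decode("utf-8")
        for line in filter(None, body.splitlines()):
            tweet = json.loads(line)
            # attach the Comprehend sentiment, then forward to the curated stream
            sentiment = comprehend.detect_sentiment(
                Text=tweet.get("text", ""), LanguageCode="en")
            tweet["sentiment"] = sentiment["Sentiment"]
            firehose.put_record(
                DeliveryStreamName=stream,
                Record={"Data": (json.dumps(tweet) + "\n").encode("utf-8")},
            )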