Code Example #1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        s3_source, s3_destination = self.s3_buckets()
        s3_batch_role = self.s3_batch_role(s3_source, s3_destination)

        vpc, subnets = self.vpc_network(s3_destination.bucket_arn)
        cluster, task_definition = self.ecs_cluster(vpc,
                                                    s3_destination.bucket_arn)

        fn_create_batch_job, fn_process_transfer_task, fn_create_s3batch_manifest = self.lambda_functions(
            s3_batch_role, s3_destination.bucket_name, cluster.cluster_name,
            subnets, task_definition)

        self.s3_grant_fn_create_s3batch_manifest(s3_source, s3_destination,
                                                 fn_create_s3batch_manifest)
        s3_destination.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            s3n.LambdaDestination(fn_create_s3batch_manifest), {
                "prefix":
                f'{self.s3_source_bucket_name}/demoDataBucketInventory0/',
                "suffix": '.json'
            })

        s3_destination.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            s3n.LambdaDestination(fn_create_batch_job), {
                "prefix": f'csv_manifest/',
                "suffix": '.csv'
            })

        self.event_rules(fn_process_transfer_task)
        self.ssm_parameter_store(task_definition.obtain_execution_role())
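The helper methods called above (s3_buckets, s3_batch_role, vpc_network, lambda_functions, and so on) live elsewhere in the same stack class and are not part of this listing. As a hedged illustration only, the simplest of them, s3_buckets, might look roughly like this; the construct IDs and removal policy are assumptions, not the original implementation:

    def s3_buckets(self):
        # source bucket holds the objects to be transferred; destination receives
        # the inventory/manifest files that drive the S3 Batch job
        s3_source = s3.Bucket(self, "SourceBucket",
                              removal_policy=core.RemovalPolicy.DESTROY)
        s3_destination = s3.Bucket(self, "DestinationBucket",
                                   removal_policy=core.RemovalPolicy.DESTROY)
        return s3_source, s3_destination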
Code Example #2
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        shoe_company_ingestion_bucket_name = 'devassoc-shoe-company-ingestion'
        shoe_company_ingestion_bucket = s3.Bucket(
            self,
            'shoe-company-ingestion-bucket',
            bucket_name=shoe_company_ingestion_bucket_name,
            access_control=s3.BucketAccessControl.PRIVATE,
            removal_policy=core.RemovalPolicy.DESTROY,
            auto_delete_objects=True)
        core.CfnOutput(self,
                       'new-ingestion-bucket',
                       value=shoe_company_ingestion_bucket.bucket_name)

        shoe_company_json_bucket_name = 'devassoc-shoe-company-json'
        shoe_company_json_bucket = s3.Bucket(
            self,
            'shoe-company-json-bucket',
            bucket_name=shoe_company_json_bucket_name,
            access_control=s3.BucketAccessControl.PRIVATE,
            removal_policy=core.RemovalPolicy.DESTROY,
            auto_delete_objects=True)
        core.CfnOutput(self,
                       'new-json-bucket',
                       value=shoe_company_json_bucket.bucket_name)

        lambda_role = iam.Role(
            self,
            'lambda-role',
            role_name='PayrollProcessingLambdaRole',
            description=
            'Provides lambda with access to s3 and cloudwatch to execute payroll processing',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))
        lambda_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name('AWSLambdaExecute'))

        this_dir = path.dirname(__file__)
        conversion_function = lam.Function(
            self,
            'conversion-function',
            function_name='PayrollProcessing',
            runtime=lam.Runtime.PYTHON_3_7,
            handler='conversion.lambda_handler',
            code=lam.Code.from_asset(path.join(this_dir, 'lambda')),
            role=lambda_role,
            description=
            'Converts payroll csvs to json and puts results in s3 bucket',
            timeout=core.Duration.minutes(3),
            memory_size=128)
        conversion_function.add_permission(
            'lambdas3permission',
            principal=iam.ServicePrincipal('s3.amazonaws.com'),
            action='lambda:InvokeFunction',
            source_arn=shoe_company_ingestion_bucket.bucket_arn,
            source_account=kwargs.get('env')['account'])
        shoe_company_ingestion_bucket.add_object_created_notification(
            s3n.LambdaDestination(conversion_function),
            s3.NotificationKeyFilter(suffix='.csv'))
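The stack above wires conversion.lambda_handler to the ingestion bucket but does not include the handler itself. A minimal sketch of a CSV-to-JSON converter in that shape could look like the following; the conversion.py file name, the JSON_BUCKET environment variable, and the output key layout are assumptions (the stack shown does not pass the JSON bucket name to the function):

# conversion.py -- hypothetical handler body for the PayrollProcessing function above
import csv
import json
import os
import urllib.parse

import boto3

s3_client = boto3.client("s3")
# assumed environment variable; not set by the stack shown above
JSON_BUCKET = os.environ.get("JSON_BUCKET", "devassoc-shoe-company-json")


def lambda_handler(event, context):
    for record in event.get("Records", []):
        bucket = record["s3"]["bucket"]["name"]
        key = urllib.parse.unquote_plus(record["s3"]["object"]["key"])
        body = s3_client.get_object(Bucket=bucket, Key=key)["Body"].read().decode("utf-8")
        rows = list(csv.DictReader(body.splitlines()))
        out_key = key.rsplit(".", 1)[0] + ".json"
        s3_client.put_object(Bucket=JSON_BUCKET, Key=out_key,
                             Body=json.dumps(rows).encode("utf-8"))
    return {"processed": len(event.get("Records", []))}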
Code Example #3
File: Zach_S3_Stack.py  Project: WZQ1397/config
  def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    ZachS3Bucket = s3.Bucket(self, 'ZachS3Bucket', versioned=False,
                             removal_policy=core.RemovalPolicy.DESTROY,
                             # block_public_access=s3.BlockPublicAccess()
                             )
    ZachS3BucketAllowMethod = [s3.HttpMethods.GET, s3.HttpMethods.POST]
    ZachS3Bucket.add_cors_rule(allowed_methods=ZachS3BucketAllowMethod,
                               # note: CORS allowed_origins are matched against the Origin
                               # header (e.g. 'https://example.com'); a CIDR range will not match
                               allowed_origins=['192.168.0.0/24'])
    ZachS3BucketLongTermStorageClass = s3.StorageClass.INFREQUENT_ACCESS
    ZachS3Bucket.add_lifecycle_rule(enabled=True,
                                    expiration=core.Duration.days(42),
                                    prefix="Zach-tsjr",
                                    tag_filters={"Environment": "Dev", "FileType": "Log"},
                                    transitions=[s3.Transition(storage_class=ZachS3BucketLongTermStorageClass,
                                                               transition_after=core.Duration.days(30))]
                                    )
    s3_logger_handler = lm.Function(
      self, "ZachS3BucketLambda",
      runtime=lm.Runtime.PYTHON_3_7,
      handler="s3recorder.handler",
      # forward-slash (POSIX) path so the asset resolves on any build host
      code=lm.Code.asset('aws_cdk_python/Zach_Lambda_Stack'),
      environment={
        "S3_BUCKET": ZachS3Bucket.bucket_name
      }
    )
    notify_lambda = s3n.LambdaDestination(s3_logger_handler)
    ZachS3Bucket.add_event_notification(s3.EventType.OBJECT_CREATED, notify_lambda)
    # ZachS3Bucket.grant_write(notify_lambda)
    core.CfnOutput(self, id="ZachS3BucketARN", value=ZachS3Bucket.bucket_arn)
    core.CfnOutput(self, id="ZachS3BucketName", value=ZachS3Bucket.bucket_name)
    core.CfnOutput(self, id="ZachS3BucketOverview", value=ZachS3Bucket.to_string())
Code Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        lambdaFn = _lambda.Function(self, "SampleLambdaFunction", 
            code=_lambda.Code.from_asset('function/'),
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            function_name="sample_lambda_function"
        )

        # Add an environment variable
        lambdaFn.add_environment(key="STAGE", value="DEV")

        # Create an S3 bucket and configure an event notification
        bucket = _s3.Bucket(self, "SampleBucket", bucket_name="kimi-first-cdk-bucket")
        notification = aws_s3_notifications.LambdaDestination(lambdaFn)
        bucket.add_event_notification(_s3.EventType.OBJECT_CREATED, notification, _s3.NotificationKeyFilter(prefix="hoge", suffix=".csv"))

        # Configure a scheduled (cron) event rule
        rule = _events.Rule(self, "SampleEventRule", 
            rule_name="schedule_trigger_event",
            schedule=_events.Schedule.expression("cron(10 * * * ? *)")
        )
        rule.add_target(_targets.LambdaFunction(lambdaFn))
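For reference, cron(10 * * * ? *) fires at minute 10 of every hour. The same schedule can also be expressed with the typed Schedule.cron() helper; the snippet below is an alternative sketch, not an addition to the stack above:

        # equivalent rule using the typed cron helper instead of a raw expression
        rule = _events.Rule(self, "SampleEventRuleCron",
            rule_name="schedule_trigger_event_cron",
            schedule=_events.Schedule.cron(minute="10")  # minute 10 of every hour
        )
        rule.add_target(_targets.LambdaFunction(lambdaFn))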
Code Example #5
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        function = lb.Function(
            self,
            "lambda_function",
            runtime=lb.Runtime.PYTHON_3_7,
            handler="lambdahandler.lambda_handler",
            code=lb.Code.asset("./lambda/"),
            role=mm.Role.from_role_arn(
                self,
                "Role",
                "arn:aws:iam::919238404395:role/triggerrole",
                mutable=False))
        # create s3 bucket
        bucket = s3.Bucket(self,
                           "mybucket",
                           bucket_name="mydemoobuckett420",
                           public_read_access=True)
        #core.CfnOutput(self,"mybucketname", value=bucket.bucket_name)

        # create s3 notification for lambda function
        notification = nik.LambdaDestination(function)

        # assign notification for the s3 event type (ex: OBJECT_CREATED)
        bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                      notification)
Code Example #6
    def __init__(self, scope: core.Construct, id: str,
                 new_lambda: _lambda.Function, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        # Lambda Role
        role = iam.Role(
            self,
            "lambda_role",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            role_name="lambda_role")

        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"))
        # role.add_managed_policy(
        #     iam.ManagedPolicy.from_aws_managed_policy_name("AWSLambdaBasicExecutionRole")
        # )

        # create s3 buckets
        source_bucket = s3.Bucket(self,
                                  "cdk-source-bucket",
                                  bucket_name="demo-source-865")
        dest_bucket = s3.Bucket(self,
                                "cdk-dest-bucket",
                                bucket_name="demo-dest-865",
                                encryption=s3.BucketEncryption.KMS_MANAGED)

        # create lambda function
        first_function = _lambda.Function(
            self,
            "first_lambda_function",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="lambda-handler.main",
            environment={'DestinationBucket': dest_bucket.bucket_name},
            role=role,
            code=_lambda.Code.asset("./lambda/copy_object"))

        # create s3 notification for lambda function
        notification = s3_notifications.LambdaDestination(first_function)

        # assign notification for the s3 event type (ex: OBJECT_CREATED)
        source_bucket.add_object_created_notification(notification)

        new_notification = s3_notifications.LambdaDestination(new_lambda)
        dest_bucket.add_object_created_notification(new_notification)
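The copy_object asset referenced above is not included in the listing. A minimal sketch of a handler that copies each newly created object into the bucket named by the DestinationBucket environment variable might look like this; the lambda-handler.py file name matches the handler string, everything else is an assumption:

# lambda-handler.py -- hypothetical body for the copy_object function above
import os
import urllib.parse

import boto3

s3_client = boto3.client("s3")


def main(event, context):
    dest_bucket = os.environ["DestinationBucket"]
    for record in event.get("Records", []):
        source_bucket = record["s3"]["bucket"]["name"]
        key = urllib.parse.unquote_plus(record["s3"]["object"]["key"])
        # server-side copy; no need to download the object into the function
        s3_client.copy_object(Bucket=dest_bucket, Key=key,
                              CopySource={"Bucket": source_bucket, "Key": key})
    return {"copied": len(event.get("Records", []))}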
Code Example #7
File: app.py  Project: sampromises/yelp-orchestrator
    def create_yelp_parser(self):
        yelp_parser = self.create_lambda_with_error_alarm("yelp_parser")
        self.page_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED,
            aws_s3_notifications.LambdaDestination(yelp_parser),
        )
        self.yelp_parser = yelp_parser
        return self.yelp_parser
Code Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Add your stack resources below
        # Create an S3 Bucket for storing our web store assets
        kk_store = _s3.Bucket(self, "kkStore", versioned=True)

        # DynamoDB Table
        kk_store_assets_table = _dynamodb.Table(
            self,
            "kkStoreAssetsDDBTable",
            table_name="kk_store_assets_tables",
            partition_key=_dynamodb.Attribute(
                name="_id", type=_dynamodb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)

        # Read Lambda Code
        try:
            with open("advanced_use_cases/lambda_src/s3_event_processor.py",
                      mode="r") as f:
                kk_store_processor_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise  # without the source the stack cannot be synthesized

        # Deploy the lambda function
        kk_store_processor_fn = _lambda.Function(
            self,
            "kkStoreProcessorFn",
            function_name="kk_store_processor_fn",
            description="Process store events and update DDB",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(kk_store_processor_fn_code),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "DDB_TABLE_NAME": f"{kk_store_assets_table.table_name}"
            })

        # Add DynamoDB Write Privileges To Lambda
        kk_store_assets_table.grant_read_write_data(kk_store_processor_fn)

        # Create Custom Loggroup
        kk_store_lg = _logs.LogGroup(
            self,
            "kkStoreLogGroup",
            log_group_name=f"/aws/lambda/{kk_store_processor_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)

        # Create s3 notification for lambda function
        kk_store_backend = _s3_notifications.LambdaDestination(
            kk_store_processor_fn)

        # Assign notification for the s3 event type (ex: OBJECT_CREATED)
        kk_store.add_event_notification(_s3.EventType.OBJECT_CREATED,
                                        kk_store_backend)
Code Example #9
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create new IAM group and user
        group = iam.Group(self, "RekGroup")
        user = iam.User(self, "RekUser")

        # add IAM user to the new group
        user.add_to_group(group)

        # create S3 bucket to hold images
        # give new user access to the bucket
        bucket = s3.Bucket(self, 'Bucket')
        bucket.grant_read_write(user)

        # create DynamoDB table to hold Rekognition results
        table = ddb.Table(self,
                          'Classifications',
                          partition_key={
                              'name': 'image_name',
                              'type': ddb.AttributeType.STRING
                          })

        # create Lambda function
        lambda_function = _lambda.Function(
            self,
            'RekFunction',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='rekfunction.handler',
            code=_lambda.Code.from_asset(
                'rekognition_lambda_s3_trigger/lambda'),
            environment={
                'BUCKET_NAME': bucket.bucket_name,
                'TABLE_NAME': table.table_name
            })

        # add Rekognition permissions for Lambda function
        statement = iam.PolicyStatement()
        statement.add_actions("rekognition:DetectLabels")
        statement.add_resources("*")
        lambda_function.add_to_role_policy(statement)

        # create trigger for Lambda function with image type suffixes
        notification = s3_notifications.LambdaDestination(lambda_function)
        notification.bind(self, bucket)
        bucket.add_object_created_notification(
            notification, s3.NotificationKeyFilter(suffix='.jpg'))
        bucket.add_object_created_notification(
            notification, s3.NotificationKeyFilter(suffix='.jpeg'))
        bucket.add_object_created_notification(
            notification, s3.NotificationKeyFilter(suffix='.png'))

        # grant permissions for lambda to read/write to DynamoDB table and bucket
        table.grant_read_write_data(lambda_function)
        bucket.grant_read_write(lambda_function)
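The rekfunction.handler wired up above is shipped as an asset and not shown. A hedged sketch of a handler that runs DetectLabels on each uploaded image and stores the result in the DynamoDB table could look roughly like this; the labels attribute name and MaxLabels value are assumptions beyond the image_name partition key defined above:

# rekfunction.py -- hypothetical handler for the RekFunction above
import os
import urllib.parse

import boto3

rekognition = boto3.client("rekognition")
table = boto3.resource("dynamodb").Table(os.environ["TABLE_NAME"])


def handler(event, context):
    bucket = os.environ["BUCKET_NAME"]
    for record in event.get("Records", []):
        key = urllib.parse.unquote_plus(record["s3"]["object"]["key"])
        response = rekognition.detect_labels(
            Image={"S3Object": {"Bucket": bucket, "Name": key}},
            MaxLabels=10)
        labels = [label["Name"] for label in response["Labels"]]
        table.put_item(Item={"image_name": key, "labels": labels})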
Code Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        # Lambda Role
        role = iam.Role(
            self,
            "lambda_role",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            role_name="lambda_role")

        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"))
        role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSLambdaBasicExecutionRole"))

        # create s3 buckets
        source_bucket = s3.Bucket(self,
                                  "cdk-source-bucket",
                                  bucket_name="demo-source-865")

        dest_bucket = s3.Bucket(self,
                                "cdk-dest-bucket",
                                bucket_name="demo-dest-865",
                                encryption=s3.BucketEncryption.KMS_MANAGED)

        # create lambda function
        function = _lambda.Function(
            self,
            "lambda_function",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="lambda-handler.main",
            environment={'DestinationBucket': dest_bucket.bucket_name},
            role=role,
            code=_lambda.Code.asset("./lambda"))

        # create s3 notification for lambda function
        notification = aws_s3_notifications.LambdaDestination(function)

        # assign notification for the s3 event type (ex: OBJECT_CREATED)
        source_bucket.add_object_created_notification(notification)

        # This will apply the Aspect class defined below
        core.Aspects.of(self).add(EncryptionAspect())

        # This will apply an aspect to tag all resources in the stack
        core.Tags.of(self).add("Key", "Value")
        core.Tags.of(self).add("Project", "Demo-App")
Code Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        function = lambda_.Function(
            self,
            "lambda_function",
            runtime=lambda_.Runtime.PYTHON_3_7,
            handler="lambda-handler.main",
            code=lambda_.Code.asset("./lambda"),
        )

        s3 = _s3.Bucket(self, "Polly_Stage")

        notification = notifications.LambdaDestination(function)

        s3.add_event_notification(_s3.EventType.OBJECT_CREATED, notification)
Code Example #12
    def __create_s3_source_bucket(
            self, bucket_name: str,
            s3_trigger_lambda_function: aws_lambda.Function) -> aws_s3.Bucket:
        s3_bucket = aws_s3.Bucket(self,
                                  'S3SourceBucket',
                                  bucket_name=bucket_name,
                                  block_public_access=BlockPublicAccess(
                                      block_public_acls=True,
                                      block_public_policy=True,
                                      ignore_public_acls=True,
                                      restrict_public_buckets=True))
        s3_bucket.add_event_notification(
            EventType.OBJECT_CREATED,
            aws_s3_notifications.LambdaDestination(s3_trigger_lambda_function),
            NotificationKeyFilter(prefix='artifacts/', suffix='.zip'))
        return s3_bucket
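This helper only creates the bucket and its notification; a possible call site inside the same stack's __init__ is sketched below. The ArtifactHandler function name, its asset path, and the bucket name are illustrative assumptions:

        # hypothetical call site for __create_s3_source_bucket
        artifact_handler = aws_lambda.Function(
            self, 'ArtifactHandler',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='handler.main',
            code=aws_lambda.Code.from_asset('./lambda'))
        source_bucket = self.__create_s3_source_bucket(
            bucket_name='my-artifact-source-bucket',  # assumed name
            s3_trigger_lambda_function=artifact_handler)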
Code Example #13
    def __init__(
        self,
        scope: core.Construct,
        construct_id: str,
        elastic_domain: aes.Domain,
        **kwargs,
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)
        indexing_lambda = _lambda.Function(
            self,
            "IndexingHandler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset(LAMBDA_PATH),
            handler="lambda_function.lambda_handler",
            environment={
                "EMBEDDER_IP": config.get_embedder_ip(),
                "ES_URL": elastic_domain.domain_endpoint,
                "ES_USER": config.get_es_credentials()[0],
                "ES_PASSWORD": config.get_es_credentials()[1],
                "INDEX_NAME": config.get_es_index(),
            },
        )
        notification = s3n.LambdaDestination(indexing_lambda)

        block_public_access = s3.BlockPublicAccess(
            block_public_acls=True,
            block_public_policy=True,
            ignore_public_acls=True,
            restrict_public_buckets=True)
        bucket = s3.Bucket(self,
                           "DocsDestination",
                           block_public_access=block_public_access,
                           removal_policy=core.RemovalPolicy.DESTROY)
        bucket.grant_read(indexing_lambda)
        bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            notification,
            s3.NotificationKeyFilter(prefix="wikipages/"),
        )

        core.Tags.of(indexing_lambda).add("system-id", config.get_system_id())
        core.Tags.of(bucket).add("system-id", config.get_system_id())

        core.CfnOutput(self, "S3BucketName", value=bucket.bucket_name)
        core.CfnOutput(self,
                       "IndexingLambdaName",
                       value=indexing_lambda.function_name)
Code Example #14
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create Primary S3 Bucket
        main_bucket = aws_s3.Bucket(self, "multipagepdfa2i", removal_policy=core.RemovalPolicy.DESTROY)
        
        # Create sqs queue
        page_sqs = aws_sqs.Queue(
            self, "multipagepdfa2i_page_sqs",
            queue_name = "multipagepdfa2i_page_sqs",
            visibility_timeout=core.Duration.minutes(3)
        )
        
        # Create all of the Lambda Functions
        lambda_functions = self.create_lambda_functions(page_sqs)

        # Create notification that triggers kick off lambda on pdf being uploaded to kickoff
        kickoff_notification = aws_s3_notifications.LambdaDestination(lambda_functions["kickoff"])
        
        lambda_functions["analyzepdf"].add_event_source(aws_lambda_event_sources.SqsEventSource(page_sqs, batch_size=3))

        main_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED,  
            kickoff_notification,
            aws_s3.NotificationKeyFilter(prefix="uploads/", suffix="pdf")
        )

        self.configure_dynamo_table("multia2ipdf_callback", "jobid", "callback_token")
        self.configure_dynamo_table("multipagepdfa2i_upload_ids", "id", "key")       

        self.create_state_machine(lambda_functions, page_sqs)

        human_complete_target = aws_events_targets.LambdaFunction(lambda_functions["humancomplete"])

        human_review_event_pattern = aws_events.EventPattern(
            source=["aws.sagemaker"],
            detail_type=["SageMaker A2I HumanLoop Status Change"]
        )

        aws_events.Rule(self, 
            "multipadepdfa2i_HumanReviewComplete", 
            event_pattern=human_review_event_pattern,
            targets=[human_complete_target]
        )

        
Code Example #15
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create lambda function
        function = _lambda.Function(self,
                                    "lambda_function",
                                    runtime=_lambda.Runtime.PYTHON_3_7,
                                    handler="lambda-handler.main",
                                    code=_lambda.Code.from_asset("./lambda"))
        # create s3 bucket
        s3 = _s3.Bucket(self, "s3bucket")

        # create s3 notification for lambda function
        notification = aws_s3_notifications.LambdaDestination(function)

        # assign notification for the s3 event type (ex: OBJECT_CREATED)
        s3.add_event_notification(_s3.EventType.OBJECT_CREATED, notification)
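Several of these examples point handler="lambda-handler.main" at code in a local ./lambda directory without showing it. A minimal handler that simply logs the bucket and key from the S3 notification event is enough to exercise the trigger; this is a sketch, not the original projects' code:

# lambda/lambda-handler.py -- minimal hypothetical handler for the examples above
def main(event, context):
    # S3 delivers one record per created object in the notification event
    for record in event.get("Records", []):
        bucket = record["s3"]["bucket"]["name"]
        key = record["s3"]["object"]["key"]
        print(f"object created: s3://{bucket}/{key}")
    return {"records": len(event.get("Records", []))}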
Code Example #16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        function = _lambda.Function(self,
                                    "lambda_function",
                                    runtime=_lambda.Runtime.PYTHON_3_7,
                                    handler="lambda-handler.main",
                                    code=_lambda.Code.asset("./lambda"))

        s3_bucket = s3.Bucket(self, "s3Bucket")
        notification = aws_s3_notifications.LambdaDestination(function)

        s3_bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                         notification)

        queue = sqs.Queue(self, "test_queue")

        queue.grant_send_messages(function)
Code Example #17
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        # create lambda function
        function = _lambda.Function(self,
                                    "lambda_function",
                                    runtime=_lambda.Runtime.PYTHON_3_7,
                                    handler="lambda-handler.main",
                                    code=_lambda.InlineCode('boom'))

        # create s3 bucket
        s3 = _s3.Bucket(self, "s3bucket")

        # create s3 notification for lambda function
        notification = aws_s3_notifications.LambdaDestination(function)

        # assign notification for the s3 event type (ex: OBJECT_CREATED)
        s3.add_event_notification(_s3.EventType.OBJECT_CREATED, notification)
Code Example #18
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Define aws lambda function
        my_lambda = _lambda.Function(self,
                                     "HelloHandler",
                                     runtime=_lambda.Runtime.PYTHON_3_7,
                                     handler="hello.handler",
                                     code=_lambda.Code.asset('lambda'))

        # Define S3 bucket
        my_bucket = s3.Bucket(self, "ssl-s3-lambda-event-raw")

        #Create the s3 notification objects which points to Lambda
        notification = aws_s3_notifications.LambdaDestination(my_lambda)

        #Add Filters if required
        filter1 = s3.NotificationKeyFilter(prefix="home/")
        #Add event trigger from s3 to lambda
        my_bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                         notification, filter1)
Code Example #19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create the S3 bucket
        my_bucket = s3.Bucket(self,"my_s3_bucket_raw")

        #create dynamo db table
        self._table = ddb.Table(
            self, 'Employee',
            partition_key={'name': 'Emp_Id', 'type': ddb.AttributeType.NUMBER}
        )
        
        #Create lambda function
        my_lambda = _lambda.Function(
                            self,
                            id="EventFunction",
                            runtime=_lambda.Runtime.PYTHON_3_7,
                            code=_lambda.Code.asset("lambda"),
                            handler="event.handler" ,
                            environment={
                                "Table_Name":self._table.table_name
                            }                           
        )

        #Add Filters
        filter1=s3.NotificationKeyFilter(prefix="home/",suffix=".json")
        
        #Create Notification
        s3_lambda_notification = notification.LambdaDestination(my_lambda)

        #link s3 and lambda
        my_bucket.add_event_notification(s3.EventType.OBJECT_CREATED,s3_lambda_notification,filter1)
        
        # grant the lambda function read/write access to the DynamoDB table
        self._table.grant_read_write_data(my_lambda)
        # (add_event_notification above already grants S3 permission to invoke the function)

        #Grant s3 read access to lambda function
        my_bucket.grant_read(my_lambda)
        
Code Example #20
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # space for feeder Lambda function
        feeder = aws_lambda.Function(self,
                                     id='_feeder',
                                     code=aws_lambda.Code.asset('./code'),
                                     handler='feeder.handler',
                                     runtime=aws_lambda.Runtime.PYTHON_3_7,
                                     description='Feeder function for the Witness project')

        # space for saver Lambda function
        saver = aws_lambda.Function(self,
                                    id='_saver',
                                    code=aws_lambda.Code.asset('./code'),
                                    handler='saver.handler',
                                    runtime=aws_lambda.Runtime.PYTHON_3_7,
                                    description='Saver function for the Witness project')
        # space for feeder lambda trigger
        # the snippet references an 'archive' bucket that is not defined in this listing;
        # a bucket like the following (assumed ID) is needed for the notification to compile
        archive = aws_s3.Bucket(self, '_archive')
        archive.add_event_notification(aws_s3.EventType.OBJECT_CREATED_PUT, s3n.LambdaDestination(feeder))


        # space for stepfunction
        feederTask = aws_stepfunctions.Task(self,
                                            id='_feederTask',
                                            task=aws_tasks.InvokeFunction(feeder))

        saverTask = aws_stepfunctions.Task(self,
                                           id='_saverTask',
                                           task=aws_tasks.InvokeFunction(saver))

        definition = feederTask.next(saverTask)

        orchestrator = aws_stepfunctions.StateMachine(self,
                                                      id='_orchestrator',
                                                      state_machine_name='witness_orchestrator',
                                                      definition=definition)
Code Example #21
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        power_transformers = aws_dynamodb.Table(
            self,
            "PowerTransformers",
            table_name="PowerTransformers",
            partition_key=aws_dynamodb.Attribute(
                name="name", type=aws_dynamodb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)

        function = _lambda.Function(
            self,
            "power_transformers_data_enrichment",
            function_name="power_transformers_data_enrichment",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="lambda_function.handler",
            code=_lambda.Code.asset("./lambda/data-enrichment"))

        function.add_environment('TABLE_NAME', power_transformers.table_name)
        function.add_to_role_policy(
            iam.PolicyStatement(actions=['dynamodb:GetItem'],
                                resources=[f"{power_transformers.table_arn}"],
                                effect=iam.Effect.ALLOW))

        function.add_permission(
            principal=iam.ServicePrincipal('iotanalytics.amazonaws.com'),
            action='lambda:InvokeFunction',
            id='pt-iot-analytics')

        bucket = s3.Bucket(
            self,
            'PowerTransformersTelemetryBucket',
            bucket_name=f"{props['projectName'].lower()}-{core.Aws.ACCOUNT_ID}",
            removal_policy=core.RemovalPolicy.DESTROY)

        output_bucket = s3.Bucket(
            self,
            'PowerTransformersProcessedDataBucket',
            bucket_name=
            f"{props['projectName'].lower()}-output-{core.Aws.ACCOUNT_ID}",
            removal_policy=core.RemovalPolicy.DESTROY)

        # Apply least privilege
        s3_role = iam.Role(
            self,
            "IotAnalyticsS3Role",
            assumed_by=iam.ServicePrincipal("iotanalytics.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonS3FullAccess')
            ])

        # s3_role.add_to_policy(iam.PolicyStatement(actions=["s3:PutObject", "s3:DeleteObject", "s3:GetBucketLocation"],
        #                       resources=[f"{bucket.bucket_arn}", f"{bucket.bucket_arn}/*"], effect=iam.Effect.ALLOW))

        # Apply least privilege
        s3_output_role = iam.Role(
            self,
            "IotAnalyticsS3OutputRole",
            assumed_by=iam.ServicePrincipal("iotanalytics.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonS3FullAccess')
            ],
        )

        # s3_output_role.add_to_policy(iam.PolicyStatement(actions=["s3:PutObject", "s3:DeleteObject", "s3:GetBucketLocation"],
        #                       resources=[f"{output_bucket.bucket_arn}", f"{output_bucket.bucket_arn}/*"], effect=iam.Effect.ALLOW))

        project_name = props['projectName'].lower().replace('-', '_')

        channel_name = f"{project_name}_channel"
        datastore_name = f"{project_name}_datastore"

        channel_s3 = CHANNEL.CustomerManagedS3Property(
            bucket=bucket.bucket_name,
            key_prefix='raw/',
            role_arn=s3_role.role_arn)
        channel_storage = CHANNEL.ChannelStorageProperty(
            customer_managed_s3=channel_s3)

        CHANNEL(self,
                'iot_channel',
                channel_name=channel_name,
                channel_storage=channel_storage)

        datastore_s3 = DATASTORE.CustomerManagedS3Property(
            bucket=bucket.bucket_name,
            key_prefix='processed/',
            role_arn=s3_role.role_arn)

        datastore_storage = DATASTORE.DatastoreStorageProperty(
            customer_managed_s3=datastore_s3)

        datastore = DATASTORE(self,
                              'iot_datastore',
                              datastore_name=datastore_name,
                              datastore_storage=datastore_storage)

        channel_activity = PIPELINE.ChannelProperty(name='ChannelActivity',
                                                    channel_name=channel_name,
                                                    next='LambdaActivity')
        lambda_activity = PIPELINE.LambdaProperty(
            name='LambdaActivity',
            lambda_name='power_transformers_data_enrichment',
            next='DatastoreActivity',
            batch_size=10)
        datastore_activity = PIPELINE.DatastoreProperty(
            name='DatastoreActivity', datastore_name=datastore_name)

        pipeline_activities = PIPELINE.ActivityProperty(
            channel=channel_activity,
            lambda_=lambda_activity,
            datastore=datastore_activity)

        pipeline = PIPELINE(self,
                            'iot_pipeline',
                            pipeline_name=f"{project_name}_pipeline",
                            pipeline_activities=[pipeline_activities])

        pipeline.add_depends_on(datastore)

        query_action = DATASET.QueryActionProperty(
            sql_query=f"SELECT * FROM {datastore_name}")
        action = DATASET.ActionProperty(query_action=query_action,
                                        action_name='sqlAction')
        schedule_expression = DATASET.ScheduleProperty(
            schedule_expression='cron(1/5 * * * ? *)')
        trigger_schedule = DATASET.TriggerProperty(
            schedule=schedule_expression)

        dataset_s3_destination = DATASET.S3DestinationConfigurationProperty(
            bucket=output_bucket.bucket_name,
            key=
            'dataset/Version/!{iotanalytics:scheduleTime}_!{iotanalytics:versionId}.csv',
            role_arn=s3_output_role.role_arn)

        dataset_destination = DATASET.DatasetContentDeliveryRuleDestinationProperty(
            s3_destination_configuration=dataset_s3_destination)

        content_delivery_rules = DATASET.DatasetContentDeliveryRuleProperty(
            destination=dataset_destination)

        dataset = DATASET(self,
                          'iot_dataset',
                          dataset_name=f"{project_name}_dataset",
                          actions=[action],
                          triggers=[trigger_schedule],
                          content_delivery_rules=[content_delivery_rules])

        dataset.add_depends_on(datastore)

        user_pool = aws_cognito.UserPool(
            self,
            'kibanaUserPool',
            self_sign_up_enabled=False,
            sign_in_aliases=aws_cognito.SignInAliases(username=True,
                                                      email=True))

        aws_cognito.CfnUserPoolDomain(
            self,
            'userPoolDomain',
            user_pool_id=user_pool.user_pool_id,
            domain=
            f"{props['projectName'].lower()}-{''.join(random.choices(string.ascii_lowercase + string.digits, k=6))}"
        )

        user_pool_client = aws_cognito.UserPoolClient(self,
                                                      'kibanaClientId',
                                                      user_pool=user_pool,
                                                      generate_secret=True)

        identity_provider = aws_cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
            client_id=user_pool_client.user_pool_client_id,
            provider_name=user_pool.user_pool_provider_name)

        identity_pool = aws_cognito.CfnIdentityPool(
            self,
            'identityPool',
            allow_unauthenticated_identities=False,
            cognito_identity_providers=[identity_provider])

        # Apply least privilege
        cognito_authenticated_role = iam.Role(
            self,
            "CognitoAuthRole",
            assumed_by=iam.FederatedPrincipal(
                "cognito-identity.amazonaws.com",
                assume_role_action='sts:AssumeRoleWithWebIdentity',
                conditions={
                    'StringEquals': {
                        'cognito-identity.amazonaws.com:aud': identity_pool.ref
                    },
                    'ForAnyValue:StringLike': {
                        'cognito-identity.amazonaws.com:amr': 'authenticated'
                    }
                }),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonESFullAccess')
            ])

        aws_cognito.CfnIdentityPoolRoleAttachment(
            self,
            'identityPoolRoleAttachment',
            identity_pool_id=identity_pool.ref,
            roles={'authenticated': cognito_authenticated_role.role_arn})

        cognito_options = DOMAIN.CognitoOptionsProperty(
            enabled=True,
            user_pool_id=user_pool.user_pool_id,
            identity_pool_id=identity_pool.ref,
            role_arn=
            f"arn:aws:iam::{core.Aws.ACCOUNT_ID}:role/service-role/CognitoAccessForAmazonES"
        )

        ebs_options = DOMAIN.EBSOptionsProperty(ebs_enabled=True,
                                                volume_size=10,
                                                volume_type='gp2')
        elasticsearch_cluster_config = DOMAIN.ElasticsearchClusterConfigProperty(
            instance_count=1, instance_type='r5.large.elasticsearch')
        encryption_at_rest_options = DOMAIN.EncryptionAtRestOptionsProperty(
            enabled=True)
        node_to_node_encryption_options = DOMAIN.NodeToNodeEncryptionOptionsProperty(
            enabled=True)
        snapshot_options = DOMAIN.SnapshotOptionsProperty(
            automated_snapshot_start_hour=0)

        es_domain_arn = f"arn:aws:es:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:domain/{props['projectName'].lower()}/*"

        es_policy_statement = iam.PolicyStatement(actions=['es:*'],
                                                  resources=[es_domain_arn])

        es_policy_statement.add_arn_principal(
            cognito_authenticated_role.role_arn)

        policy_document = iam.PolicyDocument()

        policy_document.add_statements(es_policy_statement)

        domain = DOMAIN(
            self,
            'elasticsearch',
            domain_name=f"{props['projectName'].lower()}",
            cognito_options=cognito_options,
            ebs_options=ebs_options,
            elasticsearch_cluster_config=elasticsearch_cluster_config,
            encryption_at_rest_options=encryption_at_rest_options,
            node_to_node_encryption_options=node_to_node_encryption_options,
            snapshot_options=snapshot_options,
            elasticsearch_version='6.8',
            access_policies=policy_document)

        function = _lambda.Function(
            self,
            "load_data_from_s3_to_es",
            function_name="load_data_from_s3_to_es",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="lambda_function.handler",
            code=_lambda.Code.asset("./lambda/load-data-from-s3-to-es.zip"))

        function.add_environment('ES_HOST', domain.attr_domain_endpoint)
        function.add_environment('ES_REGION', f"{core.Aws.REGION}")

        function.add_to_role_policy(
            iam.PolicyStatement(actions=['es:ESHttpPost'],
                                resources=[es_domain_arn],
                                effect=iam.Effect.ALLOW))
        function.add_to_role_policy(
            iam.PolicyStatement(actions=['s3:GetObject'],
                                resources=[f"{output_bucket.bucket_arn}/*"],
                                effect=iam.Effect.ALLOW))

        notification = aws_s3_notifications.LambdaDestination(function)

        output_bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                             notification)

        load_ddb_custom_resource = LoadDDBDataCustomResource(
            self,
            "LoadDDBData",
            table_name=power_transformers.table_name,
            table_arn=power_transformers.table_arn)

        load_ddb_custom_resource.node.add_dependency(power_transformers)

        load_es_index_custom_resource = LoadESIndexCustomResource(
            self,
            "LoadESIndex",
            es_host=domain.attr_domain_endpoint,
            es_region=f"{core.Aws.REGION}",
            es_domain_arn=es_domain_arn)

        load_es_index_custom_resource.node.add_dependency(domain)

        load_kibana_dashboards_custom_resource = LoadKibanaDashboardsCustomResource(
            self,
            "LoadKibanaDashboards",
            es_host=domain.attr_domain_endpoint,
            es_region=f"{core.Aws.REGION}",
            es_domain_arn=es_domain_arn)

        load_kibana_dashboards_custom_resource.node.add_dependency(
            load_es_index_custom_resource)
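The power_transformers_data_enrichment function used by the pipeline's LambdaActivity is deployed from an asset and not shown. IoT Analytics invokes a Lambda activity with a batch of messages and expects the (possibly enriched) batch back; a hedged sketch of such a handler, assuming each message carries a name field matching the DynamoDB partition key, might look like this:

# lambda_function.py -- hypothetical enrichment handler for the LambdaActivity above
import os

import boto3

table = boto3.resource("dynamodb").Table(os.environ["TABLE_NAME"])


def handler(event, context):
    # the event is a list of pipeline messages; return the enriched list
    enriched = []
    for message in event:
        name = message.get("name")
        if name:
            item = table.get_item(Key={"name": name}).get("Item")
            if item:
                # merge static attributes from the PowerTransformers table into the message
                message.update(item)
        enriched.append(message)
    return enriched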
Code Example #22
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # region IAM
        sa_role = aws_iam.Role(
            self,
            "Role",
            role_name="SaRole",
            assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"))

        sa_role.add_to_policy(
            aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                    resources=["*"],
                                    actions=[
                                        "cloudwatch:*", "s3:*", "logs:*",
                                        "dynamodb:*", "iam:*"
                                    ]))
        # endregion IAM

        # region S3
        mask_images_bucket = aws_s3.Bucket(self, 'MaskImagesBucket')
        # endregion S3

        # region DB
        masks_db = aws_dynamodb.Table(
            self,
            'MasksTable',
            table_name='Masks',
            partition_key=aws_dynamodb.Attribute(
                name='id', type=aws_dynamodb.AttributeType.STRING),
            sort_key=aws_dynamodb.Attribute(
                name='mask_name', type=aws_dynamodb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)
        # endregion DB

        # region Lambda
        fetch_lambda = _lambda.Function(
            self,
            'DynamoFetch',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset('lambda'),
            handler='dynamo_fetch.handler',
            environment=dict(DYNAMO_TABLE_NAME=masks_db.table_name),
            role=sa_role)

        insert_lambda = _lambda.Function(
            self,
            'DynamoInsert',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset('lambda'),
            handler='dynamo_insert.handler',
            environment=dict(DYNAMO_TABLE_NAME=masks_db.table_name),
            role=sa_role)
        # endregion

        # region API
        base_api = aws_apigateway.LambdaRestApi(
            self,
            'SaApi',
            rest_api_name='SaApi',
            handler=fetch_lambda,
            default_cors_preflight_options=aws_apigateway.CorsOptions(
                allow_origins=aws_apigateway.Cors.ALL_ORIGINS))

        # endregion API

        # region Frontend
        frontend_bucket = aws_s3.Bucket(self,
                                        "CreateReactAppBucket",
                                        website_index_document="index.html")

        frontend_src = aws_s3_deployment.BucketDeployment(
            self,
            "DeployCRA",
            sources=[
                aws_s3_deployment.Source.asset("../frontend/sa-app/build")
            ],
            destination_bucket=frontend_bucket)

        oia = aws_cloudfront.OriginAccessIdentity(self, 'OIA')

        frontend_bucket.grant_read(oia)

        cloudFront = aws_cloudfront.CloudFrontWebDistribution(
            self,
            "CDKCRAStaticDistribution",
            origin_configs=[
                aws_cloudfront.SourceConfiguration(
                    s3_origin_source=aws_cloudfront.S3OriginConfig(
                        s3_bucket_source=frontend_bucket,
                        origin_access_identity=oia),
                    behaviors=[
                        aws_cloudfront.Behavior(
                            is_default_behavior=True,
                            default_ttl=core.Duration.seconds(0),
                            max_ttl=core.Duration.seconds(0),
                            min_ttl=core.Duration.seconds(0))
                    ])
            ])
        # endregion

        # region S3 triggers
        new_mask_image_notification = aws_s3_notifications.LambdaDestination(
            insert_lambda)
        mask_images_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, new_mask_image_notification)
Code Example #23
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        
        s3 = _s3.Bucket(self, "testbuckforall")
        print(s3)
        
        lambda_dir_path = os.path.join(os.getcwd(), "ir_cdk_stacks", "IN-API-01")
        function = _lambda.Function(self, "lambda_function",
                                    runtime=_lambda.Runtime.PYTHON_2_7, memory_size = 512, #timeout=core.Duration(120),
                                    handler="parser.lambda_handler",
                                    code=_lambda.Code.asset(lambda_dir_path))
                                    
        function.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["s3:*"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                )
            )
        function.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["waf-regional:UpdateIPSet","waf-regional:GetIPSet","waf-regional:GetChangeToken"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                )
            )
        function.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["logs:*"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                )
            )
        function.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["cloudformation:DescribeStacks"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                )
            )
        function.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["SNS:Publish"],
                    effect=iam.Effect.ALLOW,
                    resources=["arn:aws:sns:us-east-1:544820149332:IN-API-01-IPBlocked"],
                )
            )
        function.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["cloudwatch:PutMetricData"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                )
            )
        function.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["lambda:*"],
                    effect=iam.Effect.ALLOW,
                    resources=["*"],
                )
            )

        # create s3 notification for lambda function
        notification = aws_s3_notifications.LambdaDestination(function)

        # assign notification for the s3 event type (ex: OBJECT_CREATED)
        s3.add_event_notification(_s3.EventType.OBJECT_CREATED, notification)
Code Example #24
File: cdk.py  Project: zanhsieh/aws-cdk-examples
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        with open("stack/config.yml", 'r') as stream:
            configs = yaml.safe_load(stream)

        ### S3 core
        images_S3_bucket = _s3.Bucket(self, "ICS_IMAGES")

        images_S3_bucket.add_cors_rule(
            allowed_methods=[_s3.HttpMethods.POST],
            allowed_origins=["*"] # add API gateway web resource URL
        )

        ### SQS core
        image_deadletter_queue = _sqs.Queue(self, "ICS_IMAGES_DEADLETTER_QUEUE")
        image_queue = _sqs.Queue(self, "ICS_IMAGES_QUEUE",
            dead_letter_queue={
                "max_receive_count": configs["DeadLetterQueue"]["MaxReceiveCount"],
                "queue": image_deadletter_queue
            })

        ### api gateway core
        api_gateway = RestApi(self, 'ICS_API_GATEWAY', rest_api_name='ImageContentSearchApiGateway')
        api_gateway_resource = api_gateway.root.add_resource(configs["ProjectName"])
        api_gateway_landing_page_resource = api_gateway_resource.add_resource('web')
        api_gateway_get_signedurl_resource = api_gateway_resource.add_resource('signedUrl')
        api_gateway_image_search_resource = api_gateway_resource.add_resource('search')

        ### landing page function
        get_landing_page_function = Function(self, "ICS_GET_LANDING_PAGE",
            function_name="ICS_GET_LANDING_PAGE",
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.asset("./src/landingPage"))

        get_landing_page_integration = LambdaIntegration(
            get_landing_page_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
               'responseParameters': {
                   'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_landing_page_resource.add_method('GET', get_landing_page_integration,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])

        ### cognito
        required_attribute = _cognito.StandardAttribute(required=True)

        users_pool = _cognito.UserPool(self, "ICS_USERS_POOL",
            auto_verify=_cognito.AutoVerifiedAttrs(email=True), #required for self sign-up
            standard_attributes=_cognito.StandardAttributes(email=required_attribute), #required for self sign-up
            self_sign_up_enabled=configs["Cognito"]["SelfSignUp"])

        user_pool_app_client = _cognito.CfnUserPoolClient(self, "ICS_USERS_POOL_APP_CLIENT", 
            supported_identity_providers=["COGNITO"],
            allowed_o_auth_flows=["implicit"],
            allowed_o_auth_scopes=configs["Cognito"]["AllowedOAuthScopes"],
            user_pool_id=users_pool.user_pool_id,
            callback_ur_ls=[api_gateway_landing_page_resource.url],
            allowed_o_auth_flows_user_pool_client=True,
            explicit_auth_flows=["ALLOW_REFRESH_TOKEN_AUTH"])

        user_pool_domain = _cognito.UserPoolDomain(self, "ICS_USERS_POOL_DOMAIN", 
            user_pool=users_pool, 
            cognito_domain=_cognito.CognitoDomainOptions(domain_prefix=configs["Cognito"]["DomainPrefix"]))

        ### get signed URL function
        get_signedurl_function = Function(self, "ICS_GET_SIGNED_URL",
            function_name="ICS_GET_SIGNED_URL",
            environment={
                "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
                "DEFAULT_SIGNEDURL_EXPIRY_SECONDS": configs["Functions"]["DefaultSignedUrlExpirySeconds"]
            },
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.asset("./src/getSignedUrl"))

        get_signedurl_integration = LambdaIntegration(
            get_signedurl_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
               'responseParameters': {
                   'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_get_signedurl_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            rest_api_id=api_gateway_get_signedurl_resource.rest_api.rest_api_id,
            name="ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            type="COGNITO_USER_POOLS",
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_get_signedurl_resource.add_method('GET', get_signedurl_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]
            ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_get_signedurl_authorizer.ref)

        images_S3_bucket.grant_put(get_signedurl_function, objects_key_pattern="new/*")

        ### image massage function
        image_massage_function = Function(self, "ICS_IMAGE_MASSAGE",
            function_name="ICS_IMAGE_MASSAGE",
            timeout=core.Duration.seconds(6),
            runtime=Runtime.PYTHON_3_7,
            environment={"ICS_IMAGE_MASSAGE": image_queue.queue_name},
            handler="main.handler",
            code=Code.asset("./src/imageMassage"))

        images_S3_bucket.grant_write(image_massage_function, "processed/*")
        images_S3_bucket.grant_delete(image_massage_function, "new/*")
        images_S3_bucket.grant_read(image_massage_function, "new/*")
        
        new_image_added_notification = _s3notification.LambdaDestination(image_massage_function)

        images_S3_bucket.add_event_notification(_s3.EventType.OBJECT_CREATED, 
            new_image_added_notification, 
            _s3.NotificationKeyFilter(prefix="new/")
            )

        image_queue.grant_send_messages(image_massage_function)

        ### image analyzer function
        image_analyzer_function = Function(self, "ICS_IMAGE_ANALYSIS",
            function_name="ICS_IMAGE_ANALYSIS",
            runtime=Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(10),
            environment={
                "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
                "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
                "REGION": core.Aws.REGION,
                },
            handler="main.handler",
            code=Code.asset("./src/imageAnalysis")) 

        image_analyzer_function.add_event_source(_lambda_event_source.SqsEventSource(queue=image_queue, batch_size=10))
        image_queue.grant_consume_messages(image_massage_function)

        lambda_rekognition_access = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW, 
            actions=["rekognition:DetectLabels", "rekognition:DetectModerationLabels"],
            resources=["*"]                    
        )

        image_analyzer_function.add_to_role_policy(lambda_rekognition_access)
        images_S3_bucket.grant_read(image_analyzer_function, "processed/*")

        ### API gateway finalizing
        self.add_cors_options(api_gateway_get_signedurl_resource)
        self.add_cors_options(api_gateway_landing_page_resource)
        self.add_cors_options(api_gateway_image_search_resource)

        ### database 
        database_secret = _secrets_manager.Secret(self, "ICS_DATABASE_SECRET",
            secret_name="rds-db-credentials/image-content-search-rds-secret",
            generate_secret_string=_secrets_manager.SecretStringGenerator(
                generate_string_key='password',
                secret_string_template='{"username": "******"}',
                exclude_punctuation=True,
                exclude_characters='/@\" \\\'',
                require_each_included_type=True
            )
        )

        database = _rds.CfnDBCluster(self, "ICS_DATABASE",
            engine=_rds.DatabaseClusterEngine.aurora_mysql(version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
            engine_mode="serverless",
            database_name=configs["Database"]["Name"],
            enable_http_endpoint=True,
            deletion_protection=configs["Database"]["DeletionProtection"],
            master_username=database_secret.secret_value_from_json("username").to_string(),
            master_user_password=database_secret.secret_value_from_json("password").to_string(),
            scaling_configuration=_rds.CfnDBCluster.ScalingConfigurationProperty(
                auto_pause=configs["Database"]["Scaling"]["AutoPause"],
                min_capacity=configs["Database"]["Scaling"]["Min"],
                max_capacity=configs["Database"]["Scaling"]["Max"],
                seconds_until_auto_pause=configs["Database"]["Scaling"]["SecondsToAutoPause"]
            ),
        )

        database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(core.Aws.REGION, core.Aws.ACCOUNT_ID, database.ref)
   
        secret_target = _secrets_manager.CfnSecretTargetAttachment(self,"ICS_DATABASE_SECRET_TARGET",
            target_type="AWS::RDS::DBCluster",
            target_id=database.ref,
            secret_id=database_secret.secret_arn
        )

        secret_target.node.add_dependency(database)
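        # The secret attachment ties the generated credentials to the Aurora Serverless
        # cluster, and the explicit dependency ensures the cluster exists first. The data
        # function below talks to the cluster through the RDS Data API (enabled via
        # enable_http_endpoint) using CLUSTER_ARN and CREDENTIALS_ARN rather than a direct
        # VPC connection, which is why AmazonRDSDataFullAccess is enough for its role.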

        ### database function
        image_data_function_role = _iam.Role(self, "ICS_IMAGE_DATA_FUNCTION_ROLE",
            role_name="ICS_IMAGE_DATA_FUNCTION_ROLE",
            assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaVPCAccessExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSDataFullAccess")
            ]
        )
        
        image_data_function = Function(self, "ICS_IMAGE_DATA",
            function_name="ICS_IMAGE_DATA",
            runtime=Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(5),
            role=image_data_function_role,
            environment={
                "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
                "CLUSTER_ARN": database_cluster_arn,
                "CREDENTIALS_ARN": database_secret.secret_arn,
                "DB_NAME": database.database_name,
                "REGION": core.Aws.REGION
                },
            handler="main.handler",
            code=Code.asset("./src/imageData")
        ) 

        image_search_integration = LambdaIntegration(
            image_data_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_image_search_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            rest_api_id=api_gateway_image_search_resource.rest_api.rest_api_id,
            name="ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            type="COGNITO_USER_POOLS", 
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_image_search_resource.add_method('POST', image_search_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]
            ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_image_search_authorizer.ref)


        lambda_access_search = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW, 
            actions=["translate:TranslateText"],
            resources=["*"]            
        ) 

        image_data_function.add_to_role_policy(lambda_access_search)

        ### custom resource
        lambda_provider = Provider(self, 'ICS_IMAGE_DATA_PROVIDER', 
            on_event_handler=image_data_function
        )

        core.CustomResource(self, 'ICS_IMAGE_DATA_RESOURCE', 
            service_token=lambda_provider.service_token,
            pascal_case_properties=False,
            resource_type="Custom::SchemaCreation",
            properties={
                "source": "Cloudformation"
            }
        )

        ### event bridge
        event_bus = _events.EventBus(self, "ICS_IMAGE_CONTENT_BUS")

        event_rule = _events.Rule(self, "ICS_IMAGE_CONTENT_RULE",
            rule_name="ICS_IMAGE_CONTENT_RULE",
            description="The event from image analyzer to store the data",
            event_bus=event_bus,
            event_pattern=_events.EventPattern(resources=[image_analyzer_function.function_arn]),
        )

        event_rule.add_target(_event_targets.LambdaFunction(image_data_function))

        event_bus.grant_put_events(image_analyzer_function)
        image_analyzer_function.add_environment("EVENT_BUS", event_bus.event_bus_name)

        ### outputs
        core.CfnOutput(self, 'CognitoHostedUILogin',
            value='https://{}.auth.{}.amazoncognito.com/login?client_id={}&response_type=token&scope={}&redirect_uri={}'.format(user_pool_domain.domain_name, core.Aws.REGION, user_pool_app_client.ref, '+'.join(user_pool_app_client.allowed_o_auth_scopes), api_gateway_landing_page_resource.url),
            description='The Cognito Hosted UI Login Page'
        )
Code Example #25
File: cdk_stack.py  Project: drumadrian/s3workflow
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ###########################################################################
        # AWS LAMBDA FUNCTIONS
        ###########################################################################
        parse_image_list_file = aws_lambda.Function(
            self,
            'parse_image_list_file',
            handler='parse_image_list_file.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('parse_image_list_file'),
            memory_size=10240,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        list_objects = aws_lambda.Function(
            self,
            'list_objects',
            handler='list_objects.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('list_objects'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        get_size_and_store = aws_lambda.Function(
            self,
            'get_size_and_store',
            handler='get_size_and_store.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('get_size_and_store'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        ###########################################################################
        # AMAZON S3 BUCKETS
        ###########################################################################
        images_bucket = aws_s3.Bucket(self, "images_bucket")

        ###########################################################################
        # LAMBDA SUPPLEMENTAL POLICIES
        ###########################################################################
        lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["s3:Get*", "s3:Head*", "s3:List*", "sqs:*", "es:*"],
            resources=["*"])

        parse_image_list_file.add_to_role_policy(
            lambda_supplemental_policy_statement)
        list_objects.add_to_role_policy(lambda_supplemental_policy_statement)
        get_size_and_store.add_to_role_policy(
            lambda_supplemental_policy_statement)

        ###########################################################################
        # AWS SNS TOPICS
        ###########################################################################
        # notification_topic = aws_sns.Topic(self, "notification_topic")

        ###########################################################################
        # ADD AMAZON S3 BUCKET NOTIFICATIONS
        ###########################################################################
        images_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED,
            aws_s3_notifications.LambdaDestination(parse_image_list_file))

        ###########################################################################
        # AWS SQS QUEUES
        ###########################################################################
        comprehend_queue_iqueue = aws_sqs.Queue(self,
                                                "comprehend_queue_iqueue")
        comprehend_queue_iqueue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=comprehend_queue_iqueue)
        comprehend_queue = aws_sqs.Queue(
            self,
            "comprehend_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=comprehend_queue_iqueue_dlq)

        rekognition_queue_iqueue = aws_sqs.Queue(self,
                                                 "rekognition_queue_iqueue")
        rekognition_queue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=rekognition_queue_iqueue)
        rekognition_queue = aws_sqs.Queue(
            self,
            "rekognition_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=rekognition_queue_dlq)

        object_queue_iqueue = aws_sqs.Queue(self, "object_queue_iqueue")
        object_queue_dlq = aws_sqs.DeadLetterQueue(max_receive_count=10,
                                                   queue=object_queue_iqueue)
        object_queue = aws_sqs.Queue(
            self,
            "object_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=object_queue_dlq)

        ###########################################################################
        # AWS LAMBDA SQS EVENT SOURCE
        ###########################################################################
        get_size_and_store.add_event_source(
            SqsEventSource(object_queue, batch_size=10))
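        # The queues' 301-second visibility timeout sits just above the consuming Lambda's
        # 300-second timeout, so a message stays hidden from other consumers for as long as
        # get_size_and_store may legitimately be working on it.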

        ###########################################################################
        # AWS ELASTICSEARCH DOMAIN
        ###########################################################################
        s3workflow_domain = aws_elasticsearch.Domain(
            self,
            "s3workflow_domain",
            version=aws_elasticsearch.ElasticsearchVersion.V7_1,
            capacity={
                "master_nodes": 3,
                "data_nodes": 4
            },
            ebs={"volume_size": 100},
            zone_awareness={"availability_zone_count": 2},
            logging={
                "slow_search_log_enabled": True,
                "app_log_enabled": True,
                "slow_index_log_enabled": True
            })

        ###########################################################################
        # AMAZON COGNITO USER POOL
        ###########################################################################
        s3workflow_pool = aws_cognito.UserPool(
            self,
            "s3workflow-pool",
            account_recovery=None,
            auto_verify=None,
            custom_attributes=None,
            email_settings=None,
            enable_sms_role=None,
            lambda_triggers=None,
            mfa=None,
            mfa_second_factor=None,
            password_policy=None,
            self_sign_up_enabled=None,
            sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                      phone=None,
                                                      preferred_username=None,
                                                      username=True),
            sign_in_case_sensitive=None,
            sms_role=None,
            sms_role_external_id=None,
            standard_attributes=None,
            user_invitation=None,
            user_pool_name=None,
            user_verification=None)

        ###########################################################################
        # AMAZON VPC
        ###########################################################################
        vpc = aws_ec2.Vpc(self, "s3workflowVPC",
                          max_azs=3)  # default is all AZs in region

        ###########################################################################
        # AMAZON ECS CLUSTER
        ###########################################################################
        cluster = aws_ecs.Cluster(self, "s3", vpc=vpc)

        ###########################################################################
        # AMAZON ECS Repositories
        ###########################################################################
        rekognition_repository = aws_ecr.Repository(
            self,
            "rekognition_repository",
            image_scan_on_push=True,
            removal_policy=core.RemovalPolicy("DESTROY"))
        comprehend_repository = aws_ecr.Repository(
            self,
            "comprehend_repository",
            image_scan_on_push=True,
            removal_policy=core.RemovalPolicy("DESTROY"))

        ###########################################################################
        # AMAZON ECS Roles and Policies
        ###########################################################################
        task_execution_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "logs:*", "ecs:*", "ec2:*", "elasticloadbalancing:*", "ecr:*"
            ],
            resources=["*"])
        task_execution_policy_document = aws_iam.PolicyDocument()
        task_execution_policy_document.add_statements(
            task_execution_policy_statement)
        task_execution_policy = aws_iam.Policy(
            self,
            "task_execution_policy",
            document=task_execution_policy_document)
        task_execution_role = aws_iam.Role(
            self,
            "task_execution_role",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
        task_execution_role.attach_inline_policy(task_execution_policy)

        task_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "logs:*", "xray:*", "sqs:*", "s3:*", "rekognition:*",
                "comprehend:*", "es:*"
            ],
            resources=["*"])
        task_policy_document = aws_iam.PolicyDocument()
        task_policy_document.add_statements(task_policy_statement)
        task_policy = aws_iam.Policy(self,
                                     "task_policy",
                                     document=task_policy_document)
        task_role = aws_iam.Role(
            self,
            "task_role",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
        task_role.attach_inline_policy(task_policy)

        ###########################################################################
        # AMAZON ECS Task definitions
        ###########################################################################
        rekognition_task_definition = aws_ecs.TaskDefinition(
            self,
            "rekognition_task_definition",
            compatibility=aws_ecs.Compatibility("FARGATE"),
            cpu="1024",
            # ipc_mode=None,
            memory_mib="2048",
            network_mode=aws_ecs.NetworkMode("AWS_VPC"),
            # pid_mode=None,                                      #Not supported in Fargate and Windows containers
            # placement_constraints=None,
            execution_role=task_execution_role,
            # family=None,
            # proxy_configuration=None,
            task_role=task_role
            # volumes=None
        )

        comprehend_task_definition = aws_ecs.TaskDefinition(
            self,
            "comprehend_task_definition",
            compatibility=aws_ecs.Compatibility("FARGATE"),
            cpu="1024",
            # ipc_mode=None,
            memory_mib="2048",
            network_mode=aws_ecs.NetworkMode("AWS_VPC"),
            # pid_mode=None,                                      #Not supported in Fargate and Windows containers
            # placement_constraints=None,
            execution_role=task_execution_role,
            # family=None,
            # proxy_configuration=None,
            task_role=task_role
            # volumes=None
        )

        ###########################################################################
        # AMAZON ECS Images
        ###########################################################################
        rekognition_ecr_image = aws_ecs.EcrImage(
            repository=rekognition_repository, tag="latest")
        comprehend_ecr_image = aws_ecs.EcrImage(
            repository=comprehend_repository, tag="latest")

        ###########################################################################
        # ENVIRONMENT VARIABLES
        ###########################################################################
        environment_variables = {}
        environment_variables["COMPREHEND_QUEUE"] = comprehend_queue.queue_url
        environment_variables[
            "REKOGNITION_QUEUE"] = rekognition_queue.queue_url
        environment_variables["IMAGES_BUCKET"] = images_bucket.bucket_name
        environment_variables[
            "ELASTICSEARCH_HOST"] = s3workflow_domain.domain_endpoint

        parse_image_list_file.add_environment(
            "ELASTICSEARCH_HOST", s3workflow_domain.domain_endpoint)
        parse_image_list_file.add_environment("QUEUEURL",
                                              rekognition_queue.queue_url)
        parse_image_list_file.add_environment("DEBUG", "False")
        parse_image_list_file.add_environment("BUCKET", "-")
        parse_image_list_file.add_environment("KEY", "-")

        list_objects.add_environment("QUEUEURL", object_queue.queue_url)
        list_objects.add_environment("ELASTICSEARCH_HOST",
                                     s3workflow_domain.domain_endpoint)
        list_objects.add_environment("S3_BUCKET_NAME",
                                     images_bucket.bucket_name)
        list_objects.add_environment("S3_BUCKET_PREFIX", "images/")
        list_objects.add_environment("S3_BUCKET_SUFFIX", "")
        list_objects.add_environment("LOGGING_LEVEL", "INFO")

        get_size_and_store.add_environment("QUEUEURL", object_queue.queue_url)
        get_size_and_store.add_environment("ELASTICSEARCH_HOST",
                                           s3workflow_domain.domain_endpoint)
        get_size_and_store.add_environment("S3_BUCKET_NAME",
                                           images_bucket.bucket_name)
        get_size_and_store.add_environment("S3_BUCKET_PREFIX", "images/")
        get_size_and_store.add_environment("S3_BUCKET_SUFFIX", "")
        get_size_and_store.add_environment("LOGGING_LEVEL", "INFO")

        ###########################################################################
        # ECS Log Drivers
        ###########################################################################
        rekognition_task_log_driver = aws_ecs.LogDriver.aws_logs(
            stream_prefix="s3workflow",
            log_retention=aws_logs.RetentionDays("ONE_DAY"))
        comprehend_task_log_driver = aws_ecs.LogDriver.aws_logs(
            stream_prefix="s3workflow",
            log_retention=aws_logs.RetentionDays("ONE_DAY"))

        ###########################################################################
        # ECS Task Definitions
        ###########################################################################
        rekognition_task_definition.add_container(
            "rekognition_task_definition",
            image=rekognition_ecr_image,
            memory_reservation_mib=1024,
            environment=environment_variables,
            logging=rekognition_task_log_driver)

        comprehend_task_definition.add_container(
            "comprehend_task_definition",
            image=comprehend_ecr_image,
            memory_reservation_mib=1024,
            environment=environment_variables,
            logging=comprehend_task_log_driver)

        ###########################################################################
        # AWS ROUTE53 HOSTED ZONE
        ###########################################################################
        hosted_zone = aws_route53.HostedZone(
            self,
            "hosted_zone",
            zone_name="s3workflow.com",
            comment="private hosted zone for s3workflow system")
        hosted_zone.add_vpc(vpc)
Code Example #26
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # self.node.apply_aspect(core.Aws.ACCOUNT_ID)

        dap_environment = os.environ["ENV_VALUE"]

        src_bucket_name = self.node.try_get_context("src_bucket_name")
        src_bucket_name = src_bucket_name.replace("env", dap_environment)

        dest_bucket_name = self.node.try_get_context("dest_bucket_name")
        dest_bucket_name = dest_bucket_name.replace("env", dap_environment)

        source_bucket = s3.Bucket(
            self, 'sourceBucket',
            bucket_name=src_bucket_name,
            public_read_access=False,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL
        )

        destination_bucket = s3.Bucket(
            self, 'destinationBucket',
            bucket_name=dest_bucket_name,
            public_read_access=False,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL
        )

        lambda_iam_role = iam.Role(
            self, 'lambdaRole',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            role_name="lambda-hello-role",
            managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchFullAccess"
            )]
        )

        lambda_layer = _lambda.LayerVersion(
            self, 'lambdaLayer',
            code=_lambda.Code.asset('lambda_layers/python_libs.zip'),
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            layer_version_name='lambda-layer-cdk',
            description='A Lambda layer for common utility'
        )

        my_lambda = _lambda.Function(
            self, 'HelloHandler',
            role=lambda_iam_role,
            function_name='hellohandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='hello.handler',
            layers=[lambda_layer],
            memory_size=128,
            environment={
                'env_test_key': 'AWS CDK',
                'destination_bucket': dest_bucket_name
            }
        )

        notification = s3_notify.LambdaDestination(my_lambda)

        source_bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED_PUT, notification)

        s3deploy.BucketDeployment(
            self, 'DeployGlueJob',
            sources=[s3deploy.Source.asset("glue")],
            destination_bucket=destination_bucket,
            destination_key_prefix="web/dap"
        )
Code Example #27
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        if self.node.try_get_context('vpc_type'):
            validate_cdk_json(self)

        ES_LOADER_TIMEOUT = 600
        ######################################################################
        # REGION mapping / ELB & Lambda Arch
        ######################################################################
        elb_id_temp = region_info.FactName.ELBV2_ACCOUNT
        elb_map_temp = region_info.RegionInfo.region_map(elb_id_temp)
        region_dict = {}
        for region in elb_map_temp:
            # ELB account ID
            region_dict[region] = {'ElbV2AccountId': elb_map_temp[region]}
            # Lambda Arch
            if region in ('us-east-1', 'us-east-2', 'us-west-2', 'ap-south-1',
                          'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1',
                          'eu-central-1', 'eu-west-1', 'eu-west-2'):
                region_dict[region]['LambdaArch'] = (
                    aws_lambda.Architecture.ARM_64.name)
            else:
                region_dict[region]['LambdaArch'] = (
                    aws_lambda.Architecture.X86_64.name)
        region_mapping = core.CfnMapping(
            scope=self, id='RegionMap', mapping=region_dict)

        ######################################################################
        # get params
        ######################################################################
        allow_source_address = core.CfnParameter(
            self, 'AllowedSourceIpAddresses', allowed_pattern=r'^[0-9./\s]*',
            description='Space-delimited list of CIDR blocks',
            default='10.0.0.0/8 172.16.0.0/12 192.168.0.0/16')
        sns_email = core.CfnParameter(
            self, 'SnsEmail', allowed_pattern=r'^[0-9a-zA-Z@_\-\+\.]*',
            description=('Input your email for the SNS topic to which '
                         'Amazon OpenSearch Service will send alerts'),
            default='*****@*****.**')
        geoip_license_key = core.CfnParameter(
            self, 'GeoLite2LicenseKey', allowed_pattern=r'^[0-9a-zA-Z]{16}$',
            default='xxxxxxxxxxxxxxxx',
            description=("If you wolud like to enrich geoip locaiton such as "
                         "IP address's country, get a license key form MaxMind"
                         " and input the key. If you not, keep "
                         "xxxxxxxxxxxxxxxx"))
        reserved_concurrency = core.CfnParameter(
            self, 'ReservedConcurrency', default=10, type='Number',
            description=('Input reserved concurrency. Increase this value if '
                         'logs are constantly delayed despite no errors'))
        aes_domain_name = self.node.try_get_context('aes_domain_name')
        bucket = f'{aes_domain_name}-{core.Aws.ACCOUNT_ID}'
        s3bucket_name_geo = f'{bucket}-geo'
        s3bucket_name_log = f'{bucket}-log'
        s3bucket_name_snapshot = f'{bucket}-snapshot'

        # organizations / multiaccount
        org_id = self.node.try_get_context('organizations').get('org_id')
        org_mgmt_id = self.node.try_get_context(
            'organizations').get('management_id')
        org_member_ids = self.node.try_get_context(
            'organizations').get('member_ids')
        no_org_ids = self.node.try_get_context(
            'no_organizations').get('aws_accounts')

        # Overwrite default S3 bucket name as customer name
        temp_geo = self.node.try_get_context('s3_bucket_name').get('geo')
        if temp_geo:
            s3bucket_name_geo = temp_geo
        else:
            print('Using default bucket names')
        temp_log = self.node.try_get_context('s3_bucket_name').get('log')
        if temp_log:
            s3bucket_name_log = temp_log
        elif org_id or no_org_ids:
            s3bucket_name_log = f'{aes_domain_name}-{self.account}-log'
        else:
            print('Using default bucket names')
        temp_snap = self.node.try_get_context('s3_bucket_name').get('snapshot')
        if temp_snap:
            s3bucket_name_snapshot = temp_snap
        else:
            print('Using default bucket names')
        kms_cmk_alias = self.node.try_get_context('kms_cmk_alias')
        if not kms_cmk_alias:
            kms_cmk_alias = 'aes-siem-key'
            print('Using default key alias')

        ######################################################################
        # deploy VPC when context is defined as using VPC
        ######################################################################
        # vpc_type is 'new' or 'import' or None
        vpc_type = self.node.try_get_context('vpc_type')

        if vpc_type == 'new':
            is_vpc = True
            vpc_cidr = self.node.try_get_context('new_vpc_nw_cidr_block')
            subnet_cidr_mask = int(
                self.node.try_get_context('new_vpc_subnet_cidr_mask'))
            # VPC
            vpc_aes_siem = aws_ec2.Vpc(
                self, 'VpcAesSiem', cidr=vpc_cidr,
                max_azs=3, nat_gateways=0,
                subnet_configuration=[
                    aws_ec2.SubnetConfiguration(
                        subnet_type=aws_ec2.SubnetType.ISOLATED,
                        name='aes-siem-subnet', cidr_mask=subnet_cidr_mask)])
            subnet1 = vpc_aes_siem.isolated_subnets[0]
            subnets = [{'subnet_type': aws_ec2.SubnetType.ISOLATED}]
            vpc_subnets = aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED)
            vpc_aes_siem_opt = vpc_aes_siem.node.default_child.cfn_options
            vpc_aes_siem_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
            for subnet in vpc_aes_siem.isolated_subnets:
                subnet_opt = subnet.node.default_child.cfn_options
                subnet_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
        elif vpc_type == 'import':
            vpc_id = self.node.try_get_context('imported_vpc_id')
            vpc_aes_siem = aws_ec2.Vpc.from_lookup(
                self, 'VpcAesSiem', vpc_id=vpc_id)

            subnet_ids = get_subnet_ids(self)
            subnets = []
            for number, subnet_id in enumerate(subnet_ids, 1):
                obj_id = 'Subnet' + str(number)
                subnet = aws_ec2.Subnet.from_subnet_id(self, obj_id, subnet_id)
                subnets.append(subnet)
            subnet1 = subnets[0]
            vpc_subnets = aws_ec2.SubnetSelection(subnets=subnets)

        if vpc_type:
            is_vpc = True
            # Security Group
            sg_vpc_noinbound_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcNoinboundSecurityGroup',
                security_group_name='aes-siem-noinbound-vpc-sg',
                vpc=vpc_aes_siem)

            sg_vpc_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcSecurityGroup',
                security_group_name='aes-siem-vpc-sg',
                vpc=vpc_aes_siem)
            sg_vpc_aes_siem.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(vpc_aes_siem.vpc_cidr_block),
                connection=aws_ec2.Port.tcp(443),)
            sg_vpc_opt = sg_vpc_aes_siem.node.default_child.cfn_options
            sg_vpc_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

            # VPC Endpoint
            vpc_aes_siem.add_gateway_endpoint(
                'S3Endpoint', service=aws_ec2.GatewayVpcEndpointAwsService.S3,
                subnets=subnets)
            vpc_aes_siem.add_interface_endpoint(
                'SQSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.SQS,)
            vpc_aes_siem.add_interface_endpoint(
                'KMSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.KMS,)
        else:
            is_vpc = False

        is_vpc = core.CfnCondition(
            self, 'IsVpc', expression=core.Fn.condition_equals(is_vpc, True))
        """
        CloudFormation実行時の条件式の書き方
        ClassのBasesが aws_cdk.core.Resource の時は、
        node.default_child.cfn_options.condition = is_vpc
        ClassのBasesが aws_cdk.core.CfnResource の時は、
        cfn_options.condition = is_vpc
        """

        ######################################################################
        # create cmk of KMS to encrypt S3 bucket
        ######################################################################
        kms_aes_siem = aws_kms.Key(
            self, 'KmsAesSiemLog', description='CMK for SIEM solution',
            removal_policy=core.RemovalPolicy.RETAIN)

        aws_kms.Alias(
            self, 'KmsAesSiemLogAlias', alias_name=kms_cmk_alias,
            target_key=kms_aes_siem,
            removal_policy=core.RemovalPolicy.RETAIN)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow GuardDuty to use the key',
                actions=['kms:GenerateDataKey'],
                principals=[aws_iam.ServicePrincipal(
                    'guardduty.amazonaws.com')],
                resources=['*'],),)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow VPC Flow Logs to use the key',
                actions=['kms:Encrypt', 'kms:Decrypt', 'kms:ReEncrypt*',
                         'kms:GenerateDataKey*', 'kms:DescribeKey'],
                principals=[aws_iam.ServicePrincipal(
                    'delivery.logs.amazonaws.com')],
                resources=['*'],),)
        # basic policy
        key_policy_basic1 = aws_iam.PolicyStatement(
            sid='Allow principals in the account to decrypt log files',
            actions=['kms:DescribeKey', 'kms:ReEncryptFrom'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_basic1)

        # for Athena
        key_policy_athena = aws_iam.PolicyStatement(
            sid='Allow Athena to query s3 objects with this key',
            actions=['kms:Decrypt', 'kms:DescribeKey', 'kms:Encrypt',
                     'kms:GenerateDataKey*', 'kms:ReEncrypt*'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],
            conditions={'ForAnyValue:StringEquals': {
                'aws:CalledVia': 'athena.amazonaws.com'}})
        kms_aes_siem.add_to_resource_policy(key_policy_athena)

        # for CloudTrail
        key_policy_trail1 = aws_iam.PolicyStatement(
            sid='Allow CloudTrail to describe key',
            actions=['kms:DescribeKey'],
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_trail1)

        key_policy_trail2 = aws_iam.PolicyStatement(
            sid=('Allow CloudTrail to encrypt logs'),
            actions=['kms:GenerateDataKey*'],
            principals=[aws_iam.ServicePrincipal(
                'cloudtrail.amazonaws.com')],
            resources=['*'],
            conditions={'StringLike': {
                'kms:EncryptionContext:aws:cloudtrail:arn': [
                    f'arn:aws:cloudtrail:*:{core.Aws.ACCOUNT_ID}:trail/*']}})
        kms_aes_siem.add_to_resource_policy(key_policy_trail2)

        ######################################################################
        # create s3 bucket
        ######################################################################
        block_pub = aws_s3.BlockPublicAccess(
            block_public_acls=True,
            ignore_public_acls=True,
            block_public_policy=True,
            restrict_public_buckets=True
        )
        s3_geo = aws_s3.Bucket(
            self, 'S3BucketForGeoip', block_public_access=block_pub,
            bucket_name=s3bucket_name_geo,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for log collector
        s3_log = aws_s3.Bucket(
            self, 'S3BucketForLog', block_public_access=block_pub,
            bucket_name=s3bucket_name_log, versioned=True,
            encryption=aws_s3.BucketEncryption.S3_MANAGED,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for aes snapshot
        s3_snapshot = aws_s3.Bucket(
            self, 'S3BucketForSnapshot', block_public_access=block_pub,
            bucket_name=s3bucket_name_snapshot,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        ######################################################################
        # IAM Role
        ######################################################################
        # deployment policy for lambda deploy-aes
        arn_prefix = f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
        loggroup_aes = f'log-group:/aws/aes/domains/{aes_domain_name}/*'
        loggroup_opensearch = (
            f'log-group:/aws/OpenSearchService/domains/{aes_domain_name}/*')
        loggroup_lambda = 'log-group:/aws/lambda/aes-siem-*'
        policydoc_create_loggroup = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=[
                        'logs:PutResourcePolicy',
                        'logs:DescribeLogGroups',
                        'logs:DescribeLogStreams'
                    ],
                    resources=[f'{arn_prefix}:*', ]
                ),
                aws_iam.PolicyStatement(
                    actions=[
                        'logs:CreateLogGroup', 'logs:CreateLogStream',
                        'logs:PutLogEvents', 'logs:PutRetentionPolicy'],
                    resources=[
                        f'{arn_prefix}:{loggroup_aes}',
                        f'{arn_prefix}:{loggroup_opensearch}',
                        f'{arn_prefix}:{loggroup_lambda}',
                    ],
                )
            ]
        )

        policydoc_crhelper = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=[
                        'lambda:AddPermission',
                        'lambda:RemovePermission',
                        'events:ListRules',
                        'events:PutRule',
                        'events:DeleteRule',
                        'events:PutTargets',
                        'events:RemoveTargets'],
                    resources=['*']
                )
            ]
        )

        # snapshot policy and role for AES
        policydoc_snapshot = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['s3:ListBucket'],
                    resources=[s3_snapshot.bucket_arn]
                ),
                aws_iam.PolicyStatement(
                    actions=['s3:GetObject', 's3:PutObject',
                             's3:DeleteObject'],
                    resources=[s3_snapshot.bucket_arn + '/*']
                )
            ]
        )
        aes_siem_snapshot_role = aws_iam.Role(
            self, 'AesSiemSnapshotRole',
            role_name='aes-siem-snapshot-role',
            inline_policies=[policydoc_snapshot, ],
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        policydoc_assume_snapshrole = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['iam:PassRole'],
                    resources=[aes_siem_snapshot_role.role_arn]
                ),
            ]
        )

        aes_siem_deploy_role_for_lambda = aws_iam.Role(
            self, 'AesSiemDeployRoleForLambda',
            role_name='aes-siem-deploy-role-for-lambda',
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonOpenSearchServiceFullAccess'),
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
            ],
            inline_policies=[policydoc_assume_snapshrole, policydoc_snapshot,
                             policydoc_create_loggroup, policydoc_crhelper],
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com')
        )

        if vpc_type:
            aes_siem_deploy_role_for_lambda.add_managed_policy(
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaVPCAccessExecutionRole')
            )

        # for alerts from Amazon OpenSearch Service
        aes_siem_sns_role = aws_iam.Role(
            self, 'AesSiemSnsRole',
            role_name='aes-siem-sns-role',
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        # EC2 role
        aes_siem_es_loader_ec2_role = aws_iam.Role(
            self, 'AesSiemEsLoaderEC2Role',
            role_name='aes-siem-es-loader-for-ec2',
            assumed_by=aws_iam.ServicePrincipal('ec2.amazonaws.com'),
        )

        aws_iam.CfnInstanceProfile(
            self, 'AesSiemEsLoaderEC2InstanceProfile',
            instance_profile_name=aes_siem_es_loader_ec2_role.role_name,
            roles=[aes_siem_es_loader_ec2_role.role_name]
        )

        ######################################################################
        # in VPC
        ######################################################################
        aes_role_exist = check_iam_role('/aws-service-role/es.amazonaws.com/')
        if vpc_type and not aes_role_exist:
            slr_aes = aws_iam.CfnServiceLinkedRole(
                self, 'AWSServiceRoleForAmazonOpenSearchService',
                aws_service_name='es.amazonaws.com',
                description='Created by cloudformation of siem stack'
            )
            slr_aes.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # SQS for es-loader's DLQ
        ######################################################################
        sqs_aes_siem_dlq = aws_sqs.Queue(
            self, 'AesSiemDlq', queue_name='aes-siem-dlq',
            retention_period=core.Duration.days(14))

        sqs_aes_siem_splitted_logs = aws_sqs.Queue(
            self, 'AesSiemSqsSplitLogs',
            queue_name='aes-siem-sqs-splitted-logs',
            dead_letter_queue=aws_sqs.DeadLetterQueue(
                max_receive_count=2, queue=sqs_aes_siem_dlq),
            visibility_timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            retention_period=core.Duration.days(14))
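        # aes-siem-sqs-splitted-logs holds log batches that es-loader splits and re-queues
        # to itself (see SQS_SPLITTED_LOGS_URL below); after two failed receives a message
        # falls through to the aes-siem-dlq dead-letter queue.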

        ######################################################################
        # Setup Lambda
        ######################################################################
        # setup lambda of es_loader
        lambda_es_loader_vpc_kwargs = {}
        if vpc_type:
            lambda_es_loader_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': vpc_subnets,
            }

        lambda_es_loader = aws_lambda.Function(
            self, 'LambdaEsLoader', **lambda_es_loader_vpc_kwargs,
            function_name='aes-siem-es-loader',
            description=f'{SOLUTION_NAME} / es-loader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            # code=aws_lambda.Code.asset('../lambda/es_loader.zip'),
            code=aws_lambda.Code.asset('../lambda/es_loader'),
            handler='index.lambda_handler',
            memory_size=2048,
            timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            reserved_concurrent_executions=(
                reserved_concurrency.value_as_number),
            dead_letter_queue_enabled=True,
            dead_letter_queue=sqs_aes_siem_dlq,
            environment={
                'GEOIP_BUCKET': s3bucket_name_geo, 'LOG_LEVEL': 'info',
                'POWERTOOLS_LOGGER_LOG_EVENT': 'false',
                'POWERTOOLS_SERVICE_NAME': 'es-loader',
                'POWERTOOLS_METRICS_NAMESPACE': 'SIEM'})
        es_loader_newver = lambda_es_loader.add_version(
            name=__version__, description=__version__)
        es_loader_opt = es_loader_newver.node.default_child.cfn_options
        es_loader_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # send only
        # sqs_aes_siem_dlq.grant(lambda_es_loader, 'sqs:SendMessage')
        # send and receive. note that re-processing messages from the DLQ like this can loop
        sqs_aes_siem_dlq.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        sqs_aes_siem_splitted_logs.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        lambda_es_loader.add_event_source(
            aws_lambda_event_sources.SqsEventSource(
                sqs_aes_siem_splitted_logs, batch_size=1))

        # es-loader on EC2 role
        sqs_aes_siem_dlq.grant(
            aes_siem_es_loader_ec2_role, 'sqs:GetQueue*', 'sqs:ListQueues*',
            'sqs:ReceiveMessage*', 'sqs:DeleteMessage*')

        lambda_geo = aws_lambda.Function(
            self, 'LambdaGeoipDownloader',
            function_name='aes-siem-geoip-downloader',
            description=f'{SOLUTION_NAME} / geoip-downloader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            code=aws_lambda.Code.asset('../lambda/geoip_downloader'),
            handler='index.lambda_handler',
            memory_size=320,
            timeout=core.Duration.seconds(300),
            environment={
                's3bucket_name': s3bucket_name_geo,
                'license_key': geoip_license_key.value_as_string,
            }
        )
        lambda_geo_newver = lambda_geo.add_version(
            name=__version__, description=__version__)
        lamba_geo_opt = lambda_geo_newver.node.default_child.cfn_options
        lamba_geo_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # setup OpenSearch Service
        ######################################################################
        lambda_deploy_es = aws_lambda.Function(
            self, 'LambdaDeployAES',
            function_name='aes-siem-deploy-aes',
            description=f'{SOLUTION_NAME} / opensearch domain deployment',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            # code=aws_lambda.Code.asset('../lambda/deploy_es.zip'),
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_domain_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        lambda_deploy_es.add_environment(
            's3_snapshot', s3_snapshot.bucket_name)
        if vpc_type:
            lambda_deploy_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_deploy_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_deploy_es.add_environment('vpc_subnet_id', 'None')
            lambda_deploy_es.add_environment('security_group_id', 'None')
        deploy_es_newver = lambda_deploy_es.add_version(
            name=__version__, description=__version__)
        deploy_es_opt = deploy_es_newver.node.default_child.cfn_options
        deploy_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # execute lambda_deploy_es to deploy the Amazon OpenSearch Service domain
        aes_domain = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainDeployedR2',
            service_token=lambda_deploy_es.function_arn,)
        aes_domain.add_override('Properties.ConfigVersion', __version__)

        es_endpoint = aes_domain.get_att('es_endpoint').to_string()
        lambda_es_loader.add_environment('ES_ENDPOINT', es_endpoint)
        lambda_es_loader.add_environment(
            'SQS_SPLITTED_LOGS_URL', sqs_aes_siem_splitted_logs.queue_url)

        lambda_configure_es_vpc_kwargs = {}
        if vpc_type:
            lambda_configure_es_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': aws_ec2.SubnetSelection(subnets=[subnet1, ]), }
        lambda_configure_es = aws_lambda.Function(
            self, 'LambdaConfigureAES', **lambda_configure_es_vpc_kwargs,
            function_name='aes-siem-configure-aes',
            description=f'{SOLUTION_NAME} / opensearch configuration',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_config_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
                'es_endpoint': es_endpoint,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        lambda_configure_es.add_environment(
            's3_snapshot', s3_snapshot.bucket_name)
        if vpc_type:
            lambda_configure_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_configure_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_configure_es.add_environment('vpc_subnet_id', 'None')
            lambda_configure_es.add_environment('security_group_id', 'None')
        configure_es_newver = lambda_configure_es.add_version(
            name=__version__, description=__version__)
        configure_es_opt = configure_es_newver.node.default_child.cfn_options
        configure_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        aes_config = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainConfiguredR2',
            service_token=lambda_configure_es.function_arn,)
        aes_config.add_override('Properties.ConfigVersion', __version__)
        aes_config.add_depends_on(aes_domain)
        aes_config.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        es_arn = (f'arn:aws:es:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
                  f':domain/{aes_domain_name}')
        # grant permission to es_loader role
        inline_policy_to_load_entries_into_es = aws_iam.Policy(
            self, 'aes-siem-policy-to-load-entries-to-es',
            policy_name='aes-siem-policy-to-load-entries-to-es',
            statements=[
                aws_iam.PolicyStatement(
                    actions=['es:*'],
                    resources=[es_arn + '/*', ]),
            ]
        )
        lambda_es_loader.role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)
        aes_siem_es_loader_ec2_role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)

        # grant additional permission to es_loader role
        additional_kms_cmks = self.node.try_get_context('additional_kms_cmks')
        if additional_kms_cmks:
            inline_policy_access_to_additional_cmks = aws_iam.Policy(
                self, 'access_to_additional_cmks',
                policy_name='access_to_additional_cmks',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['kms:Decrypt'],
                        resources=sorted(set(additional_kms_cmks))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
        additional_buckets = self.node.try_get_context('additional_s3_buckets')

        if additional_buckets:
            buckets_list = []
            for bucket in additional_buckets:
                buckets_list.append(f'arn:aws:s3:::{bucket}')
                buckets_list.append(f'arn:aws:s3:::{bucket}/*')
            inline_policy_access_to_additional_buckets = aws_iam.Policy(
                self, 'access_to_additional_buckets',
                policy_name='access_to_additional_buckets',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['s3:GetObject*', 's3:GetBucket*', 's3:List*'],
                        resources=sorted(set(buckets_list))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)

        kms_aes_siem.grant_decrypt(lambda_es_loader)
        kms_aes_siem.grant_decrypt(aes_siem_es_loader_ec2_role)

        ######################################################################
        # s3 notification and grant permission
        ######################################################################
        s3_geo.grant_read_write(lambda_geo)
        s3_geo.grant_read(lambda_es_loader)
        s3_geo.grant_read(aes_siem_es_loader_ec2_role)
        s3_log.grant_read(lambda_es_loader)
        s3_log.grant_read(aes_siem_es_loader_ec2_role)

        # create s3 notification for es_loader
        notification = aws_s3_notifications.LambdaDestination(lambda_es_loader)

        # assign notification for the s3 PUT event type
        # most log systems use PUT, but CLB also uses POST & multipart upload
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='AWSLogs/'))

        # For user logs, not AWS logs
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='UserLogs/'))

        # Download geoip to S3 once by executing lambda_geo
        get_geodb = aws_cloudformation.CfnCustomResource(
            self, 'ExecLambdaGeoipDownloader',
            service_token=lambda_geo.function_arn,)
        get_geodb.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # Download the GeoIP DB every 12 hours
        rule = aws_events.Rule(
            self, 'CwlRuleLambdaGeoipDownloaderDilly',
            schedule=aws_events.Schedule.rate(core.Duration.hours(12)))
        rule.add_target(aws_events_targets.LambdaFunction(lambda_geo))

        ######################################################################
        # bucket policy
        ######################################################################
        s3_awspath = s3_log.bucket_arn + '/AWSLogs/' + core.Aws.ACCOUNT_ID
        bucket_policy_common1 = aws_iam.PolicyStatement(
            sid='ELB Policy',
            principals=[aws_iam.AccountPrincipal(
                account_id=region_mapping.find_in_map(
                    core.Aws.REGION, 'ElbV2AccountId'))],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],)
        # NLB / ALB / R53resolver / VPC Flow Logs
        bucket_policy_elb1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_elb2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_common1)
        s3_log.add_to_resource_policy(bucket_policy_elb1)
        s3_log.add_to_resource_policy(bucket_policy_elb2)

        # CloudTrail
        bucket_policy_trail1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For Cloudtrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:GetBucketAcl'], resources=[s3_log.bucket_arn],)
        bucket_policy_trail2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For CloudTrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_trail1)
        s3_log.add_to_resource_policy(bucket_policy_trail2)

        # GuardDuty
        bucket_policy_gd1 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to use the getBucketLocation operation',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:GetBucketLocation'], resources=[s3_log.bucket_arn],)
        bucket_policy_gd2 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to upload objects to the bucket',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_log.bucket_arn + '/*'],)
        bucket_policy_gd5 = aws_iam.PolicyStatement(
            sid='Deny non-HTTPS access', effect=aws_iam.Effect.DENY,
            actions=['s3:*'], resources=[s3_log.bucket_arn + '/*'],
            conditions={'Bool': {'aws:SecureTransport': 'false'}})
        bucket_policy_gd5.add_any_principal()
        s3_log.add_to_resource_policy(bucket_policy_gd1)
        s3_log.add_to_resource_policy(bucket_policy_gd2)
        s3_log.add_to_resource_policy(bucket_policy_gd5)
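        # For illustration, bucket_policy_gd5 renders to roughly this statement
        # (note it only covers object-level actions, since only the '/*'
        #  resource is listed):
        # {"Sid": "Deny non-HTTPS access", "Effect": "Deny", "Principal": "*",
        #  "Action": "s3:*", "Resource": "<log-bucket-arn>/*",
        #  "Condition": {"Bool": {"aws:SecureTransport": "false"}}}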

        # Config
        bucket_policy_config1 = aws_iam.PolicyStatement(
            sid='AWSConfig BucketPermissionsCheck and BucketExistenceCheck',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_config2 = aws_iam.PolicyStatement(
            sid='AWSConfigBucketDelivery',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/Config/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_config1)
        s3_log.add_to_resource_policy(bucket_policy_config2)

        # geoip
        bucket_policy_geo1 = aws_iam.PolicyStatement(
            sid='Allow geoip downloader and es-loader to read/write',
            principals=[lambda_es_loader.role, lambda_geo.role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_geo.bucket_arn + '/*'],)
        s3_geo.add_to_resource_policy(bucket_policy_geo1)

        # ES Snapshot
        bucket_policy_snapshot = aws_iam.PolicyStatement(
            sid='Allow ES to store snapshot',
            principals=[aes_siem_snapshot_role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_snapshot.bucket_arn + '/*'],)
        s3_snapshot.add_to_resource_policy(bucket_policy_snapshot)

        ######################################################################
        # for multi-account / Organizations
        ######################################################################
        if org_id or no_org_ids:
            ##################################################################
            # KMS key policy for multi-account / Organizations
            ##################################################################
            # for CloudTrail
            cond_tail2 = self.make_resource_list(
                path='arn:aws:cloudtrail:*:', tail=':trail/*',
                keys=self.list_without_none(org_mgmt_id, no_org_ids))
            key_policy_mul_trail2 = aws_iam.PolicyStatement(
                sid=('Allow CloudTrail to encrypt logs for multiaccounts'),
                actions=['kms:GenerateDataKey*'],
                principals=[aws_iam.ServicePrincipal(
                    'cloudtrail.amazonaws.com')],
                resources=['*'],
                conditions={'StringLike': {
                    'kms:EncryptionContext:aws:cloudtrail:arn': cond_tail2}})
            kms_aes_siem.add_to_resource_policy(key_policy_mul_trail2)
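            # For illustration: with a hypothetical management account id of
            # '111122223333' and no no_org_ids, cond_tail2 would presumably be
            # ['arn:aws:cloudtrail:*:111122223333:trail/*'].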

            # for replication
            key_policy_rep1 = aws_iam.PolicyStatement(
                sid=('Enable cross account encrypt access for S3 Cross Region '
                     'Replication'),
                actions=['kms:Encrypt'],
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                resources=['*'],)
            kms_aes_siem.add_to_resource_policy(key_policy_rep1)

            ##################################################################
            # Bucket policy for multi-account / Organizations
            ##################################################################
            s3_log_bucket_arn = 'arn:aws:s3:::' + s3bucket_name_log

            # for CloudTrail
            s3_mulpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_org_trail = aws_iam.PolicyStatement(
                sid='AWSCloudTrailWrite for Multiaccounts / Organizations',
                principals=[
                    aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_mulpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_org_trail)

            # config
            s3_conf_multpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/Config/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_mul_config2 = aws_iam.PolicyStatement(
                sid='AWSConfigBucketDelivery',
                principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_conf_multpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_mul_config2)

            # for replication
            bucket_policy_rep1 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on objects',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:ReplicateDelete', 's3:ReplicateObject',
                         's3:ReplicateTags', 's3:GetObjectVersionTagging',
                         's3:ObjectOwnerOverrideToBucketOwner'],
                resources=[f'{s3_log_bucket_arn}/*'])
            bucket_policy_rep2 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on bucket',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:List*', 's3:GetBucketVersioning',
                         's3:PutBucketVersioning'],
                resources=[f'{s3_log_bucket_arn}'])
            s3_log.add_to_resource_policy(bucket_policy_rep1)
            s3_log.add_to_resource_policy(bucket_policy_rep2)

        ######################################################################
        # SNS topic for Amazon OpenSearch Service Alert
        ######################################################################
        sns_topic = aws_sns.Topic(
            self, 'SnsTopic', topic_name='aes-siem-alert',
            display_name='AES SIEM')

        sns_topic.add_subscription(aws_sns_subscriptions.EmailSubscription(
            email_address=sns_email.value_as_string))
        sns_topic.grant_publish(aes_siem_sns_role)

        ######################################################################
        # output of CFn
        ######################################################################
        kibanaurl = f'https://{es_endpoint}/_dashboards/'
        kibanaadmin = aes_domain.get_att('kibanaadmin').to_string()
        kibanapass = aes_domain.get_att('kibanapass').to_string()

        core.CfnOutput(self, 'RoleDeploy', export_name='role-deploy',
                       value=aes_siem_deploy_role_for_lambda.role_arn)
        core.CfnOutput(self, 'DashboardsUrl', export_name='dashboards-url',
                       value=kibanaurl)
        core.CfnOutput(self, 'DashboardsPassword',
                       export_name='dashboards-pass', value=kibanapass,
                       description=('Please change the password in OpenSearch '
                                    'Dashboards ASAP'))
        core.CfnOutput(self, 'DashboardsAdminID',
                       export_name='dashboards-admin', value=kibanaadmin)
Code example #28
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create unique ID
        min_char = 8
        max_char = 12
        allchar = string.ascii_lowercase + string.digits
        uniqueID = "".join(
            choice(allchar) for x in range(randint(min_char, max_char)))
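        # e.g. uniqueID might come out as 'k3x9q2ab' (a hypothetical value:
        # 8-12 random lowercase letters and digits)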

        # --------------------------
        # create 1st lambda function
        function = _lambda.Function(
            self,
            "simple_transcribe",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="lambda_function.lambda_handler",
            code=_lambda.Code.from_asset("./lambda/simple_transcribe"),
            environment={
                "CUSTOM_VOCABULARY": "custom-vocab",
                "LANGUAGE": "nl-NL"
            },
            timeout=core.Duration.seconds(60))
        # add policy Statement to launch a Transcribe job
        transcribe_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                "transcribe:GetTranscriptionJob",
                "transcribe:StartTranscriptionJob"
            ],
            resources=["*"])
        function.add_to_role_policy(transcribe_policy)
        # add policy Statement to read S3 objects
        s3_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                "s3:GetAccessPoint", "s3:GetLifecycleConfiguration",
                "s3:GetBucketTagging", "s3:GetInventoryConfiguration",
                "s3:GetObjectVersionTagging", "s3:ListBucketVersions",
                "s3:GetBucketLogging", "s3:GetAccelerateConfiguration",
                "s3:GetBucketPolicy", "s3:GetObjectVersionTorrent",
                "s3:GetObjectAcl", "s3:GetEncryptionConfiguration",
                "s3:GetBucketObjectLockConfiguration",
                "s3:GetBucketRequestPayment", "s3:GetAccessPointPolicyStatus",
                "s3:GetObjectVersionAcl", "s3:GetObjectTagging",
                "s3:GetMetricsConfiguration", "s3:GetBucketPublicAccessBlock",
                "s3:GetBucketPolicyStatus", "s3:ListBucketMultipartUploads",
                "s3:GetObjectRetention", "s3:GetBucketWebsite",
                "s3:ListAccessPoints", "s3:ListJobs", "s3:GetBucketVersioning",
                "s3:GetBucketAcl", "s3:GetObjectLegalHold",
                "s3:GetBucketNotification", "s3:GetReplicationConfiguration",
                "s3:ListMultipartUploadParts", "s3:GetObject",
                "s3:GetObjectTorrent", "s3:GetAccountPublicAccessBlock",
                "s3:DescribeJob", "s3:GetBucketCORS",
                "s3:GetAnalyticsConfiguration",
                "s3:GetObjectVersionForReplication", "s3:GetBucketLocation",
                "s3:GetAccessPointPolicy", "s3:GetObjectVersion"
            ],
            resources=["*"])
        function.add_to_role_policy(s3_policy)
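        # (A shorter, if broader, alternative sketch would be to attach the AWS
        #  managed read-only policy instead of listing every action:
        #  function.role.add_managed_policy(
        #      _iam.ManagedPolicy.from_aws_managed_policy_name(
        #          'AmazonS3ReadOnlyAccess')))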

        # create s3 bucket for source files
        s3 = _s3.Bucket(self,
                        "transcribe-bucket-in-{}".format(uniqueID),
                        bucket_name="transcribe-source-{}".format(uniqueID))

        # create s3 notification to trigger 1st lambda function
        notification = _s3nots.LambdaDestination(function)

        # assign notification for the s3 event type (ex: OBJECT_CREATED)
        s3.add_event_notification(_s3.EventType.OBJECT_CREATED, notification)

        # --------------------------
        # create 2nd lambda function
        function2 = _lambda.Function(
            self,
            "simple_transcribe_report",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="lambda_function.lambda_handler",
            code=_lambda.Code.from_asset("./lambda/simple_transcribe_report"),
            environment={
                "S3_BUCKET": "transcribe-results-{}".format(uniqueID),
                "SOURCE_LANGUAGE": "nl",
                "TARGET_LANGUAGE": "en"
            },
            timeout=core.Duration.seconds(60))
        # add policy Statement to launch a Transcribe job
        function2.add_to_role_policy(transcribe_policy)
        # add policy Statement to call Translate
        translate_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=["translate:TranslateText"],
            resources=["*"])
        function2.add_to_role_policy(translate_policy)
        # add policy Statement to call Comprehend
        comprehend_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                "comprehend:DetectDominantLanguage",
                "comprehend:DetectEntities", "comprehend:DetectKeyPhrases",
                "comprehend:DetectSentiment"
            ],
            resources=["*"])
        function2.add_to_role_policy(comprehend_policy)
        # add policy Statement to write to S3
        s3_policy2 = _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                                          actions=["s3:PutObject"],
                                          resources=["*"])
        function2.add_to_role_policy(s3_policy2)

        # create s3 bucket for output files
        s3 = _s3.Bucket(self,
                        "transcribe-bucket-out-{}".format(uniqueID),
                        bucket_name="transcribe-results-{}".format(uniqueID))

        # create event rule and target to trigger the 2nd lambda function
        rule = _events.Rule(
            self,
            "Rule",
            rule_name="simple-transcribe-done",
            event_pattern=_events.EventPattern(
                detail={"TranscriptionJobStatus": ["COMPLETED", "FAILED"]},
                detail_type=["Transcribe Job State Change"],
                source=["aws.transcribe"],
            ),
        )
        rule.add_target(targets.LambdaFunction(function2))
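        # For illustration, an EventBridge event matched by this rule looks
        # roughly like (job name is hypothetical):
        # {
        #   "source": "aws.transcribe",
        #   "detail-type": "Transcribe Job State Change",
        #   "detail": {"TranscriptionJobName": "my-job",
        #              "TranscriptionJobStatus": "COMPLETED"}
        # }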
Code example #29
    def __init__(
        self,
        scope: core.Construct,
        construct_id: str,
        **kwargs,
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Get some context properties
        log_level = self.node.try_get_context("log_level")
        api_name = self.node.try_get_context("api_name")
        stage_name = self.node.try_get_context("stage_name")
        endpoint_filter = self.node.try_get_context("endpoint_filter")
        api_lambda_memory = self.node.try_get_context("api_lambda_memory")
        api_lambda_timeout = self.node.try_get_context("api_lambda_timeout")
        metrics_lambda_memory = self.node.try_get_context("metrics_lambda_memory")
        metrics_lambda_timeout = self.node.try_get_context("metrics_lambda_timeout")
        dynamodb_read_capacity = self.node.try_get_context("dynamodb_read_capacity")
        dynamodb_write_capacity = self.node.try_get_context("dynamodb_write_capacity")
        delivery_sync = self.node.try_get_context("delivery_sync")
        firehose_interval = self.node.try_get_context("firehose_interval")
        firehose_mb_size = self.node.try_get_context("firehose_mb_size")

        # Names for the DynamoDB tables and Firehose delivery stream for this project
        assignment_table_name = f"{api_name}-assignment-{stage_name}"
        metrics_table_name = f"{api_name}-metrics-{stage_name}"
        delivery_stream_name = f"{api_name}-events-{stage_name}"
        log_stream_name = "ApiEvents"

        assignment_table = aws_dynamodb.Table(
            self,
            "AssignmentTable",
            table_name=assignment_table_name,
            partition_key=aws_dynamodb.Attribute(
                name="user_id",
                type=aws_dynamodb.AttributeType.STRING,
            ),
            sort_key=aws_dynamodb.Attribute(
                name="endpoint_name",
                type=aws_dynamodb.AttributeType.STRING,
            ),
            read_capacity=dynamodb_read_capacity,
            write_capacity=dynamodb_write_capacity,
            removal_policy=core.RemovalPolicy.DESTROY,
            time_to_live_attribute="ttl",
        )
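        # (DynamoDB TTL expects the "ttl" attribute to hold an expiry time as a
        #  Unix epoch timestamp in seconds; items are removed after it passes.)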

        metrics_table = aws_dynamodb.Table(
            self,
            "MetricsTable",
            table_name=metrics_table_name,
            partition_key=aws_dynamodb.Attribute(
                name="endpoint_name", type=aws_dynamodb.AttributeType.STRING
            ),
            read_capacity=dynamodb_read_capacity,
            write_capacity=dynamodb_write_capacity,
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        # Create lambda layer for "aws-xray-sdk" and latest "boto3"
        xray_layer = aws_lambda.LayerVersion(
            self,
            "XRayLayer",
            code=aws_lambda.AssetCode.from_asset("layers"),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_7],
            description="A layer containing AWS X-Ray SDK for Python",
        )

        # Create Lambda function to read from assignment and metrics table, log metrics
        # 2048 MB is only ~3% higher cost than 768 MB here, since it runs ~2.5x faster
        # https://aws.amazon.com/blogs/aws/new-for-aws-lambda-functions-with-up-to-10-gb-of-memory-and-6-vcpus/
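        # (Lambda cost is billed as memory x duration, so the ~2.7x larger
        #  memory size is mostly offset by the ~2.5x shorter run time.)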
        lambda_invoke = aws_lambda.Function(
            self,
            "ApiFunction",
            code=aws_lambda.AssetCode.from_asset("lambda/api"),
            handler="lambda_invoke.lambda_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(api_lambda_timeout),
            memory_size=api_lambda_memory,
            environment={
                "ASSIGNMENT_TABLE": assignment_table.table_name,
                "METRICS_TABLE": metrics_table.table_name,
                "DELIVERY_STREAM_NAME": delivery_stream_name,
                "DELIVERY_SYNC": "true" if delivery_sync else "false",
                "LOG_LEVEL": log_level,
            },
            layers=[xray_layer],
            tracing=aws_lambda.Tracing.ACTIVE,
        )

        # Grant read/write permissions to assignment and metrics tables
        assignment_table.grant_read_data(lambda_invoke)
        assignment_table.grant_write_data(lambda_invoke)
        metrics_table.grant_read_data(lambda_invoke)

        # Add sagemaker invoke
        lambda_invoke.add_to_role_policy(
            aws_iam.PolicyStatement(
                actions=[
                    "sagemaker:InvokeEndpoint",
                ],
                resources=[
                    "arn:aws:sagemaker:{}:{}:endpoint/{}".format(
                        self.region, self.account, endpoint_filter
                    ),
                ],
            )
        )
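        # With a hypothetical endpoint_filter of "mlops-*", the resource above
        # would resolve to something like:
        # arn:aws:sagemaker:us-east-1:123456789012:endpoint/mlops-*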

        # Create an API Gateway proxy in front of the API lambda (the construct
        # emits the endpoint URL as a stack output)
        aws_apigateway.LambdaRestApi(
            self,
            "Api",
            rest_api_name=api_name,
            deploy_options=aws_apigateway.StageOptions(stage_name=stage_name),
            proxy=True,
            handler=lambda_invoke,
        )

        # Create lambda function for registering endpoints
        lambda_register = aws_lambda.Function(
            self,
            "RegisterFunction",
            code=aws_lambda.AssetCode.from_asset("lambda/api"),
            handler="lambda_register.lambda_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(metrics_lambda_timeout),
            memory_size=metrics_lambda_memory,
            environment={
                "METRICS_TABLE": metrics_table.table_name,
                "DELIVERY_STREAM_NAME": delivery_stream_name,
                "DELIVERY_SYNC": "true" if delivery_sync else "false",
                "LOG_LEVEL": log_level,
            },
            layers=[xray_layer],
            tracing=aws_lambda.Tracing.ACTIVE,
        )

        # Add write metrics
        metrics_table.grant_write_data(lambda_register)

        # Add sagemaker describe-endpoint permission
        lambda_register.add_to_role_policy(
            aws_iam.PolicyStatement(
                actions=[
                    "sagemaker:DescribeEndpoint",
                ],
                resources=[
                    "arn:aws:sagemaker:{}:{}:endpoint/{}".format(
                        self.region, self.account, endpoint_filter
                    ),
                ],
            )
        )

        # Grant permissions to the service catalog use role
        service_catalog_role = aws_iam.Role.from_role_arn(
            self,
            "RegisterRole",
            f"arn:{self.partition}:iam::{self.account}:role/service-role/AmazonSageMakerServiceCatalogProductsUseRole",
        )
        lambda_register.grant_invoke(service_catalog_role)

        # Return the register lambda function as output
        core.CfnOutput(self, "RegisterLambda", value=lambda_register.function_name)

        # Policy statement for publishing custom CloudWatch metrics
        cloudwatch_metric_policy = aws_iam.PolicyStatement(
            actions=["cloudwatch:PutMetricData"], resources=["*"]
        )
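        # (cloudwatch:PutMetricData does not support resource-level permissions,
        #  which is why the resource has to be "*".)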

        # If we are only using sync delivery, don't require firehose or s3 buckets
        if delivery_sync:
            metrics_table.grant_write_data(lambda_invoke)
            lambda_invoke.add_to_role_policy(cloudwatch_metric_policy)
            print("# No Firehose")
            return

        # Allow the invoke lambda to put records to the Firehose delivery stream
        lambda_invoke.add_to_role_policy(
            aws_iam.PolicyStatement(
                actions=[
                    "firehose:PutRecord",
                ],
                resources=[
                    "arn:aws:firehose:{}:{}:deliverystream/{}".format(
                        self.region, self.account, delivery_stream_name
                    ),
                ],
            )
        )

        # Create s3 bucket for event logging (name must be < 63 chars)
        s3_logs = aws_s3.Bucket(
            self,
            "S3Logs",
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        firehose_role = aws_iam.Role(
            self,
            "KinesisFirehoseRole",
            assumed_by=aws_iam.ServicePrincipal("firehose.amazonaws.com"),
        )

        firehose_role.add_to_policy(
            aws_iam.PolicyStatement(
                actions=[
                    "s3:AbortMultipartUpload",
                    "s3:GetBucketLocation",
                    "s3:GetObject",
                    "s3:ListBucket",
                    "s3:ListBucketMultipartUploads",
                    "s3:PutObject",
                ],
                resources=[s3_logs.bucket_arn, f"{s3_logs.bucket_arn}/*"],
            )
        )

        # Create LogGroup and Stream, and add permissions to role
        firehose_log_group = aws_logs.LogGroup(self, "FirehoseLogGroup")
        firehose_log_stream = firehose_log_group.add_stream(log_stream_name)

        firehose_role.add_to_policy(
            aws_iam.PolicyStatement(
                actions=[
                    "logs:PutLogEvents",
                ],
                resources=[
                    f"arn:{self.partition}:logs:{self.region}:{self.account}:log-group:{firehose_log_group.log_group_name}:log-stream:{firehose_log_stream.log_stream_name}",
                ],
            )
        )

        # Create the Firehose delivery stream with an S3 destination
        aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "KensisLogs",
            delivery_stream_name=delivery_stream_name,
            s3_destination_configuration=aws_kinesisfirehose.CfnDeliveryStream.S3DestinationConfigurationProperty(
                bucket_arn=s3_logs.bucket_arn,
                compression_format="GZIP",
                role_arn=firehose_role.role_arn,
                prefix=f"{stage_name}/",
                cloud_watch_logging_options=aws_kinesisfirehose.CfnDeliveryStream.CloudWatchLoggingOptionsProperty(
                    enabled=True,
                    log_group_name=firehose_log_group.log_group_name,
                    log_stream_name=firehose_log_stream.log_stream_name,
                ),
                buffering_hints=aws_kinesisfirehose.CfnDeliveryStream.BufferingHintsProperty(
                    interval_in_seconds=firehose_interval,
                    size_in_m_bs=firehose_mb_size,
                ),
            ),
        )
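        # Firehose flushes its buffer to S3 when either firehose_interval
        # seconds have elapsed or firehose_mb_size MB have accumulated,
        # whichever comes first.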

        # Create lambda function for processing metrics
        lambda_metrics = aws_lambda.Function(
            self,
            "MetricsFunction",
            code=aws_lambda.AssetCode.from_asset("lambda/api"),
            handler="lambda_metrics.lambda_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(metrics_lambda_timeout),
            memory_size=metrics_lambda_memory,
            environment={
                "METRICS_TABLE": metrics_table.table_name,
                "DELIVERY_STREAM_NAME": delivery_stream_name,
                "LOG_LEVEL": log_level,
            },
            layers=[xray_layer],
            tracing=aws_lambda.Tracing.ACTIVE,
        )

        # Add write metrics for dynamodb table
        metrics_table.grant_write_data(lambda_metrics)

        # Add put metrics for cloudwatch
        lambda_metrics.add_to_role_policy(cloudwatch_metric_policy)

        # Allow the metrics lambda to read from S3 and write to DynamoDB
        s3_logs.grant_read(lambda_metrics)

        # Create S3 logs notification for processing lambda
        notification = aws_s3_notifications.LambdaDestination(lambda_metrics)
        s3_logs.add_event_notification(aws_s3.EventType.OBJECT_CREATED, notification)
Code example #30
File: faropt_stack.py Project: chriscoombs/faropt
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Build and push faropt container
        dockercontainer = ecs.ContainerImage.from_asset(
            directory='Dockerstuff', build_args=['-t faropt .'])

        # Create vpc
        vpc = ec2.Vpc(self, 'MyVpc', max_azs=3)  # default is all AZs in region
        subnets = vpc.private_subnets

        # Create log groups for workers
        w_logs = logs.LogGroup(self,
                               'faroptlogGroup',
                               log_group_name='faroptlogGroup')

        # Create role for ECS
        nRole = iam.Role(self,
                         'ECSExecutionRole',
                         assumed_by=iam.ServicePrincipal('ecs-tasks'))

        nPolicy = iam.Policy(
            self,
            "ECSExecutionPolicy",
            policy_name="ECSExecutionPolicy",
            statements=[
                iam.PolicyStatement(
                    actions=[
                        'ecr:BatchCheckLayerAvailability',
                        'ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage',
                        'ecr:GetAuthorizationToken', 'logs:CreateLogStream',
                        'logs:PutLogEvents', 'sagemaker:*', 's3:*',
                        'cloudwatch:PutMetricData'
                    ],
                    resources=['*'],
                ),
            ])
        nPolicy.attach_to_role(nRole)

        # Create ECS cluster
        cluster = ecs.Cluster(self,
                              'FarOptCluster',
                              vpc=vpc,
                              cluster_name='FarOptCluster')

        nspace = cluster.add_default_cloud_map_namespace(
            name='local-faropt', type=sd.NamespaceType.DNS_PRIVATE, vpc=vpc)

        # create s3 bucket

        s3 = _s3.Bucket(self, "s3bucket")
        s3async = _s3.Bucket(self, "s3async")

        # Create DynamoDB tables for job and recipe metadata
        pkey1 = ddb.Attribute(name='jobid', type=ddb.AttributeType.STRING)
        jobtable = ddb.Table(self,
                             "FaroptJobTable",
                             table_name='FaroptJobTable',
                             partition_key=pkey1)

        pkey2 = ddb.Attribute(name='recipeid', type=ddb.AttributeType.STRING)
        recipetable = ddb.Table(self,
                                "FaroptRecipeTable",
                                table_name='FaroptRecipeTable',
                                partition_key=pkey2)
        # Other Table options (billing_mode, encryption, point_in_time_recovery,
        # read/write capacity, removal_policy, replication_regions, sort_key,
        # stream, time_to_live_attribute, ...) are left at their defaults.

        # -------------------- Add worker task ------------------------

        faroptTask = ecs.TaskDefinition(
            self,
            'taskDefinitionScheduler',
            cpu='4096',
            memory_mib='16384',
            network_mode=ecs.NetworkMode.AWS_VPC,
            placement_constraints=None,
            execution_role=nRole,
            family='Faropt-Scheduler',
            task_role=nRole,
            compatibility=ecs.Compatibility.FARGATE)
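        # (cpu='4096' with memory_mib='16384' corresponds to the 4 vCPU / 16 GB
        #  Fargate size combination.)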

        faroptTask.add_container('FarOptImage',
                                 image=dockercontainer,
                                 cpu=4096,
                                 memory_limit_mib=16384,
                                 memory_reservation_mib=16384,
                                 environment={'s3bucket': s3.bucket_name},
                                 logging=ecs.LogDriver.aws_logs(
                                     stream_prefix='faroptlogs',
                                     log_group=w_logs))

        # ------------------------------------------------------
        # Trigger a Fargate task from Lambda when an object lands in S3

        # create lambda function
        function = _lambda.Function(self,
                                    "lambda_function",
                                    runtime=_lambda.Runtime.PYTHON_3_7,
                                    handler="lambda-handler.main",
                                    code=_lambda.Code.asset("./lambda"),
                                    environment={
                                        'cluster_name': cluster.cluster_name,
                                        'launch_type': 'FARGATE',
                                        'task_definition':
                                        faroptTask.to_string(),
                                        'task_family': faroptTask.family,
                                        'subnet1': subnets[0].subnet_id,
                                        'subnet2': subnets[-1].subnet_id,
                                        'bucket': s3.bucket_name
                                    },
                                    initial_policy=[
                                        iam.PolicyStatement(actions=[
                                            'ecs:RunTask',
                                            'ecs:PutAccountSetting', 's3:*',
                                            'iam:PassRole'
                                        ],
                                                            resources=['*'])
                                    ])

        # create s3 notification for lambda function
        notification = aws_s3_notifications.LambdaDestination(function)

        # assign notification for the s3 event type (ex: OBJECT_CREATED)
        s3.add_event_notification(_s3.EventType.OBJECT_CREATED, notification)

        # Lambda opt function with layer

        # 1- create layer

        layercode2 = _lambda.Code.from_asset(
            path="./layers/orblacknp.zip")  # adding np to the layer
        layer2 = _lambda.LayerVersion(self, id="layer2", code=layercode2)

        # 2- create function
        function2 = _lambda.Function(self,
                                     "lambda_function2",
                                     runtime=_lambda.Runtime.PYTHON_3_7,
                                     handler="lambda-handler.main",
                                     code=_lambda.Code.asset("./lambda2"),
                                     environment={
                                         'cluster_name': cluster.cluster_name,
                                         'launch_type': 'FARGATE',
                                         'task_definition':
                                         faroptTask.to_string(),
                                         'task_family': faroptTask.family,
                                         'subnet1': subnets[0].subnet_id,
                                         'subnet2': subnets[-1].subnet_id,
                                         'bucket': s3.bucket_name
                                     },
                                     timeout=core.Duration.seconds(900),
                                     memory_size=10240,
                                     layers=[layer2],
                                     initial_policy=[
                                         iam.PolicyStatement(actions=[
                                             'ecs:RunTask',
                                             'ecs:PutAccountSetting', 's3:*',
                                             'iam:PassRole',
                                             'cloudwatch:PutMetricData'
                                         ],
                                                             resources=['*'])
                                     ])

        # Lambda API resolver with faropt layer
        # 1- create layer

        layercode3 = _lambda.Code.from_asset(
            path="./layers/faroptlayer.zip")  # layer packaging the faropt library
        layer3 = _lambda.LayerVersion(self, id="layer3", code=layercode3)

        # 2- create function
        function3 = _lambda.Function(
            self,
            "lambda_function3",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="lambda-handler.lambda_handler",
            code=_lambda.Code.asset("./lambda3"),
            environment={
                'cluster_name': cluster.cluster_name,
                'launch_type': 'FARGATE',
                'task_definition': faroptTask.to_string(),
                'task_family': faroptTask.family,
                'subnet1': subnets[0].subnet_id,
                'subnet2': subnets[-1].subnet_id,
                'bucket': s3.bucket_name
            },
            timeout=core.Duration.seconds(120),
            memory_size=2048,
            layers=[layer3],
            initial_policy=[
                iam.PolicyStatement(actions=[
                    'ecs:RunTask', 'ecs:PutAccountSetting', 's3:*',
                    'iam:PassRole', 'cloudwatch:PutMetricData', 'ecr:*',
                    'dynamodb:*', "cloudformation:Describe*",
                    "cloudformation:Get*", "cloudformation:List*",
                    "logs:CreateLogStream", "logs:PutLogEvents"
                ],
                                    resources=['*'])
            ])

        # OUTPUTS
        core.CfnOutput(self,
                       's3output',
                       value=s3.bucket_name,
                       export_name='bucket')
        core.CfnOutput(self,
                       'jobtable',
                       value=jobtable.table_name,
                       export_name='jobtable')
        core.CfnOutput(self,
                       'recipetable',
                       value=recipetable.table_name,
                       export_name='recipetable')
        core.CfnOutput(self,
                       's3asyncoutput',
                       value=s3async.bucket_name,
                       export_name='asyncbucket')
        core.CfnOutput(self,
                       'lambdaopt',
                       value=function2.function_name,
                       export_name='lambdaopt')