Example #1
    def __init__(self, scope: core.Construct, id: str):
        super().__init__(scope, id)
        self.stack_name = core.Stack.of(self).stack_name

        self.download_dlq = sqs.DeadLetterQueue(
            max_receive_count=2,
            queue=sqs.Queue(
                self,
                "DownloadDeadLetterQueue",
                queue_name=f"{self.stack_name}-{names.DOWNLOAD_DLQ}",
                retention_period=core.Duration.days(14),
            ),
        )
        self.download_queue = sqs.Queue(
            self,
            "DownloadQueue",
            queue_name=f"{self.stack_name}-{names.DOWNLOAD_QUEUE}",
            retention_period=core.Duration.days(14),
            visibility_timeout=core.Duration.seconds(300),
            dead_letter_queue=self.download_dlq,
        )

        self.upload_dlq = sqs.DeadLetterQueue(
            max_receive_count=2,
            queue=sqs.Queue(
                self,
                "UploadDeadLetterQueue",
                queue_name=f"{self.stack_name}-{names.UPLOAD_DLQ}",
                retention_period=core.Duration.days(14),
            ),
        )

        self.upload_queue = sqs.Queue(
            self,
            "UploadQueue",
            queue_name=f"{self.stack_name}-{names.UPLOAD_QUEUE}",
            retention_period=core.Duration.days(14),
            visibility_timeout=core.Duration.seconds(300),
            content_based_deduplication=True,
            fifo=True,
            dead_letter_queue=self.upload_dlq,
        )

        # note: you can't simply use the `queue_name` attribute of the
        # construct objects here: cdk/cfn will complain about
        # 'The Name field of Export must not depend on any resources'
        queues = {
            names.DOWNLOAD_QUEUE: self.download_queue,
            names.DOWNLOAD_DLQ: self.download_dlq.queue,
            names.UPLOAD_QUEUE: self.upload_queue,
            names.UPLOAD_DLQ: self.upload_dlq.queue,
        }
        for name, queue in queues.items():
            core.CfnOutput(
                self,
                f"{name}-queue-export",
                export_name=f"{self.stack_name}-{name.replace('.', '-')}-url",
                value=queue.queue_url,
            )
Example #2
    def __init__(self, scope: core.Construct, _id: str, bucket_para, **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)

        self.ddb_file_list = ddb.Table(self, "ddb",
                                       table_name=table_queue_name,
                                       partition_key=ddb.Attribute(name="Key", type=ddb.AttributeType.STRING),
                                       billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

        self.sqs_queue_DLQ = sqs.Queue(self, "sqs_DLQ",
                                       queue_name=table_queue_name + "-DLQ",
                                       visibility_timeout=core.Duration.hours(1),
                                       retention_period=core.Duration.days(14)
                                       )
        self.sqs_queue = sqs.Queue(self, "sqs_queue",
                                   queue_name=table_queue_name,
                                   visibility_timeout=core.Duration.hours(1),
                                   retention_period=core.Duration.days(14),
                                   dead_letter_queue=sqs.DeadLetterQueue(
                                       max_receive_count=24,
                                       queue=self.sqs_queue_DLQ
                                   )
                                   )
        self.ssm_bucket_para = ssm.StringParameter(self, "para-bucket",
                                                   string_value=json.dumps(bucket_para),
                                                   parameter_name=ssm_parameter_bucket
                                                   )

        # You need to manually set up ssm_credential_para in SSM Parameter Store before deploying the CDK stack.
        # It is imported here; the version number MUST match the parameter's current version exactly!
        self.ssm_credential_para = ssm.StringParameter.from_secure_string_parameter_attributes(
            self, "ssm_parameter_credentials",
            parameter_name=ssm_parameter_credentials,
            version=2
        )
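CloudFormation cannot create SecureString parameters, which is why the credentials parameter has to exist before deployment; a hedged boto3 sketch of creating it as a one-off setup step (parameter name and payload are assumptions):

    # One-off setup script -- not part of the CDK app.
    import json
    import boto3

    ssm_client = boto3.client("ssm")
    ssm_client.put_parameter(
        Name="s3_migration_credentials",   # must match ssm_parameter_credentials (assumed name)
        Value=json.dumps({"aws_access_key_id": "...", "aws_secret_access_key": "..."}),
        Type="SecureString",
        Overwrite=True,
    )
    # Each overwrite bumps the parameter version; the `version=` passed to
    # from_secure_string_parameter_attributes must match the current version.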
Example #3
    def __init__(self, scope: core.Construct, construct_id: str,
                 queue_context: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        q = dict(self.node.try_get_context(queue_context))

        queue_dlq = _sqs.Queue(self,
                               q["queue_dlq_name"],
                               queue_name=q["queue_dlq_name"])

        queue = _sqs.Queue(
            self,
            q["queue_name"],
            queue_name=q["queue_name"],
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=q["queue_dlq_max_receive_count"],
                queue=queue_dlq),
            encryption=_sqs.QueueEncryption.KMS_MANAGED,
            visibility_timeout=Duration.seconds(30),
            delivery_delay=Duration.seconds(15),
            retention_period=Duration.hours(14),
        )

        self.queue = queue
        self.queue_dlq = queue_dlq

        # Outputs

        core.CfnOutput(self, "QueueUrl", value=queue.queue_url)
Example #4
    def __init__(self, scope: core.Construct, _id: str, bucket_para,
                 **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)

        self.ddb_file_list = ddb.Table(
            self,
            "s3_migrate_ddb",
            partition_key=ddb.Attribute(name="Key",
                                        type=ddb.AttributeType.STRING),
            billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

        self.sqs_queue_DLQ = sqs.Queue(
            self,
            "s3_migrate_sqs_DLQ",
            visibility_timeout=core.Duration.hours(1),
            retention_period=core.Duration.days(14))
        self.sqs_queue = sqs.Queue(self,
                                   "s3_migrate_sqs_queue",
                                   visibility_timeout=core.Duration.hours(1),
                                   retention_period=core.Duration.days(14),
                                   dead_letter_queue=sqs.DeadLetterQueue(
                                       max_receive_count=24,
                                       queue=self.sqs_queue_DLQ))
        self.ssm_bucket_para = ssm.StringParameter(self,
                                                   "s3_migrate_bucket_para",
                                                   string_value=json.dumps(
                                                       bucket_para, indent=4))

        # You need to manually set up ssm_credential_para in SSM Parameter Store before deploying the CDK stack.
        # It is imported here; the version number MUST match the parameter's current version exactly!
        self.ssm_credential_para = ssm.StringParameter.from_secure_string_parameter_attributes(
            self,
            "ssm_parameter_credentials",
            parameter_name=ssm_parameter_credentials,
            version=1)

        # Create a new S3 bucket; new objects in this bucket will trigger SQS jobs.
        # This is not for existing S3 buckets: the jobsender scans those and creates the SQS jobs.
        self.s3bucket = s3.Bucket(self, "s3_migrate_bucket")
        self.s3bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED, s3n.SqsDestination(self.sqs_queue))

        # Deploy code
        self.s3_deploy = s3.Bucket(self, "s3_migrate_deploybucket")
        s3d.BucketDeployment(self,
                             "deploy_code",
                             sources=[s3d.Source.asset("./code")],
                             destination_bucket=self.s3_deploy)

        core.CfnOutput(self,
                       'NewS3Bucket_MigrateObjects',
                       value=self.s3bucket.bucket_name)
        core.CfnOutput(self,
                       'NewS3Bucket_deploy_code',
                       value=self.s3_deploy.bucket_name)
Example #5
    def __init__(self, scope: core.Construct, _id: str, **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)

        ddb_file_list = ddb.Table(self, "ddb",
                                  partition_key=ddb.Attribute(name="Key", type=ddb.AttributeType.STRING),
                                  billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

        sqs_queue_DLQ = sqs.Queue(self, "sqs_DLQ",
                                  visibility_timeout=core.Duration.minutes(15),
                                  retention_period=core.Duration.days(14)
                                  )
        sqs_queue = sqs.Queue(self, "sqs_queue",
                              visibility_timeout=core.Duration.minutes(15),
                              retention_period=core.Duration.days(14),
                              dead_letter_queue=sqs.DeadLetterQueue(
                                  max_receive_count=100,
                                  queue=sqs_queue_DLQ
                              )
                              )
        handler = lam.Function(self, "lambdaFunction",
                               code=lam.Code.asset("./lambda"),
                               handler="lambda_function.lambda_handler",
                               runtime=lam.Runtime.PYTHON_3_8,
                               memory_size=1024,
                               timeout=core.Duration.minutes(15),
                               environment={
                                   'table_name': ddb_file_list.table_name,
                                   'queue_name': sqs_queue.queue_name,
                                   'Des_bucket_default': Des_bucket_default,
                                   'Des_prefix_default': Des_prefix_default,
                                   'Des_region': Des_region,
                                   'StorageClass': StorageClass,
                                   'aws_access_key_id': aws_access_key_id,
                                   'aws_secret_access_key': aws_secret_access_key
                               })
        ddb_file_list.grant_read_write_data(handler)
        handler.add_event_source(SqsEventSource(sqs_queue))

        s3bucket = s3.Bucket(self, "s3bucket")
        s3bucket.grant_read(handler)
        s3bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                        s3n.SqsDestination(sqs_queue))

        # You can import an existing bucket and grant the Lambda function access to it:
        # exist_s3bucket = s3.Bucket.from_bucket_name(self, "import_bucket",
        #                                             bucket_name="your_bucket_name")
        # exist_s3bucket.grant_read(handler)

        # However, for an imported bucket you have to add the SQS event notification manually;
        # CloudFormation does not support adding notifications to imported buckets. A workaround is to
        # add on_cloud_trail_event for the bucket, but that routes through CloudTrail first. You also
        # need to grant the SQS queue permission to be invoked by that bucket.

        core.CfnOutput(self, "DynamoDB_Table", value=ddb_file_list.table_name)
        core.CfnOutput(self, "SQS_Job_Queue", value=sqs_queue.queue_name)
        core.CfnOutput(self, "SQS_Job_Queue_DLQ", value=sqs_queue_DLQ.queue_name)
        core.CfnOutput(self, "Worker_Lambda_Function", value=handler.function_name)
        core.CfnOutput(self, "New_S3_Bucket", value=s3bucket.bucket_name)
Example #6
    def add_endpoint(self, bucket: s3.Bucket, fn: Function):
        # create the queue
        queue = sqs.Queue(self,
                          f'{fn.id_prefix}Queue',
                          dead_letter_queue=sqs.DeadLetterQueue(
                              max_receive_count=5,
                              queue=sqs.Queue(
                                  self,
                                  f'{fn.id_prefix}DLQ',
                                  queue_name=f'{fn.queue_name}-dlq')),
                          queue_name=fn.queue_name)

        # create the receiver function
        # add the queue url as an environment variable
        receiver_function = lambda_.Function(
            self,
            f'{fn.id_prefix}ReceiverFunction',
            code=fn.function_code,
            environment={'QUEUE_URL': queue.queue_url},
            function_name=f'{fn.function_name_prefix}-receiver',
            handler=fn.receiver_function_handler,
            layers=[fn.function_dependencies_layer],
            # memory_size=256,
            runtime=lambda_.Runtime.PYTHON_3_8)

        # allow the receiver function to enqueue messages
        queue.grant_send_messages(receiver_function)

        # route requests to the receiver lambda
        self.api.add_routes(integration=apigw.LambdaProxyIntegration(
            handler=receiver_function),
                            methods=[fn.api_method],
                            path=fn.api_path)

        # create the handler function
        # add the bucket name as an environment variable
        handler_function = lambda_.Function(
            self,
            f'{fn.id_prefix}HandlerFunction',
            code=fn.function_code,
            environment={'BUCKET_NAME': bucket.bucket_name},
            function_name=f'{fn.function_name_prefix}-handler',
            handler=fn.handler_function_handler,
            layers=[fn.function_dependencies_layer],
            # memory_size=256,
            runtime=lambda_.Runtime.PYTHON_3_8)

        # add the queue as a trigger for the handler function
        handler_function.add_event_source(SqsEventSource(queue))

        # allow the handler function to access the bucket
        bucket.grant_read_write(handler_function)
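add_endpoint relies on a small descriptor object; a sketch of the shape it reads, inferred from the attributes accessed above (field types and example values are assumptions):

    # A hedged sketch of the descriptor `add_endpoint` expects.
    from dataclasses import dataclass
    from typing import Any
    from aws_cdk import aws_lambda as lambda_

    @dataclass
    class Function:
        id_prefix: str                       # used to build construct ids
        queue_name: str
        function_name_prefix: str
        function_code: lambda_.Code
        function_dependencies_layer: lambda_.ILayerVersion
        receiver_function_handler: str       # e.g. "app.receive" (assumed)
        handler_function_handler: str        # e.g. "app.handle" (assumed)
        api_method: Any                      # an apigatewayv2 HttpMethod
        api_path: str                        # e.g. "/things" (assumed)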
Example #7
    def __init__(self, scope: core.Construct, construct_id: str, env,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)

        # Create my-queue
        my_dl_queue = sqs.Queue(self,
                                id="SQSTestDLQueueMovingMsg",
                                queue_name="my_queue_dl_test")
        my_queue = sqs.Queue(self,
                             id="SQSTestMovingMsg",
                             queue_name="my_queue_test",
                             dead_letter_queue=sqs.DeadLetterQueue(
                                 max_receive_count=1, queue=my_dl_queue))
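With max_receive_count=1, a message that is received once and never deleted is moved to the dead-letter queue on the next receive attempt; a hedged boto3 sketch of exercising that behaviour (queue URLs are placeholders/assumptions):

    # Test sketch: send a message, receive it without deleting, and watch it fail over.
    import boto3

    sqs_client = boto3.client("sqs")
    queue_url = "https://sqs.<region>.amazonaws.com/<account>/my_queue_test"    # assumed
    dlq_url = "https://sqs.<region>.amazonaws.com/<account>/my_queue_dl_test"   # assumed

    sqs_client.send_message(QueueUrl=queue_url, MessageBody="hello")
    sqs_client.receive_message(QueueUrl=queue_url, VisibilityTimeout=1)  # receive #1, never deleted
    # Once the visibility timeout expires, the next receive exceeds maxReceiveCount=1
    # and SQS moves the message to the DLQ instead of redelivering it.
    sqs_client.receive_message(QueueUrl=queue_url, WaitTimeSeconds=5)
    print(sqs_client.receive_message(QueueUrl=dlq_url, WaitTimeSeconds=5))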
Example #8
    def __init__(self, scope: core.Construct, _id: str, bucket_para,
                 **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)

        self.ddb_file_list = ddb.Table(
            self,
            "ddb",
            table_name=table_queue_name,
            partition_key=ddb.Attribute(name="Key",
                                        type=ddb.AttributeType.STRING),
            billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

        self.sqs_queue_DLQ = sqs.Queue(
            self,
            "sqs_DLQ",
            queue_name=table_queue_name + "-DLQ",
            visibility_timeout=core.Duration.hours(1),
            retention_period=core.Duration.days(14))
        self.sqs_queue = sqs.Queue(self,
                                   "sqs_queue",
                                   queue_name=table_queue_name,
                                   visibility_timeout=core.Duration.hours(1),
                                   retention_period=core.Duration.days(14),
                                   dead_letter_queue=sqs.DeadLetterQueue(
                                       max_receive_count=24,
                                       queue=self.sqs_queue_DLQ))
        self.ssm_bucket_para = ssm.StringParameter(
            self,
            "para-bucket",
            string_value=json.dumps(bucket_para),
            parameter_name=ssm_parameter_bucket)

        # You need to manually set up ssm_credential_para in SSM Parameter Store before deploying;
        # it is imported here and the version number must match exactly!
        self.ssm_credential_para = ssm.StringParameter.from_secure_string_parameter_attributes(
            self,
            "ssm_parameter_credentials",
            parameter_name=ssm_parameter_credentials,
            version=2)

        # Create a new S3 bucket; new objects in it trigger SQS jobs that start the migration.
        # Existing S3 buckets are not configured here; the jobsender scans them and creates the SQS jobs.
        self.s3bucket = s3.Bucket(self, "newbucket")
        self.s3bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED, s3n.SqsDestination(self.sqs_queue))
Example #9
    def __init__(self, scope: core.Construct, id: str, publisher_lambda,
                 suscriber_lambda, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        queue_fail = sqs.Queue(self,
                               "queue_fail",
                               visibility_timeout=core.Duration.seconds(20))
        dlq = sqs.DeadLetterQueue(max_receive_count=100, queue=queue_fail)
        self.queue = sqs.Queue(self,
                               "base_queue",
                               visibility_timeout=core.Duration.seconds(20),
                               dead_letter_queue=dlq)

        # Configure the subscriber Lambda to receive messages from the queue
        self.queue.grant_consume_messages(suscriber_lambda)
        event_source = aws_lambda_event_sources.SqsEventSource(self.queue,
                                                               batch_size=1)
        suscriber_lambda.add_event_source(event_source)

        # The Lambda that publishes messages
        self.queue.grant_send_messages(publisher_lambda)
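The publisher is only granted send permission; a hedged sketch of how it might publish, assuming the queue URL is passed to it as a QUEUE_URL environment variable (that wiring is not shown in the stack above):

    # Publisher handler -- a sketch; the QUEUE_URL environment variable is an assumption.
    import json
    import os
    import boto3

    sqs_client = boto3.client("sqs")

    def lambda_handler(event, context):
        sqs_client.send_message(
            QueueUrl=os.environ["QUEUE_URL"],
            MessageBody=json.dumps({"source": "publisher", "detail": event}),
        )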
Example #10
    def __init__(self, scope: core.Construct, id: str, stage: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.base_platform = BasePlatform(self, self.stack_name, stage)

        # dead-letter queue for the worker queue
        dlq = aws_sqs.Queue(self, "{}-dlq".format(self.stack_name))

        queue = aws_sqs.Queue(
            self,
            "{}-queue".format(self.stack_name),
            dead_letter_queue=aws_sqs.DeadLetterQueue(max_receive_count=5,
                                                      queue=dlq))

        # Create QueueProcessingEc2Service
        ecs_service = aws_ecs_patterns.QueueProcessingEc2Service(
            self,
            "{}-svc".format(self.stack_name),
            cluster=self.base_platform.ecs_cluster,
            image=aws_ecs.ContainerImage.from_registry(
                "amazon/amazon-ecs-sample"),
            desired_task_count=0,
            max_scaling_capacity=10,
            memory_reservation_mib=1024,
            cpu=512,
            queue=queue,
        )
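QueueProcessingEc2Service wires the queue to the service and scales the task count on queue depth; the container typically receives the queue name through an environment variable (assumed here to be QUEUE_NAME). A minimal long-polling worker sketch:

    # Container entrypoint -- a sketch, assuming a QUEUE_NAME environment variable.
    import os
    import boto3

    sqs_client = boto3.client("sqs")
    queue_url = sqs_client.get_queue_url(QueueName=os.environ["QUEUE_NAME"])["QueueUrl"]

    while True:
        resp = sqs_client.receive_message(QueueUrl=queue_url,
                                          WaitTimeSeconds=20,
                                          MaxNumberOfMessages=10)
        for msg in resp.get("Messages", []):
            # ... process msg["Body"] ...
            sqs_client.delete_message(QueueUrl=queue_url,
                                      ReceiptHandle=msg["ReceiptHandle"])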
Example #11
    def __init__(self, scope: core.Construct, _id: str, **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)

        # Set up SSM parameters for the credentials, bucket_para and ignore_list
        ssm_credential_para = ssm.StringParameter.from_secure_string_parameter_attributes(
            self,
            "ssm_parameter_credentials",
            parameter_name=ssm_parameter_credentials,
            version=1)

        ssm_bucket_para = ssm.StringParameter(self,
                                              "s3bucket_serverless",
                                              string_value=json.dumps(
                                                  bucket_para, indent=4))

        ssm_parameter_ignore_list = ssm.StringParameter(
            self, "s3_migrate_ignore_list", string_value=ignore_list)

        # Setup DynamoDB
        ddb_file_list = ddb.Table(self,
                                  "s3migrate_serverless",
                                  partition_key=ddb.Attribute(
                                      name="Key",
                                      type=ddb.AttributeType.STRING),
                                  billing_mode=ddb.BillingMode.PAY_PER_REQUEST)
        ddb_file_list.add_global_secondary_index(
            partition_key=ddb.Attribute(name="desBucket",
                                        type=ddb.AttributeType.STRING),
            index_name="desBucket-index",
            projection_type=ddb.ProjectionType.INCLUDE,
            non_key_attributes=["desKey", "versionId"])

        # Setup SQS
        sqs_queue_DLQ = sqs.Queue(self,
                                  "s3migrate_serverless_Q_DLQ",
                                  visibility_timeout=core.Duration.minutes(15),
                                  retention_period=core.Duration.days(14))
        sqs_queue = sqs.Queue(self,
                              "s3migrate_serverless_Q",
                              visibility_timeout=core.Duration.minutes(15),
                              retention_period=core.Duration.days(14),
                              dead_letter_queue=sqs.DeadLetterQueue(
                                  max_receive_count=60, queue=sqs_queue_DLQ))

        # Set up an API for the Lambda functions to get their IP address (for debugging network routing)
        checkip = api.RestApi(
            self,
            "lambda-checkip-api",
            cloud_watch_role=True,
            deploy=True,
            description="For Lambda get IP address",
            default_integration=api.MockIntegration(
                integration_responses=[
                    api.IntegrationResponse(status_code="200",
                                            response_templates={
                                                "application/json":
                                                "$context.identity.sourceIp"
                                            })
                ],
                request_templates={"application/json": '{"statusCode": 200}'}),
            endpoint_types=[api.EndpointType.REGIONAL])
        checkip.root.add_method("GET",
                                method_responses=[
                                    api.MethodResponse(
                                        status_code="200",
                                        response_models={
                                            "application/json":
                                            api.Model.EMPTY_MODEL
                                        })
                                ])

        # Setup Lambda functions
        handler = lam.Function(self,
                               "s3-migrate-worker",
                               code=lam.Code.asset("./lambda"),
                               handler="lambda_function_worker.lambda_handler",
                               runtime=lam.Runtime.PYTHON_3_8,
                               memory_size=1024,
                               timeout=core.Duration.minutes(15),
                               tracing=lam.Tracing.ACTIVE,
                               environment={
                                   'table_queue_name':
                                   ddb_file_list.table_name,
                                   'Des_bucket_default': Des_bucket_default,
                                   'Des_prefix_default': Des_prefix_default,
                                   'StorageClass': StorageClass,
                                   'checkip_url': checkip.url,
                                   'ssm_parameter_credentials':
                                   ssm_parameter_credentials,
                                   'JobType': JobType,
                                   'MaxRetry': MaxRetry,
                                   'MaxThread': MaxThread,
                                   'MaxParallelFile': MaxParallelFile,
                                   'JobTimeout': JobTimeout,
                                   'UpdateVersionId': UpdateVersionId,
                                   'GetObjectWithVersionId':
                                   GetObjectWithVersionId
                               })

        handler_jobsender = lam.Function(
            self,
            "s3-migrate-jobsender",
            code=lam.Code.asset("./lambda"),
            handler="lambda_function_jobsender.lambda_handler",
            runtime=lam.Runtime.PYTHON_3_8,
            memory_size=1024,
            timeout=core.Duration.minutes(15),
            tracing=lam.Tracing.ACTIVE,
            environment={
                'table_queue_name': ddb_file_list.table_name,
                'StorageClass': StorageClass,
                'checkip_url': checkip.url,
                'sqs_queue': sqs_queue.queue_name,
                'ssm_parameter_credentials': ssm_parameter_credentials,
                'ssm_parameter_ignore_list':
                ssm_parameter_ignore_list.parameter_name,
                'ssm_parameter_bucket': ssm_bucket_para.parameter_name,
                'JobType': JobType,
                'MaxRetry': MaxRetry,
                'JobsenderCompareVersionId': JobsenderCompareVersionId
            })

        # Allow the Lambda functions to read/write DynamoDB and send to SQS
        ddb_file_list.grant_read_write_data(handler)
        ddb_file_list.grant_read_write_data(handler_jobsender)
        sqs_queue.grant_send_messages(handler_jobsender)
        # SQS triggers the Lambda worker
        handler.add_event_source(SqsEventSource(sqs_queue, batch_size=1))

        # Option 1: Create an S3 bucket; all new objects in this bucket will be transmitted by the Lambda worker
        s3bucket = s3.Bucket(self, "s3_new_migrate")
        s3bucket.grant_read(handler)
        s3bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                        s3n.SqsDestination(sqs_queue))

        # Option 2: Allow existing S3 buckets to be read by the Lambda functions.
        # The Lambda jobsender will scan and compare these buckets and trigger the Lambda workers to transmit them.
        bucket_name = ''
        for b in bucket_para:
            if bucket_name != b['src_bucket']:  # skip if the same bucket is listed more than once
                bucket_name = b['src_bucket']
                s3exist_bucket = s3.Bucket.from_bucket_name(
                    self,
                    bucket_name,  # use the bucket name as the construct id
                    bucket_name=bucket_name)
                if JobType == 'PUT':
                    s3exist_bucket.grant_read(handler_jobsender)
                    s3exist_bucket.grant_read(handler)
                else:  # 'GET' mode
                    s3exist_bucket.grant_read_write(handler_jobsender)
                    s3exist_bucket.grant_read_write(handler)

        # Allow the Lambda functions to read the SSM parameters
        ssm_bucket_para.grant_read(handler_jobsender)
        ssm_credential_para.grant_read(handler)
        ssm_credential_para.grant_read(handler_jobsender)
        ssm_parameter_ignore_list.grant_read(handler_jobsender)

        # Schedule a cron event to trigger the Lambda jobsender every hour:
        event.Rule(self,
                   'cron_trigger_jobsender',
                   schedule=event.Schedule.rate(core.Duration.hours(1)),
                   targets=[target.LambdaFunction(handler_jobsender)])

        # TODO: Trigger the event immediately; add a custom resource Lambda to invoke handler_jobsender

        # Create Lambda logs filter to create network traffic metric
        handler.log_group.add_metric_filter(
            "Completed-bytes",
            metric_name="Completed-bytes",
            metric_namespace="s3_migrate",
            metric_value="$bytes",
            filter_pattern=logs.FilterPattern.literal(
                '[info, date, sn, p="--->Complete", bytes, key]'))
        handler.log_group.add_metric_filter(
            "Uploading-bytes",
            metric_name="Uploading-bytes",
            metric_namespace="s3_migrate",
            metric_value="$bytes",
            filter_pattern=logs.FilterPattern.literal(
                '[info, date, sn, p="--->Uploading", bytes, key]'))
        handler.log_group.add_metric_filter(
            "Downloading-bytes",
            metric_name="Downloading-bytes",
            metric_namespace="s3_migrate",
            metric_value="$bytes",
            filter_pattern=logs.FilterPattern.literal(
                '[info, date, sn, p="--->Downloading", bytes, key]'))
        handler.log_group.add_metric_filter(
            "MaxMemoryUsed",
            metric_name="MaxMemoryUsed",
            metric_namespace="s3_migrate",
            metric_value="$memory",
            filter_pattern=logs.FilterPattern.literal(
                '[head="REPORT", a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, '
                'a13, a14, a15, a16, memory, MB="MB", rest]'))
        lambda_metric_Complete = cw.Metric(namespace="s3_migrate",
                                           metric_name="Completed-bytes",
                                           statistic="Sum",
                                           period=core.Duration.minutes(1))
        lambda_metric_Upload = cw.Metric(namespace="s3_migrate",
                                         metric_name="Uploading-bytes",
                                         statistic="Sum",
                                         period=core.Duration.minutes(1))
        lambda_metric_Download = cw.Metric(namespace="s3_migrate",
                                           metric_name="Downloading-bytes",
                                           statistic="Sum",
                                           period=core.Duration.minutes(1))
        lambda_metric_MaxMemoryUsed = cw.Metric(
            namespace="s3_migrate",
            metric_name="MaxMemoryUsed",
            statistic="Maximum",
            period=core.Duration.minutes(1))
        handler.log_group.add_metric_filter(
            "ERROR",
            metric_name="ERROR-Logs",
            metric_namespace="s3_migrate",
            metric_value="1",
            filter_pattern=logs.FilterPattern.literal('"ERROR"'))
        handler.log_group.add_metric_filter(
            "WARNING",
            metric_name="WARNING-Logs",
            metric_namespace="s3_migrate",
            metric_value="1",
            filter_pattern=logs.FilterPattern.literal('"WARNING"'))
        # Task timed out
        handler.log_group.add_metric_filter(
            "TIMEOUT",
            metric_name="TIMEOUT-Logs",
            metric_namespace="s3_migrate",
            metric_value="1",
            filter_pattern=logs.FilterPattern.literal('"Task timed out"'))
        log_metric_ERROR = cw.Metric(namespace="s3_migrate",
                                     metric_name="ERROR-Logs",
                                     statistic="Sum",
                                     period=core.Duration.minutes(1))
        log_metric_WARNING = cw.Metric(namespace="s3_migrate",
                                       metric_name="WARNING-Logs",
                                       statistic="Sum",
                                       period=core.Duration.minutes(1))
        log_metric_TIMEOUT = cw.Metric(namespace="s3_migrate",
                                       metric_name="TIMEOUT-Logs",
                                       statistic="Sum",
                                       period=core.Duration.minutes(1))

        # Dashboard to monitor SQS and Lambda
        board = cw.Dashboard(self, "s3_migrate_serverless")

        board.add_widgets(
            cw.GraphWidget(title="Lambda-NETWORK",
                           left=[
                               lambda_metric_Download, lambda_metric_Upload,
                               lambda_metric_Complete
                           ]),
            cw.GraphWidget(title="Lambda-concurrent",
                           left=[
                               handler.metric(
                                   metric_name="ConcurrentExecutions",
                                   period=core.Duration.minutes(1))
                           ]),
            cw.GraphWidget(
                title="Lambda-invocations/errors/throttles",
                left=[
                    handler.metric_invocations(
                        period=core.Duration.minutes(1)),
                    handler.metric_errors(period=core.Duration.minutes(1)),
                    handler.metric_throttles(period=core.Duration.minutes(1))
                ]),
            cw.GraphWidget(
                title="Lambda-duration",
                left=[
                    handler.metric_duration(period=core.Duration.minutes(1))
                ]),
        )

        board.add_widgets(
            cw.GraphWidget(title="Lambda_MaxMemoryUsed(MB)",
                           left=[lambda_metric_MaxMemoryUsed]),
            cw.GraphWidget(title="ERROR/WARNING Logs",
                           left=[log_metric_ERROR],
                           right=[log_metric_WARNING, log_metric_TIMEOUT]),
            cw.GraphWidget(
                title="SQS-Jobs",
                left=[
                    sqs_queue.metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue.
                    metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1))
                ]),
            cw.SingleValueWidget(
                title="Running/Waiting and Dead Jobs",
                metrics=[
                    sqs_queue.
                    metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue.metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue_DLQ.
                    metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue_DLQ.
                    metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1))
                ],
                height=6))
        # Alarm for queue - DLQ
        alarm_DLQ = cw.Alarm(
            self,
            "SQS_DLQ",
            metric=sqs_queue_DLQ.metric_approximate_number_of_messages_visible(
            ),
            threshold=0,
            comparison_operator=cw.ComparisonOperator.GREATER_THAN_THRESHOLD,
            evaluation_periods=1,
            datapoints_to_alarm=1)
        alarm_topic = sns.Topic(self, "SQS queue-DLQ has dead letter")
        alarm_topic.add_subscription(
            subscription=sub.EmailSubscription(alarm_email))
        alarm_DLQ.add_alarm_action(action.SnsAction(alarm_topic))

        core.CfnOutput(self,
                       "Dashboard",
                       value="CloudWatch Dashboard name s3_migrate_serverless")
Example #12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        ###########################################################################
        # AWS SECRETS MANAGER - Templated secret
        ###########################################################################
        # templated_secret = aws_secretsmanager.Secret(self, "TemplatedSecret",
        #     generate_secret_string=aws_secretsmanager.SecretStringGenerator(
        #         secret_string_template= "{\"username\":\"cleanbox\"}",
        #         generate_string_key="password"
        #     )
        # )
        ###########################################################################
        # CUSTOM CLOUDFORMATION RESOURCE
        ###########################################################################
        # customlambda = aws_lambda.Function(self,'customconfig',
        # handler='customconfig.on_event',
        # runtime=aws_lambda.Runtime.PYTHON_3_7,
        # code=aws_lambda.Code.asset('customconfig'),
        # )

        # customlambda_statement = aws_iam.PolicyStatement(actions=["events:PutRule"], conditions=None, effect=None, not_actions=None, not_principals=None, not_resources=None, principals=None, resources=["*"], sid=None)
        # customlambda.add_to_role_policy(statement=customlambda_statement)

        # my_provider = cr.Provider(self, "MyProvider",
        #     on_event_handler=customlambda,
        #     # is_complete_handler=is_complete, # optional async "waiter"
        #     log_retention=logs.RetentionDays.SIX_MONTHS
        # )

        # CustomResource(self, 'customconfigresource', service_token=my_provider.service_token)

        ###########################################################################
        # AWS LAMBDA FUNCTIONS
        ###########################################################################
        sqs_to_elastic_cloud = aws_lambda.Function(
            self,
            'sqs_to_elastic_cloud',
            handler='sqs_to_elastic_cloud.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('sqs_to_elastic_cloud'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=logs.RetentionDays.ONE_DAY)

        sqs_to_elasticsearch_service = aws_lambda.Function(
            self,
            'sqs_to_elasticsearch_service',
            handler='sqs_to_elasticsearch_service.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('sqs_to_elasticsearch_service'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=logs.RetentionDays.ONE_DAY)

        # sqs_to_elasticsearch_service.add_environment("kinesis_firehose_name", "-")
        # sqs_to_elastic_cloud.add_environment("index_name", "-")

        ###########################################################################
        # AWS LAMBDA FUNCTIONS
        ###########################################################################
        # sqs_to_elasticsearch_service_permission = aws_lambda.Permission(*, principal, action=None, event_source_token=None, scope=None, source_account=None, source_arn=None)

        ###########################################################################
        # AMAZON S3 BUCKETS
        ###########################################################################
        access_log_bucket = aws_s3.Bucket(self, "access_log_bucket")
        kinesis_log_bucket = aws_s3.Bucket(self, "kinesis_log_bucket")

        ###########################################################################
        # LAMBDA SUPPLEMENTAL POLICIES
        ###########################################################################
        lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["s3:Get*", "s3:Head*", "s3:List*", "firehose:*"],
            resources=["*"])

        sqs_to_elastic_cloud.add_to_role_policy(
            lambda_supplemental_policy_statement)
        sqs_to_elasticsearch_service.add_to_role_policy(
            lambda_supplemental_policy_statement)
        ###########################################################################
        # AWS SNS TOPICS
        ###########################################################################
        access_log_topic = aws_sns.Topic(self, "access_log_topic")

        ###########################################################################
        # ADD AMAZON S3 BUCKET NOTIFICATIONS
        ###########################################################################
        access_log_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED,
            aws_s3_notifications.SnsDestination(access_log_topic))

        ###########################################################################
        # AWS SQS QUEUES
        ###########################################################################
        sqs_to_elasticsearch_service_queue_iqueue = aws_sqs.Queue(
            self, "sqs_to_elasticsearch_service_queue_dlq")
        sqs_to_elasticsearch_service_queue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10,
            queue=sqs_to_elasticsearch_service_queue_iqueue)
        sqs_to_elasticsearch_service_queue = aws_sqs.Queue(
            self,
            "sqs_to_elasticsearch_service_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=sqs_to_elasticsearch_service_queue_dlq)

        sqs_to_elastic_cloud_queue_iqueue = aws_sqs.Queue(
            self, "sqs_to_elastic_cloud_queue_dlq")
        sqs_to_elastic_cloud_queue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=sqs_to_elastic_cloud_queue_iqueue)
        sqs_to_elastic_cloud_queue = aws_sqs.Queue(
            self,
            "sqs_to_elastic_cloud_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=sqs_to_elastic_cloud_queue_dlq)

        ###########################################################################
        # AWS SNS TOPIC SUBSCRIPTIONS
        ###########################################################################
        access_log_topic.add_subscription(
            aws_sns_subscriptions.SqsSubscription(sqs_to_elastic_cloud_queue))
        access_log_topic.add_subscription(
            aws_sns_subscriptions.SqsSubscription(
                sqs_to_elasticsearch_service_queue))

        ###########################################################################
        # AWS LAMBDA SQS EVENT SOURCE
        ###########################################################################
        sqs_to_elastic_cloud.add_event_source(
            SqsEventSource(sqs_to_elastic_cloud_queue, batch_size=10))
        sqs_to_elasticsearch_service.add_event_source(
            SqsEventSource(sqs_to_elasticsearch_service_queue, batch_size=10))

        ###########################################################################
        # AWS ELASTICSEARCH DOMAIN
        ###########################################################################

        ###########################################################################
        # AWS ELASTICSEARCH DOMAIN ACCESS POLICY
        ###########################################################################
        this_aws_account = aws_iam.AccountPrincipal(account_id="012345678912")
        # s3_to_elasticsearch_access_logs_domain_access_policy_statement = aws_iam.PolicyStatement(
        #     principals=[this_aws_account],
        #     effect=aws_iam.Effect.ALLOW,
        #     actions=["es:*"],
        #     resources=["*"]
        #     )
        # s3_to_elasticsearch_access_logs_domain_access_policy_statement_list=[]
        # s3_to_elasticsearch_access_logs_domain_access_policy_statement_list.append(s3_to_elasticsearch_access_logs_domain_access_policy_statement)

        s3_to_elasticsearch_access_logs_domain = aws_elasticsearch.Domain(
            self,
            "s3-to-elasticsearch-access-logs-domain",
            # access_policies=s3_to_elasticsearch_access_logs_domain_access_policy_statement_list,
            version=aws_elasticsearch.ElasticsearchVersion.V7_1,
            capacity={
                "master_nodes": 3,
                "data_nodes": 4
            },
            ebs={"volume_size": 100},
            zone_awareness={"availability_zone_count": 2},
            logging={
                "slow_search_log_enabled": True,
                "app_log_enabled": True,
                "slow_index_log_enabled": True
            })

        ###########################################################################
        # AMAZON COGNITO USER POOL
        ###########################################################################
        s3_to_elasticsearch_user_pool = aws_cognito.UserPool(
            self,
            "s3-to-elasticsearch-access-logs-pool",
            account_recovery=None,
            auto_verify=None,
            custom_attributes=None,
            email_settings=None,
            enable_sms_role=None,
            lambda_triggers=None,
            mfa=None,
            mfa_second_factor=None,
            password_policy=None,
            self_sign_up_enabled=None,
            sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                      phone=None,
                                                      preferred_username=None,
                                                      username=True),
            sign_in_case_sensitive=None,
            sms_role=None,
            sms_role_external_id=None,
            standard_attributes=None,
            user_invitation=None,
            user_pool_name=None,
            user_verification=None)

        ###########################################################################
        # AMAZON KINESIS FIREHOSE STREAM
        ###########################################################################
        # kinesis_policy_statement = aws_iam.PolicyStatement(
        #     effect=aws_iam.Effect.ALLOW,
        #     # actions=["es:*", "s3:*", "kms:*", "kinesis:*", "lambda:*"],
        #     actions=["*"],
        #     resources=["*"]
        #     )

        # kinesis_policy_document = aws_iam.PolicyDocument()
        # kinesis_policy_document.add_statements(kinesis_policy_statement)

        kinesis_firehose_stream_role = aws_iam.Role(
            self,
            "BaseVPCIAMLogRole",
            assumed_by=aws_iam.ServicePrincipal('firehose.amazonaws.com'),
            role_name=None,
            inline_policies={
                "AllowLogAccess":
                aws_iam.PolicyDocument(
                    assign_sids=False,
                    statements=[
                        aws_iam.PolicyStatement(actions=[
                            '*', 'es:*', 'logs:PutLogEvents',
                            'logs:DescribeLogGroups',
                            'logs:DescribeLogStreams'
                        ],
                                                effect=aws_iam.Effect.ALLOW,
                                                resources=['*'])
                    ])
            })

        RetryOptions = aws_kinesisfirehose.CfnDeliveryStream.ElasticsearchRetryOptionsProperty(
            duration_in_seconds=300)
        s3_configuration = aws_kinesisfirehose.CfnDeliveryStream.S3DestinationConfigurationProperty(
            bucket_arn=kinesis_log_bucket.bucket_arn,
            role_arn=kinesis_firehose_stream_role.role_arn)

        ElasticsearchDestinationConfiguration = aws_kinesisfirehose.CfnDeliveryStream.ElasticsearchDestinationConfigurationProperty(
            # "BufferingHints" : ElasticsearchBufferingHints,
            # "CloudWatchLoggingOptions" : CloudWatchLoggingOptions,
            # "ClusterEndpoint" : String,
            domain_arn=s3_to_elasticsearch_access_logs_domain.domain_arn,
            index_name="s3-to-elasticsearch-accesslogs",
            index_rotation_period="OneDay",
            # "ProcessingConfiguration" : ProcessingConfiguration,
            retry_options=RetryOptions,
            role_arn=kinesis_firehose_stream_role.role_arn,
            # "S3BackupMode" : String,
            s3_configuration=s3_configuration
            # "TypeName" : String
            # "VpcConfiguration" : VpcConfiguration
        )

        kinesis_firehose_stream = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "kinesis_firehose_stream",
            delivery_stream_encryption_configuration_input=None,
            delivery_stream_name=None,
            delivery_stream_type=None,
            elasticsearch_destination_configuration=
            ElasticsearchDestinationConfiguration,
            extended_s3_destination_configuration=None,
            http_endpoint_destination_configuration=None,
            kinesis_stream_source_configuration=None,
            redshift_destination_configuration=None,
            s3_destination_configuration=None,
            splunk_destination_configuration=None,
            tags=None)

        sqs_to_elasticsearch_service.add_environment(
            "FIREHOSE_NAME", kinesis_firehose_stream.ref)
        sqs_to_elasticsearch_service.add_environment(
            "QUEUEURL", sqs_to_elasticsearch_service_queue.queue_url)
        sqs_to_elasticsearch_service.add_environment("DEBUG", "False")

        sqs_to_elastic_cloud.add_environment("ELASTICCLOUD_SECRET_NAME", "-")
        sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_ID", "-")
        sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_PASSWORD", "-")
        sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_USERNAME", "-")
        sqs_to_elastic_cloud.add_environment(
            "QUEUEURL", sqs_to_elastic_cloud_queue.queue_url)
        sqs_to_elastic_cloud.add_environment("DEBUG", "False")
Example #13
    def __init__(self, app: core.App, id: str) -> None:
        super().__init__(app, id)

        ##################################
        # Resource Property Config
        ##################################

        # see: https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-queueconfig
        domain_scan_timeout = 900
        queue_visibility_timeout = 6 * domain_scan_timeout
        # tldextract needs to cache the TLD list after a request, and /tmp is writable in Lambda
        tld_cache = os.path.join('/tmp', '.tld_set')

        ##################################
        # Domain Gatherer Lambda and Queue
        ##################################

        # create queue
        domain_queue = sqs.Queue(
            self,
            'domain-queue',
            visibility_timeout=core.Duration.seconds(queue_visibility_timeout),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=5,
                queue=sqs.Queue(self,
                                'domain-queue-dlq',
                                retention_period=core.Duration.days(5))))

        # create lambda to gather domains
        domain_gatherer_lambda = lambda_.Function(
            self,
            "domain-gatherer",
            code=lambda_.Code.from_asset(
                'lambda-releases/domain-gatherer.zip'),
            handler="handler.main",
            timeout=core.Duration.seconds(600),
            runtime=lambda_.Runtime.PYTHON_3_7,
            memory_size=400)
        # set env vars
        domain_gatherer_lambda.add_environment('SQS_URL',
                                               domain_queue.queue_url)
        domain_gatherer_lambda.add_environment('TLDEXTRACT_CACHE', tld_cache)

        # add queue permissions to the lambda's execution role
        domain_gatherer_lambda_exec_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'lambda:InvokeFunction', 'sqs:SendMessage',
                'sqs:DeleteMessage', 'sqs:SendMessageBatch',
                'sqs:SetQueueAttributes', 'sqs:GetQueueAttributes',
                'sqs:GetQueueUrl'
            ],
            resources=[domain_queue.queue_arn])
        domain_gatherer_lambda.add_to_role_policy(
            domain_gatherer_lambda_exec_policy)

        # allow lambda to send messages to queue
        domain_queue.grant_send_messages(domain_gatherer_lambda)

        # create rule to run the lambda every Friday
        rule = events.Rule(
            self,
            "Rule",
            schedule=events.Schedule.cron(minute='0',
                                          hour='18',
                                          month='*',
                                          week_day='FRI',
                                          year='*'),
        )
        rule.add_target(targets.LambdaFunction(domain_gatherer_lambda))

        ##################################
        # Domain Scan Lambda and Results Bucket
        ##################################

        # create lambda to scan domains
        domain_scanner_lambda = lambda_.Function(
            self,
            "domain-scanner",
            code=lambda_.Code.from_asset('lambda-releases/domain-scanner.zip'),
            handler="handler.main",
            timeout=core.Duration.seconds(domain_scan_timeout),
            runtime=lambda_.Runtime.PYTHON_3_7,
            memory_size=1000)

        # create sqs event source for domain scan lambda
        domain_scanner_lambda.add_event_source(
            sources.SqsEventSource(domain_queue, batch_size=2))

        # create s3 bucket to put results
        bucket = s3.Bucket(self,
                           'results-bucket',
                           versioned=False,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           block_public_access=s3.BlockPublicAccess(
                               block_public_acls=False,
                               ignore_public_acls=False,
                               block_public_policy=True,
                               restrict_public_buckets=True))

        # grant s3:PUT to the pa11y lambda
        bucket.grant_put(domain_scanner_lambda)

        # set an env var for bucket name
        domain_scanner_lambda.add_environment('BUCKET_NAME',
                                              bucket.bucket_name)

        # add queue and bucket permissions to the domain scanner lambda's execution role
        # (object-level actions such as s3:PutObject apply to object ARNs, hence the "/*")
        domain_scanner_lambda_exec_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'lambda:InvokeFunction', 'sqs:SendMessage',
                'sqs:DeleteMessage', 'sqs:SendMessageBatch',
                'sqs:SetQueueAttributes', 'sqs:GetQueueAttributes',
                'sqs:GetQueueUrl', 's3:PutObject', 's3:PutObjectAcl'
            ],
            resources=[bucket.bucket_arn, f"{bucket.bucket_arn}/*"])
        domain_scanner_lambda.add_to_role_policy(
            domain_scanner_lambda_exec_policy)
Example #14
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        ###########################################################################
        # AWS SECRETS MANAGER - Templated secret
        ###########################################################################
        # templated_secret = aws_secretsmanager.Secret(self, "TemplatedSecret",
        #     generate_secret_string=aws_secretsmanager.SecretStringGenerator(
        #         secret_string_template= "{\"username\":\"cleanbox\"}",
        #         generate_string_key="password"
        #     )
        # )
        ###########################################################################
        # CUSTOM CLOUDFORMATION RESOURCE
        ###########################################################################
        # customlambda = aws_lambda.Function(self,'customconfig',
        # handler='customconfig.on_event',
        # runtime=aws_lambda.Runtime.PYTHON_3_7,
        # code=aws_lambda.Code.asset('customconfig'),
        # )

        # customlambda_statement = aws_iam.PolicyStatement(actions=["events:PutRule"], conditions=None, effect=None, not_actions=None, not_principals=None, not_resources=None, principals=None, resources=["*"], sid=None)
        # customlambda.add_to_role_policy(statement=customlambda_statement)

        # my_provider = cr.Provider(self, "MyProvider",
        #     on_event_handler=customlambda,
        #     # is_complete_handler=is_complete, # optional async "waiter"
        #     log_retention=logs.RetentionDays.SIX_MONTHS
        # )

        # CustomResource(self, 'customconfigresource', service_token=my_provider.service_token)

        ###########################################################################
        # AWS LAMBDA FUNCTIONS
        ###########################################################################
        sqs_to_elastic_cloud = aws_lambda.Function(
            self,
            'sqs_to_elastic_cloud',
            handler='sqs_to_elastic_cloud.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('sqs_to_elastic_cloud'),
            memory_size=4096,
            timeout=core.Duration.seconds(301),
            log_retention=logs.RetentionDays.ONE_DAY)

        sqs_to_elasticsearch_service = aws_lambda.Function(
            self,
            'sqs_to_elasticsearch_service',
            handler='sqs_to_elasticsearch_service.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('sqs_to_elasticsearch_service'),
            memory_size=4096,
            timeout=core.Duration.seconds(301),
            log_retention=logs.RetentionDays.ONE_DAY)
        ###########################################################################
        # AWS LAMBDA FUNCTIONS
        ###########################################################################

        ###########################################################################
        # AMAZON S3 BUCKETS
        ###########################################################################
        cloudtrail_log_bucket = aws_s3.Bucket(self, "cloudtrail_log_bucket")

        ###########################################################################
        # LAMBDA SUPPLEMENTAL POLICIES
        ###########################################################################
        lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["s3:Get*", "s3:Head*", "s3:List*", "firehose:*", "es:*"],
            resources=["*"])

        sqs_to_elastic_cloud.add_to_role_policy(
            lambda_supplemental_policy_statement)
        sqs_to_elasticsearch_service.add_to_role_policy(
            lambda_supplemental_policy_statement)
        ###########################################################################
        # AWS SNS TOPICS
        ###########################################################################
        cloudtrail_log_topic = aws_sns.Topic(self, "cloudtrail_log_topic")

        ###########################################################################
        # ADD AMAZON S3 BUCKET NOTIFICATIONS
        ###########################################################################
        cloudtrail_log_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED,
            aws_s3_notifications.SnsDestination(cloudtrail_log_topic))

        ###########################################################################
        # AWS SQS QUEUES
        ###########################################################################
        sqs_to_elasticsearch_service_queue_iqueue = aws_sqs.Queue(
            self, "sqs_to_elasticsearch_service_queue_dlq")
        sqs_to_elasticsearch_service_queue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10,
            queue=sqs_to_elasticsearch_service_queue_iqueue)
        sqs_to_elasticsearch_service_queue = aws_sqs.Queue(
            self,
            "sqs_to_elasticsearch_service_queue",
            visibility_timeout=core.Duration.seconds(300),
            dead_letter_queue=sqs_to_elasticsearch_service_queue_dlq)

        sqs_to_elastic_cloud_queue_iqueue = aws_sqs.Queue(
            self, "sqs_to_elastic_cloud_queue_dlq")
        sqs_to_elastic_cloud_queue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=sqs_to_elastic_cloud_queue_iqueue)
        sqs_to_elastic_cloud_queue = aws_sqs.Queue(
            self,
            "sqs_to_elastic_cloud_queue",
            visibility_timeout=core.Duration.seconds(300),
            dead_letter_queue=sqs_to_elastic_cloud_queue_dlq)

        ###########################################################################
        # AWS SNS TOPIC SUBSCRIPTIONS
        ###########################################################################
        cloudtrail_log_topic.add_subscription(
            aws_sns_subscriptions.SqsSubscription(sqs_to_elastic_cloud_queue))
        cloudtrail_log_topic.add_subscription(
            aws_sns_subscriptions.SqsSubscription(
                sqs_to_elasticsearch_service_queue))

        ###########################################################################
        # AWS LAMBDA SQS EVENT SOURCE
        ###########################################################################
        sqs_to_elastic_cloud.add_event_source(
            SqsEventSource(sqs_to_elastic_cloud_queue, batch_size=10))
        sqs_to_elasticsearch_service.add_event_source(
            SqsEventSource(sqs_to_elasticsearch_service_queue, batch_size=10))

        ###########################################################################
        # AWS ELASTICSEARCH DOMAIN
        ###########################################################################

        ###########################################################################
        # AWS ELASTICSEARCH DOMAIN ACCESS POLICY
        ###########################################################################
        this_aws_account = aws_iam.AccountPrincipal(account_id="012345678912")

        s3_to_elasticsearch_cloudtrail_logs_domain = aws_elasticsearch.Domain(
            self,
            "s3-to-elasticsearch-cloudtrail-logs-domain",
            version=aws_elasticsearch.ElasticsearchVersion.V7_1,
            capacity={
                "master_nodes": 3,
                "data_nodes": 4
            },
            ebs={"volume_size": 100},
            zone_awareness={"availability_zone_count": 2},
            logging={
                "slow_search_log_enabled": True,
                "app_log_enabled": True,
                "slow_index_log_enabled": True
            })

        ###########################################################################
        # AMAZON COGNITO USER POOL
        ###########################################################################
        s3_to_elasticsearch_user_pool = aws_cognito.UserPool(
            self,
            "s3-to-elasticsearch-cloudtrial-logs-pool",
            account_recovery=None,
            auto_verify=None,
            custom_attributes=None,
            email_settings=None,
            enable_sms_role=None,
            lambda_triggers=None,
            mfa=None,
            mfa_second_factor=None,
            password_policy=None,
            self_sign_up_enabled=None,
            sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                      phone=None,
                                                      preferred_username=None,
                                                      username=True),
            sign_in_case_sensitive=None,
            sms_role=None,
            sms_role_external_id=None,
            standard_attributes=None,
            user_invitation=None,
            user_pool_name=None,
            user_verification=None)

        sqs_to_elasticsearch_service.add_environment(
            "ELASTICSEARCH_HOST",
            s3_to_elasticsearch_cloudtrail_logs_domain.domain_endpoint)
        sqs_to_elasticsearch_service.add_environment(
            "QUEUEURL", sqs_to_elasticsearch_service_queue.queue_url)
        sqs_to_elasticsearch_service.add_environment("DEBUG", "False")

        sqs_to_elastic_cloud.add_environment("ELASTICCLOUD_SECRET_NAME", "-")
        sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_ID", "-")
        sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_PASSWORD", "-")
        sqs_to_elastic_cloud.add_environment("ELASTIC_CLOUD_USERNAME", "-")
        sqs_to_elastic_cloud.add_environment(
            "QUEUEURL", sqs_to_elastic_cloud_queue.queue_url)
        sqs_to_elastic_cloud.add_environment("DEBUG", "False")

        ###########################################################################
        # AWS CLOUDTRAIL
        ###########################################################################
        allevents_trail = aws_cloudtrail.Trail(
            self,
            "allevents_trail",
            bucket=cloudtrail_log_bucket,
            cloud_watch_log_group=None,
            cloud_watch_logs_retention=None,
            enable_file_validation=None,
            encryption_key=None,
            include_global_service_events=None,
            is_multi_region_trail=True,
            kms_key=None,
            management_events=aws_cloudtrail.ReadWriteType("ALL"),
            s3_key_prefix=None,
            send_to_cloud_watch_logs=False,
            sns_topic=None,
            trail_name=None)
Example #15
0
    def __init__(self, app: core.App, id: str) -> None:
        super().__init__(app, id)

        ##################################
        # Lambda Timeouts (seconds) & Queue Redrive
        ##################################

        lambda_gatherer_timeout = 600
        lambda_joiner_timeout = 350
        # pa11y's timeout is set to 50, so the lambda is just a little longer
        lambda_a11y_scan_timeout = 55
        max_receive_count = 2

        ##################################
        # S3 Bucket with Domains
        ##################################

        asset = aws_s3_assets.Asset(
            self, 'domain-list', path=os.path.abspath('./domains/domains.csv'))

        ##################################
        # Domain Gatherer Lambda and Queue
        ##################################
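        # the visibility timeout below is set to
        # (max_receive_count + 1) * lambda_gatherer_timeout, well above any
        # consuming Lambda's timeout, so a message is not made visible again
        # (and redelivered) while it is still being processed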

        domain_queue = sqs.Queue(
            self,
            'domain-queue',
            visibility_timeout=core.Duration.seconds(
                (max_receive_count + 1) * lambda_gatherer_timeout),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=max_receive_count,
                queue=sqs.Queue(self,
                                'domain-queue-dlq',
                                retention_period=core.Duration.days(5))))

        lambda_gatherer = lambda_.Function(
            self,
            "domain-gatherer",
            code=lambda_.Code.from_asset('./lambdas/domain_gatherer'),
            handler="handler.main",
            timeout=core.Duration.seconds(lambda_gatherer_timeout),
            runtime=lambda_.Runtime.PYTHON_3_7,
            memory_size=150)

        lambda_gatherer.add_environment('SQS_URL', domain_queue.queue_url)
        lambda_gatherer.add_environment('BUCKET_NAME', asset.s3_bucket_name)
        lambda_gatherer.add_environment('OBJECT_KEY', asset.s3_object_key)

        lambda_gatherer_sqs_exec_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'lambda:InvokeFunction', 'sqs:SendMessage',
                'sqs:DeleteMessage', 'sqs:SendMessageBatch',
                'sqs:SetQueueAttributes', 'sqs:GetQueueAttributes',
                'sqs:GetQueueUrl', 'sqs:GetQueueAttributes'
            ],
            resources=[domain_queue.queue_arn])
        lambda_gatherer.add_to_role_policy(lambda_gatherer_sqs_exec_policy)
        domain_queue.grant_send_messages(lambda_gatherer)

        # trigger for 1st and 15th of the month at 18:00 UTC (1pm EST)
        lambda_gatherer_rule = events.Rule(self,
                                           "Lambda Gatherer Rule",
                                           schedule=events.Schedule.cron(
                                               minute='0',
                                               hour='18',
                                               day="1,15",
                                               month='*',
                                               year='*'))
        lambda_gatherer_rule.add_target(
            targets.LambdaFunction(lambda_gatherer))
        asset.grant_read(lambda_gatherer)

        ##################################
        # A11y Scanner Lambda and S3
        ##################################

        layer = lambda_.LayerVersion(
            self,
            'chrome-aws-lambda',
            code=lambda_.Code.from_asset('./lambdas/chrome_aws_lambda.zip'),
            compatible_runtimes=[lambda_.Runtime.NODEJS_12_X],
            description='A layer of chrome-aws-lambda')

        lambda_a11y_scan = lambda_.Function(
            self,
            "a11y-scan",
            code=lambda_.Code.from_asset('./lambdas/a11y_scan'),
            handler="index.handler",
            timeout=core.Duration.seconds(lambda_a11y_scan_timeout),
            runtime=lambda_.Runtime.NODEJS_12_X,
            memory_size=1000,
            layers=[layer])

        lambda_a11y_scan.add_event_source(
            sources.SqsEventSource(domain_queue, batch_size=1))
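        # batch_size=1: each invocation scans a single domain, since one pa11y
        # run can take close to the full 55 second function timeout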

        # create s3 bucket to put results
        results_bucket = s3.Bucket(self,
                                   'results-bucket',
                                   versioned=False,
                                   removal_policy=core.RemovalPolicy.DESTROY,
                                   block_public_access=s3.BlockPublicAccess(
                                       block_public_acls=True,
                                       ignore_public_acls=True,
                                       block_public_policy=True,
                                       restrict_public_buckets=True),
                                   lifecycle_rules=[
                                       s3.LifecycleRule(
                                           enabled=True,
                                           expiration=core.Duration.days(10))
                                   ])

        lambda_a11y_scan.add_environment('BUCKET_NAME',
                                         results_bucket.bucket_name)
        results_bucket.grant_put(lambda_a11y_scan)

        ##################################
        # Results Joiner Lambda
        ##################################

        # create s3 bucket to put site data
        data_bucket = s3.Bucket(self,
                                'data-bucket',
                                versioned=False,
                                removal_policy=core.RemovalPolicy.DESTROY,
                                block_public_access=s3.BlockPublicAccess(
                                    block_public_acls=True,
                                    ignore_public_acls=True,
                                    block_public_policy=True,
                                    restrict_public_buckets=True))

        lambda_joiner = lambda_.Function(
            self,
            "results-joiner",
            code=lambda_.Code.from_asset(
                './lambda-releases/results_joiner.zip'),
            handler="handler.main",
            timeout=core.Duration.seconds(lambda_joiner_timeout),
            runtime=lambda_.Runtime.PYTHON_3_7,
            memory_size=400)
        lambda_joiner.add_environment('DATA_BUCKET_NAME',
                                      data_bucket.bucket_name)
        lambda_joiner.add_environment('RESULTS_BUCKET_NAME',
                                      results_bucket.bucket_name)
        results_bucket.grant_read_write(lambda_joiner)
        data_bucket.grant_read_write(lambda_joiner)

        # trigger for 8th and 23rd of the month at 18:00 UTC (1pm EST)
        lambda_joiner_rule = events.Rule(self,
                                         "Lambda Joiner Rule",
                                         schedule=events.Schedule.cron(
                                             minute='0',
                                             hour='18',
                                             day="8,23",
                                             month='*',
                                             year='*'))
        lambda_joiner_rule.add_target(targets.LambdaFunction(lambda_joiner))
Example #16
0
    def __init__(self, scope: core.Construct, stack_id: str, *,
                 env: core.Environment,
                 api_method: apigw.HttpMethod,
                 api_path: str,
                 function_code: lambda_.Code,
                 function_dependencies_layer: lambda_.LayerVersion,
                 function_name_prefix: str,
                 handler_function_handler: str,
                 receiver_function_handler: str,
                 queue_name: str,
                 **kwargs):
        super().__init__(scope, stack_id, env=env, **kwargs)

        # create the queue
        self.queue = sqs.Queue(
            self, 'Queue',
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=1,
                queue=sqs.Queue(
                    self,
                    'DLQ',
                    queue_name=f'{queue_name}-dlq')),
            queue_name=queue_name)

        # create the receiver function
        # add the queue url as an environment variable
        self.receiver_function = lambda_.Function(
            self, 'ReceiverFunction',
            code=function_code,
            environment={'QUEUE_URL': self.queue.queue_url},
            function_name=f'{function_name_prefix}-receiver',
            handler=receiver_function_handler,
            layers=[function_dependencies_layer],
            # memory_size=256,
            runtime=lambda_.Runtime.PYTHON_3_8)

        # allow the receiver function to enqueue messages
        self.queue.grant_send_messages(self.receiver_function)

        # route requests to the receiver lambda
        # (this creates a circular dependency, so it is left commented out)
        # api.add_routes(
        #     integration=apigw.LambdaProxyIntegration(
        #         handler=self.receiver_function),
        #     methods=[api_method],
        #     path=api_path)

        # route requests to the receiver lambda
        # (without creating a circular dependency?)
        # integration = apigw.CfnIntegration(
        #     self, 'Integration',
        #     api_id=api.http_api_id,
        #     integration_type='AWS_PROXY',
        #     integration_uri=self.receiver_function.function_arn,
        #     payload_format_version='2.0')

        # apigw.CfnRoute(self, 'Route',
        #                api_id=api.http_api_id,
        #                route_key=f'{api_method.value} {api_path}',
        #                target=f'integrations/{integration.ref}')

        # # trigger the lambda with those routed requests
        # lambda_.CfnEventSourceMapping(
        #     self, 'Mappping',
        #     event_source_arn=f'arn:aws:execute-api:{env.region}:{env.account}:{api.http_api_id}/*/*{api_path}',
        #     function_name=self.receiver_function.function_arn)

        # create the handler function
        self.handler_function = lambda_.Function(
            self, 'HandlerFunction',
            code=function_code,
            function_name=f'{function_name_prefix}-handler',
            handler=handler_function_handler,
            layers=[function_dependencies_layer],
            # memory_size=256,
            runtime=lambda_.Runtime.PYTHON_3_8)

        # add the queue as a trigger for the handler function
        self.handler_function.add_event_source(SqsEventSource(self.queue))
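        # resulting flow: the receiver function enqueues messages using its
        # QUEUE_URL environment variable, and the handler function consumes
        # them through the SQS event source above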
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #IAM Roles
        sqs_lambda_poller_role = iam.Role(
            self,
            id="SQSPollerRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaSQSQueueExecutionRole")
            ])
        sqs_lambda_poller_role.add_to_policy(
            iam.PolicyStatement(actions=["sqs:*", "states:*"],
                                effect=iam.Effect.ALLOW,
                                resources=["*"]))
        sqs_delete_role = iam.Role(
            self,
            id="RemoveMessageFromQueueRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole")
            ])

        sqs_delete_role.add_to_policy(
            iam.PolicyStatement(actions=["sqs:*"],
                                effect=iam.Effect.ALLOW,
                                resources=["*"]))
        #define the queue
        dl_queue = sqs.Queue(
            self,
            "DeadLetterQueue",
        )
        dl_letter = sqs.DeadLetterQueue(max_receive_count=3, queue=dl_queue)
        queue = sqs.Queue(self,
                          "TextractPipelineCdkQueue",
                          visibility_timeout=core.Duration.seconds(300),
                          dead_letter_queue=dl_letter)
        queue.grant_consume_messages(sqs_lambda_poller_role)

        # lambda policies
        pdf2imageStatement = iam.PolicyStatement(actions=["s3:*"],
                                                 effect=iam.Effect.ALLOW,
                                                 resources=["*"])
        callTextractStatement = iam.PolicyStatement(
            actions=["textract:*", "s3:*"],
            effect=iam.Effect.ALLOW,
            resources=["*"])

        callSQSstatement = iam.PolicyStatement(actions=["sqs:*", "states:*"],
                                               effect=iam.Effect.ALLOW,
                                               resources=["*"])

        #set the lambda code paths
        layer_files = os.path.join(os.path.dirname(__file__), os.pardir,
                                   'vendor')

        #define a layer for libraries
        poppler_layer = lambda_.LayerVersion(
            self,
            "poppler_layer",
            code=lambda_.Code.from_asset(path='layers/poppler_layer'),
            compatible_runtimes=[lambda_.Runtime.PYTHON_3_7],
            description="Poppler Lambda Layer")
        #helper layers
        textract_processor_layer = lambda_.LayerVersion(
            self,
            "HelperLayer",
            code=lambda_.Code.from_asset(path='layers/helpers'),
            compatible_runtimes=[lambda_.Runtime.PYTHON_3_7],
            description="Helper Layers")
        #define functions

        #get message from sqs
        poll_sqs_queue = lambda_.Function(
            self,
            "PollSQS",
            runtime=lambda_.Runtime.PYTHON_3_7,
            handler="process_sqs.handler",
            code=lambda_.Code.from_asset("lambda_funcs/process_sqs"),
            timeout=core.Duration.seconds(300),
            tracing=lambda_.Tracing.ACTIVE,
            role=sqs_lambda_poller_role,
            environment={
                "QueueUrl": queue.queue_url,
                "ACCOUNT_ID": core.Aws.ACCOUNT_ID
            },
        )
        #cron to trigger lambda
        polling_rule = events.Rule(self,
                                   "PollSQSRule",
                                   schedule=events.Schedule.rate(
                                       core.Duration.minutes(3)))

        polling_rule.add_target(target=targets.LambdaFunction(poll_sqs_queue))

        #process pdf
        pdf2image = lambda_.Function(
            self,
            "Pdf2Image",
            code=lambda_.Code.from_asset(
                'lambda_funcs/pdf2image',
                bundling={
                    "image":
                    lambda_.Runtime.PYTHON_3_7.bundling_docker_image,
                    "command": [
                        "bash", "-c",
                        "pip install -r requirements.txt -t /asset-input && cp -au . /asset-output"
                    ]
                }),
            handler='process_images.handler',
            runtime=lambda_.Runtime.PYTHON_3_7,
            layers=[poppler_layer],
            tracing=lambda_.Tracing.ACTIVE,
            memory_size=1028,
            timeout=core.Duration.seconds(300))
        # add_to_role_policy returns None, so attach the policy after the
        # assignment instead of chaining it onto the constructor
        pdf2image.add_to_role_policy(statement=pdf2imageStatement)

        # call textract
        callTextract = lambda_.Function(
            self,
            "CallTextract",
            code=lambda_.Code.from_asset('lambda_funcs/calltextract'),
            handler='calltextract.handler',
            runtime=lambda_.Runtime.PYTHON_3_7,
            layers=[textract_processor_layer],
            tracing=lambda_.Tracing.ACTIVE,
            memory_size=1028,
            timeout=core.Duration.seconds(300))
        # same pattern as pdf2image: attach the policy after assignment
        callTextract.add_to_role_policy(statement=callTextractStatement)

        #Remove message from queue
        remove_message_from_queue = lambda_.Function(
            self,
            id="RemoveMessageFromQueue",
            handler="remove_message.handler",
            code=lambda_.Code.from_asset("lambda_funcs/remove_msg_func"),
            runtime=lambda_.Runtime.PYTHON_3_7,
            tracing=lambda_.Tracing.ACTIVE,
            timeout=core.Duration.seconds(300),
            role=sqs_delete_role,
            environment={"QueueUrl": queue.queue_url})
Example #18
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        if self.node.try_get_context('vpc_type'):
            validate_cdk_json(self)

        ES_LOADER_TIMEOUT = 600
        ######################################################################
        # REGION mapping / ELB & Lambda Arch
        ######################################################################
        elb_id_temp = region_info.FactName.ELBV2_ACCOUNT
        elb_map_temp = region_info.RegionInfo.region_map(elb_id_temp)
        region_dict = {}
        for region in elb_map_temp:
            # ELB account ID
            region_dict[region] = {'ElbV2AccountId': elb_map_temp[region]}
            # Lambda Arch
            if region in ('us-east-1', 'us-east-2', 'us-west-2', 'ap-south-1',
                          'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1',
                          'eu-central-1', 'eu-west-1', 'eu-west-2'):
                region_dict[region]['LambdaArch'] = (
                    aws_lambda.Architecture.ARM_64.name)
            else:
                region_dict[region]['LambdaArch'] = (
                    aws_lambda.Architecture.X86_64.name)
        region_mapping = core.CfnMapping(
            scope=self, id='RegionMap', mapping=region_dict)
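        # this mapping is consumed later via
        # region_mapping.find_in_map(core.Aws.REGION, 'ElbV2AccountId')
        # when the ELB log-delivery bucket policy is built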

        ######################################################################
        # get params
        ######################################################################
        allow_source_address = core.CfnParameter(
            self, 'AllowedSourceIpAddresses', allowed_pattern=r'^[0-9./\s]*',
            description='Space-delimited list of CIDR blocks',
            default='10.0.0.0/8 172.16.0.0/12 192.168.0.0/16')
        sns_email = core.CfnParameter(
            self, 'SnsEmail', allowed_pattern=r'^[0-9a-zA-Z@_\-\+\.]*',
            description=('Input the email address for the SNS topic that '
                         'Amazon OpenSearch Service will send alerts to'),
            default='*****@*****.**')
        geoip_license_key = core.CfnParameter(
            self, 'GeoLite2LicenseKey', allowed_pattern=r'^[0-9a-zA-Z]{16}$',
            default='xxxxxxxxxxxxxxxx',
            description=("If you wolud like to enrich geoip locaiton such as "
                         "IP address's country, get a license key form MaxMind"
                         " and input the key. If you not, keep "
                         "xxxxxxxxxxxxxxxx"))
        reserved_concurrency = core.CfnParameter(
            self, 'ReservedConcurrency', default=10, type='Number',
            description=('Input the reserved concurrency. Increase this value '
                         'if logs are consistently delayed despite no errors'))
        aes_domain_name = self.node.try_get_context('aes_domain_name')
        bucket = f'{aes_domain_name}-{core.Aws.ACCOUNT_ID}'
        s3bucket_name_geo = f'{bucket}-geo'
        s3bucket_name_log = f'{bucket}-log'
        s3bucket_name_snapshot = f'{bucket}-snapshot'

        # organizations / multiaccount
        org_id = self.node.try_get_context('organizations').get('org_id')
        org_mgmt_id = self.node.try_get_context(
            'organizations').get('management_id')
        org_member_ids = self.node.try_get_context(
            'organizations').get('member_ids')
        no_org_ids = self.node.try_get_context(
            'no_organizations').get('aws_accounts')

        # Override the default S3 bucket names with custom names from the context
        temp_geo = self.node.try_get_context('s3_bucket_name').get('geo')
        if temp_geo:
            s3bucket_name_geo = temp_geo
        else:
            print('Using default bucket names')
        temp_log = self.node.try_get_context('s3_bucket_name').get('log')
        if temp_log:
            s3bucket_name_log = temp_log
        elif org_id or no_org_ids:
            s3bucket_name_log = f'{aes_domain_name}-{self.account}-log'
        else:
            print('Using default bucket names')
        temp_snap = self.node.try_get_context('s3_bucket_name').get('snapshot')
        if temp_snap:
            s3bucket_name_snapshot = temp_snap
        else:
            print('Using default bucket names')
        kms_cmk_alias = self.node.try_get_context('kms_cmk_alias')
        if not kms_cmk_alias:
            kms_cmk_alias = 'aes-siem-key'
            print('Using default key alias')

        ######################################################################
        # deploy VPC when context is defined as using VPC
        ######################################################################
        # vpc_type is 'new' or 'import' or None
        vpc_type = self.node.try_get_context('vpc_type')

        if vpc_type == 'new':
            is_vpc = True
            vpc_cidr = self.node.try_get_context('new_vpc_nw_cidr_block')
            subnet_cidr_mask = int(
                self.node.try_get_context('new_vpc_subnet_cidr_mask'))
            # VPC
            vpc_aes_siem = aws_ec2.Vpc(
                self, 'VpcAesSiem', cidr=vpc_cidr,
                max_azs=3, nat_gateways=0,
                subnet_configuration=[
                    aws_ec2.SubnetConfiguration(
                        subnet_type=aws_ec2.SubnetType.ISOLATED,
                        name='aes-siem-subnet', cidr_mask=subnet_cidr_mask)])
            subnet1 = vpc_aes_siem.isolated_subnets[0]
            subnets = [{'subnet_type': aws_ec2.SubnetType.ISOLATED}]
            vpc_subnets = aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED)
            vpc_aes_siem_opt = vpc_aes_siem.node.default_child.cfn_options
            vpc_aes_siem_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
            for subnet in vpc_aes_siem.isolated_subnets:
                subnet_opt = subnet.node.default_child.cfn_options
                subnet_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
        elif vpc_type == 'import':
            vpc_id = self.node.try_get_context('imported_vpc_id')
            vpc_aes_siem = aws_ec2.Vpc.from_lookup(
                self, 'VpcAesSiem', vpc_id=vpc_id)

            subnet_ids = get_subnet_ids(self)
            subnets = []
            for number, subnet_id in enumerate(subnet_ids, 1):
                obj_id = 'Subenet' + str(number)
                subnet = aws_ec2.Subnet.from_subnet_id(self, obj_id, subnet_id)
                subnets.append(subnet)
            subnet1 = subnets[0]
            vpc_subnets = aws_ec2.SubnetSelection(subnets=subnets)

        if vpc_type:
            is_vpc = True
            # Security Group
            sg_vpc_noinbound_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcNoinboundSecurityGroup',
                security_group_name='aes-siem-noinbound-vpc-sg',
                vpc=vpc_aes_siem)

            sg_vpc_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcSecurityGroup',
                security_group_name='aes-siem-vpc-sg',
                vpc=vpc_aes_siem)
            sg_vpc_aes_siem.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(vpc_aes_siem.vpc_cidr_block),
                connection=aws_ec2.Port.tcp(443),)
            sg_vpc_opt = sg_vpc_aes_siem.node.default_child.cfn_options
            sg_vpc_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

            # VPC Endpoint
            vpc_aes_siem.add_gateway_endpoint(
                'S3Endpoint', service=aws_ec2.GatewayVpcEndpointAwsService.S3,
                subnets=subnets)
            vpc_aes_siem.add_interface_endpoint(
                'SQSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.SQS,)
            vpc_aes_siem.add_interface_endpoint(
                'KMSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.KMS,)
        else:
            is_vpc = False

        is_vpc = core.CfnCondition(
            self, 'IsVpc', expression=core.Fn.condition_equals(is_vpc, True))
        """
        CloudFormation実行時の条件式の書き方
        ClassのBasesが aws_cdk.core.Resource の時は、
        node.default_child.cfn_options.condition = is_vpc
        ClassのBasesが aws_cdk.core.CfnResource の時は、
        cfn_options.condition = is_vpc
        """

        ######################################################################
        # create cmk of KMS to encrypt S3 bucket
        ######################################################################
        kms_aes_siem = aws_kms.Key(
            self, 'KmsAesSiemLog', description='CMK for SIEM solution',
            removal_policy=core.RemovalPolicy.RETAIN)

        aws_kms.Alias(
            self, 'KmsAesSiemLogAlias', alias_name=kms_cmk_alias,
            target_key=kms_aes_siem,
            removal_policy=core.RemovalPolicy.RETAIN)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow GuardDuty to use the key',
                actions=['kms:GenerateDataKey'],
                principals=[aws_iam.ServicePrincipal(
                    'guardduty.amazonaws.com')],
                resources=['*'],),)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow VPC Flow Logs to use the key',
                actions=['kms:Encrypt', 'kms:Decrypt', 'kms:ReEncrypt*',
                         'kms:GenerateDataKey*', 'kms:DescribeKey'],
                principals=[aws_iam.ServicePrincipal(
                    'delivery.logs.amazonaws.com')],
                resources=['*'],),)
        # basic policy
        key_policy_basic1 = aws_iam.PolicyStatement(
            sid='Allow principals in the account to decrypt log files',
            actions=['kms:DescribeKey', 'kms:ReEncryptFrom'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_basic1)

        # for Athena
        key_policy_athena = aws_iam.PolicyStatement(
            sid='Allow Athena to query s3 objects with this key',
            actions=['kms:Decrypt', 'kms:DescribeKey', 'kms:Encrypt',
                     'kms:GenerateDataKey*', 'kms:ReEncrypt*'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],
            conditions={'ForAnyValue:StringEquals': {
                'aws:CalledVia': 'athena.amazonaws.com'}})
        kms_aes_siem.add_to_resource_policy(key_policy_athena)

        # for CloudTrail
        key_policy_trail1 = aws_iam.PolicyStatement(
            sid='Allow CloudTrail to describe key',
            actions=['kms:DescribeKey'],
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_trail1)

        key_policy_trail2 = aws_iam.PolicyStatement(
            sid=('Allow CloudTrail to encrypt logs'),
            actions=['kms:GenerateDataKey*'],
            principals=[aws_iam.ServicePrincipal(
                'cloudtrail.amazonaws.com')],
            resources=['*'],
            conditions={'StringLike': {
                'kms:EncryptionContext:aws:cloudtrail:arn': [
                    f'arn:aws:cloudtrail:*:{core.Aws.ACCOUNT_ID}:trail/*']}})
        kms_aes_siem.add_to_resource_policy(key_policy_trail2)

        ######################################################################
        # create s3 bucket
        ######################################################################
        block_pub = aws_s3.BlockPublicAccess(
            block_public_acls=True,
            ignore_public_acls=True,
            block_public_policy=True,
            restrict_public_buckets=True
        )
        s3_geo = aws_s3.Bucket(
            self, 'S3BucketForGeoip', block_public_access=block_pub,
            bucket_name=s3bucket_name_geo,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for log collector
        s3_log = aws_s3.Bucket(
            self, 'S3BucketForLog', block_public_access=block_pub,
            bucket_name=s3bucket_name_log, versioned=True,
            encryption=aws_s3.BucketEncryption.S3_MANAGED,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for aes snapshot
        s3_snapshot = aws_s3.Bucket(
            self, 'S3BucketForSnapshot', block_public_access=block_pub,
            bucket_name=s3bucket_name_snapshot,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        ######################################################################
        # IAM Role
        ######################################################################
        # deployment policy for lambda deploy-aes
        arn_prefix = f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
        loggroup_aes = f'log-group:/aws/aes/domains/{aes_domain_name}/*'
        loggroup_opensearch = (
            f'log-group:/aws/OpenSearchService/domains/{aes_domain_name}/*')
        loggroup_lambda = 'log-group:/aws/lambda/aes-siem-*'
        policydoc_create_loggroup = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=[
                        'logs:PutResourcePolicy',
                        'logs:DescribeLogGroups',
                        'logs:DescribeLogStreams'
                    ],
                    resources=[f'{arn_prefix}:*', ]
                ),
                aws_iam.PolicyStatement(
                    actions=[
                        'logs:CreateLogGroup', 'logs:CreateLogStream',
                        'logs:PutLogEvents', 'logs:PutRetentionPolicy'],
                    resources=[
                        f'{arn_prefix}:{loggroup_aes}',
                        f'{arn_prefix}:{loggroup_opensearch}',
                        f'{arn_prefix}:{loggroup_lambda}',
                    ],
                )
            ]
        )

        policydoc_crhelper = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=[
                        'lambda:AddPermission',
                        'lambda:RemovePermission',
                        'events:ListRules',
                        'events:PutRule',
                        'events:DeleteRule',
                        'events:PutTargets',
                        'events:RemoveTargets'],
                    resources=['*']
                )
            ]
        )

        # snapshot rule for AES
        policydoc_snapshot = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['s3:ListBucket'],
                    resources=[s3_snapshot.bucket_arn]
                ),
                aws_iam.PolicyStatement(
                    actions=['s3:GetObject', 's3:PutObject',
                             's3:DeleteObject'],
                    resources=[s3_snapshot.bucket_arn + '/*']
                )
            ]
        )
        aes_siem_snapshot_role = aws_iam.Role(
            self, 'AesSiemSnapshotRole',
            role_name='aes-siem-snapshot-role',
            inline_policies=[policydoc_snapshot, ],
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        policydoc_assume_snapshrole = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['iam:PassRole'],
                    resources=[aes_siem_snapshot_role.role_arn]
                ),
            ]
        )

        aes_siem_deploy_role_for_lambda = aws_iam.Role(
            self, 'AesSiemDeployRoleForLambda',
            role_name='aes-siem-deploy-role-for-lambda',
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonOpenSearchServiceFullAccess'),
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
            ],
            inline_policies=[policydoc_assume_snapshrole, policydoc_snapshot,
                             policydoc_create_loggroup, policydoc_crhelper],
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com')
        )

        if vpc_type:
            aes_siem_deploy_role_for_lambda.add_managed_policy(
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaVPCAccessExecutionRole')
            )

        # for alerts from Amazon OpenSearch Service
        aes_siem_sns_role = aws_iam.Role(
            self, 'AesSiemSnsRole',
            role_name='aes-siem-sns-role',
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        # EC2 role
        aes_siem_es_loader_ec2_role = aws_iam.Role(
            self, 'AesSiemEsLoaderEC2Role',
            role_name='aes-siem-es-loader-for-ec2',
            assumed_by=aws_iam.ServicePrincipal('ec2.amazonaws.com'),
        )

        aws_iam.CfnInstanceProfile(
            self, 'AesSiemEsLoaderEC2InstanceProfile',
            instance_profile_name=aes_siem_es_loader_ec2_role.role_name,
            roles=[aes_siem_es_loader_ec2_role.role_name]
        )

        ######################################################################
        # in VPC
        ######################################################################
        aes_role_exist = check_iam_role('/aws-service-role/es.amazonaws.com/')
        if vpc_type and not aes_role_exist:
            slr_aes = aws_iam.CfnServiceLinkedRole(
                self, 'AWSServiceRoleForAmazonOpenSearchService',
                aws_service_name='es.amazonaws.com',
                description='Created by cloudformation of siem stack'
            )
            slr_aes.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # SQS for es-loader's DLQ
        ######################################################################
        sqs_aes_siem_dlq = aws_sqs.Queue(
            self, 'AesSiemDlq', queue_name='aes-siem-dlq',
            retention_period=core.Duration.days(14))

        sqs_aes_siem_splitted_logs = aws_sqs.Queue(
            self, 'AesSiemSqsSplitLogs',
            queue_name='aes-siem-sqs-splitted-logs',
            dead_letter_queue=aws_sqs.DeadLetterQueue(
                max_receive_count=2, queue=sqs_aes_siem_dlq),
            visibility_timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            retention_period=core.Duration.days(14))
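        # the splitted-logs queue keeps in-flight messages hidden for as long
        # as es-loader may run (visibility_timeout == ES_LOADER_TIMEOUT), so a
        # message is not redelivered while an invocation is still processing it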

        ######################################################################
        # Setup Lambda
        ######################################################################
        # setup lambda of es_loader
        lambda_es_loader_vpc_kwargs = {}
        if vpc_type:
            lambda_es_loader_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': vpc_subnets,
            }

        lambda_es_loader = aws_lambda.Function(
            self, 'LambdaEsLoader', **lambda_es_loader_vpc_kwargs,
            function_name='aes-siem-es-loader',
            description=f'{SOLUTION_NAME} / es-loader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            # code=aws_lambda.Code.asset('../lambda/es_loader.zip'),
            code=aws_lambda.Code.asset('../lambda/es_loader'),
            handler='index.lambda_handler',
            memory_size=2048,
            timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            reserved_concurrent_executions=(
                reserved_concurrency.value_as_number),
            dead_letter_queue_enabled=True,
            dead_letter_queue=sqs_aes_siem_dlq,
            environment={
                'GEOIP_BUCKET': s3bucket_name_geo, 'LOG_LEVEL': 'info',
                'POWERTOOLS_LOGGER_LOG_EVENT': 'false',
                'POWERTOOLS_SERVICE_NAME': 'es-loader',
                'POWERTOOLS_METRICS_NAMESPACE': 'SIEM'})
        es_loader_newver = lambda_es_loader.add_version(
            name=__version__, description=__version__)
        es_loader_opt = es_loader_newver.node.default_child.cfn_options
        es_loader_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # send only:
        # sqs_aes_siem_dlq.grant(lambda_es_loader, 'sqs:SendMessage')
        # send and receive: es-loader can also re-drive messages from its own
        # DLQ, which can loop if a message keeps failing
        sqs_aes_siem_dlq.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        sqs_aes_siem_splitted_logs.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        lambda_es_loader.add_event_source(
            aws_lambda_event_sources.SqsEventSource(
                sqs_aes_siem_splitted_logs, batch_size=1))

        # es-loader on EC2 role
        sqs_aes_siem_dlq.grant(
            aes_siem_es_loader_ec2_role, 'sqs:GetQueue*', 'sqs:ListQueues*',
            'sqs:ReceiveMessage*', 'sqs:DeleteMessage*')

        lambda_geo = aws_lambda.Function(
            self, 'LambdaGeoipDownloader',
            function_name='aes-siem-geoip-downloader',
            description=f'{SOLUTION_NAME} / geoip-downloader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            code=aws_lambda.Code.asset('../lambda/geoip_downloader'),
            handler='index.lambda_handler',
            memory_size=320,
            timeout=core.Duration.seconds(300),
            environment={
                's3bucket_name': s3bucket_name_geo,
                'license_key': geoip_license_key.value_as_string,
            }
        )
        lambda_geo_newver = lambda_geo.add_version(
            name=__version__, description=__version__)
        lambda_geo_opt = lambda_geo_newver.node.default_child.cfn_options
        lambda_geo_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # setup OpenSearch Service
        ######################################################################
        lambda_deploy_es = aws_lambda.Function(
            self, 'LambdaDeployAES',
            function_name='aes-siem-deploy-aes',
            description=f'{SOLUTION_NAME} / opensearch domain deployment',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            # code=aws_lambda.Code.asset('../lambda/deploy_es.zip'),
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_domain_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        lambda_deploy_es.add_environment(
            's3_snapshot', s3_snapshot.bucket_name)
        if vpc_type:
            lambda_deploy_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_deploy_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_deploy_es.add_environment('vpc_subnet_id', 'None')
            lambda_deploy_es.add_environment('security_group_id', 'None')
        deploy_es_newver = lambda_deploy_es.add_version(
            name=__version__, description=__version__)
        deploy_es_opt = deploy_es_newver.node.default_child.cfn_options
        deploy_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # execute lambda_deploy_es to deploy the Amazon ES domain
        aes_domain = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainDeployedR2',
            service_token=lambda_deploy_es.function_arn,)
        aes_domain.add_override('Properties.ConfigVersion', __version__)

        es_endpoint = aes_domain.get_att('es_endpoint').to_string()
        lambda_es_loader.add_environment('ES_ENDPOINT', es_endpoint)
        lambda_es_loader.add_environment(
            'SQS_SPLITTED_LOGS_URL', sqs_aes_siem_splitted_logs.queue_url)

        lambda_configure_es_vpc_kwargs = {}
        if vpc_type:
            lambda_configure_es_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': aws_ec2.SubnetSelection(subnets=[subnet1, ]), }
        lambda_configure_es = aws_lambda.Function(
            self, 'LambdaConfigureAES', **lambda_configure_es_vpc_kwargs,
            function_name='aes-siem-configure-aes',
            description=f'{SOLUTION_NAME} / opensearch configuration',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArm'),
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_config_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
                'es_endpoint': es_endpoint,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        lambda_configure_es.add_environment(
            's3_snapshot', s3_snapshot.bucket_name)
        if vpc_type:
            lambda_configure_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_configure_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_configure_es.add_environment('vpc_subnet_id', 'None')
            lambda_configure_es.add_environment('security_group_id', 'None')
        configure_es_newver = lambda_configure_es.add_version(
            name=__version__, description=__version__)
        configure_es_opt = configure_es_newver.node.default_child.cfn_options
        configure_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        aes_config = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainConfiguredR2',
            service_token=lambda_configure_es.function_arn,)
        aes_config.add_override('Properties.ConfigVersion', __version__)
        aes_config.add_depends_on(aes_domain)
        aes_config.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        es_arn = (f'arn:aws:es:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
                  f':domain/{aes_domain_name}')
        # grant permission to es_loader role
        inline_policy_to_load_entries_into_es = aws_iam.Policy(
            self, 'aes-siem-policy-to-load-entries-to-es',
            policy_name='aes-siem-policy-to-load-entries-to-es',
            statements=[
                aws_iam.PolicyStatement(
                    actions=['es:*'],
                    resources=[es_arn + '/*', ]),
            ]
        )
        lambda_es_loader.role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)
        aes_siem_es_loader_ec2_role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)

        # grant additional permission to es_loader role
        additional_kms_cmks = self.node.try_get_context('additional_kms_cmks')
        if additional_kms_cmks:
            inline_policy_access_to_additional_cmks = aws_iam.Policy(
                self, 'access_to_additional_cmks',
                policy_name='access_to_additional_cmks',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['kms:Decrypt'],
                        resources=sorted(set(additional_kms_cmks))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
        additional_buckets = self.node.try_get_context('additional_s3_buckets')

        if additional_buckets:
            buckets_list = []
            for bucket in additional_buckets:
                buckets_list.append(f'arn:aws:s3:::{bucket}')
                buckets_list.append(f'arn:aws:s3:::{bucket}/*')
            inline_policy_access_to_additional_buckets = aws_iam.Policy(
                self, 'access_to_additional_buckets',
                policy_name='access_to_additional_buckets',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['s3:GetObject*', 's3:GetBucket*', 's3:List*'],
                        resources=sorted(set(buckets_list))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)

        kms_aes_siem.grant_decrypt(lambda_es_loader)
        kms_aes_siem.grant_decrypt(aes_siem_es_loader_ec2_role)

        ######################################################################
        # s3 notification and grant permission
        ######################################################################
        s3_geo.grant_read_write(lambda_geo)
        s3_geo.grant_read(lambda_es_loader)
        s3_geo.grant_read(aes_siem_es_loader_ec2_role)
        s3_log.grant_read(lambda_es_loader)
        s3_log.grant_read(aes_siem_es_loader_ec2_role)

        # create s3 notification for es_loader
        notification = aws_s3_notifications.LambdaDestination(lambda_es_loader)

        # assign notification for the s3 object-created event types
        # most log systems use PUT, but CLB also uses POST & multipart upload
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='AWSLogs/'))

        # For user logs, not AWS logs
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='UserLogs/'))

        # Download geoip to S3 once by executing lambda_geo
        get_geodb = aws_cloudformation.CfnCustomResource(
            self, 'ExecLambdaGeoipDownloader',
            service_token=lambda_geo.function_arn,)
        get_geodb.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # Refresh the GeoIP database every 12 hours
        rule = aws_events.Rule(
            self, 'CwlRuleLambdaGeoipDownloaderDilly',
            schedule=aws_events.Schedule.rate(core.Duration.hours(12)))
        rule.add_target(aws_events_targets.LambdaFunction(lambda_geo))

        ######################################################################
        # bucket policy
        ######################################################################
        s3_awspath = s3_log.bucket_arn + '/AWSLogs/' + core.Aws.ACCOUNT_ID
        bucket_policy_common1 = aws_iam.PolicyStatement(
            sid='ELB Policy',
            principals=[aws_iam.AccountPrincipal(
                account_id=region_mapping.find_in_map(
                    core.Aws.REGION, 'ElbV2AccountId'))],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],)
        # NLB / ALB / R53resolver / VPC Flow Logs
        bucket_policy_elb1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_elb2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_common1)
        s3_log.add_to_resource_policy(bucket_policy_elb1)
        s3_log.add_to_resource_policy(bucket_policy_elb2)

        # CloudTrail
        bucket_policy_trail1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For Cloudtrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:GetBucketAcl'], resources=[s3_log.bucket_arn],)
        bucket_policy_trail2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For CloudTrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_trail1)
        s3_log.add_to_resource_policy(bucket_policy_trail2)

        # GuardDuty
        bucket_policy_gd1 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to use the getBucketLocation operation',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:GetBucketLocation'], resources=[s3_log.bucket_arn],)
        bucket_policy_gd2 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to upload objects to the bucket',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_log.bucket_arn + '/*'],)
        bucket_policy_gd5 = aws_iam.PolicyStatement(
            sid='Deny non-HTTPS access', effect=aws_iam.Effect.DENY,
            actions=['s3:*'], resources=[s3_log.bucket_arn + '/*'],
            conditions={'Bool': {'aws:SecureTransport': 'false'}})
        bucket_policy_gd5.add_any_principal()
        s3_log.add_to_resource_policy(bucket_policy_gd1)
        s3_log.add_to_resource_policy(bucket_policy_gd2)
        s3_log.add_to_resource_policy(bucket_policy_gd5)

        # Config
        bucket_policy_config1 = aws_iam.PolicyStatement(
            sid='AWSConfig BucketPermissionsCheck and BucketExistenceCheck',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_config2 = aws_iam.PolicyStatement(
            sid='AWSConfigBucketDelivery',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/Config/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_config1)
        s3_log.add_to_resource_policy(bucket_policy_config2)

        # geoip
        bucket_policy_geo1 = aws_iam.PolicyStatement(
            sid='Allow geoip downloader and es-loader to read/write',
            principals=[lambda_es_loader.role, lambda_geo.role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_geo.bucket_arn + '/*'],)
        s3_geo.add_to_resource_policy(bucket_policy_geo1)

        # ES Snapshot
        bucket_policy_snapshot = aws_iam.PolicyStatement(
            sid='Allow ES to store snapshot',
            principals=[aes_siem_snapshot_role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_snapshot.bucket_arn + '/*'],)
        s3_snapshot.add_to_resource_policy(bucket_policy_snapshot)

        ######################################################################
        # for multiaccount / organizations
        ######################################################################
        if org_id or no_org_ids:
            ##################################################################
            # KMS key policy for multiaccount / organizations
            ##################################################################
            # for CloudTrail
            cond_tail2 = self.make_resource_list(
                path='arn:aws:cloudtrail:*:', tail=':trail/*',
                keys=self.list_without_none(org_mgmt_id, no_org_ids))
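            # cond_tail2 presumably expands to one entry per account id, e.g.
            # 'arn:aws:cloudtrail:*:<account-id>:trail/*' (inferred from the
            # path/tail arguments above; the helper itself is not shown here).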
            key_policy_mul_trail2 = aws_iam.PolicyStatement(
                sid=('Allow CloudTrail to encrypt logs for multiaccounts'),
                actions=['kms:GenerateDataKey*'],
                principals=[aws_iam.ServicePrincipal(
                    'cloudtrail.amazonaws.com')],
                resources=['*'],
                conditions={'StringLike': {
                    'kms:EncryptionContext:aws:cloudtrail:arn': cond_tail2}})
            kms_aes_siem.add_to_resource_policy(key_policy_mul_trail2)

            # for replication
            key_policy_rep1 = aws_iam.PolicyStatement(
                sid=('Enable cross account encrypt access for S3 Cross Region '
                     'Replication'),
                actions=['kms:Encrypt'],
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                resources=['*'],)
            kms_aes_siem.add_to_resource_policy(key_policy_rep1)

            ##################################################################
            # Bucket Policy for multiaccount / organizations
            ##################################################################
            s3_log_bucket_arn = 'arn:aws:s3:::' + s3bucket_name_log

            # for CloudTrail
            s3_mulpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_org_trail = aws_iam.PolicyStatement(
                sid='AWSCloudTrailWrite for Multiaccounts / Organizations',
                principals=[
                    aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_mulpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_org_trail)

            # config
            s3_conf_multpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/Config/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_mul_config2 = aws_iam.PolicyStatement(
                sid='AWSConfigBucketDelivery',
                principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_conf_multpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_mul_config2)

            # for replication
            bucket_policy_rep1 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on objects',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:ReplicateDelete', 's3:ReplicateObject',
                         's3:ReplicateTags', 's3:GetObjectVersionTagging',
                         's3:ObjectOwnerOverrideToBucketOwner'],
                resources=[f'{s3_log_bucket_arn}/*'])
            bucket_policy_rep2 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on bucket',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:List*', 's3:GetBucketVersioning',
                         's3:PutBucketVersioning'],
                resources=[f'{s3_log_bucket_arn}'])
            s3_log.add_to_resource_policy(bucket_policy_rep1)
            s3_log.add_to_resource_policy(bucket_policy_rep2)

        ######################################################################
        # SNS topic for Amazon OpenSearch Service Alert
        ######################################################################
        sns_topic = aws_sns.Topic(
            self, 'SnsTopic', topic_name='aes-siem-alert',
            display_name='AES SIEM')

        sns_topic.add_subscription(aws_sns_subscriptions.EmailSubscription(
            email_address=sns_email.value_as_string))
        sns_topic.grant_publish(aes_siem_sns_role)

        ######################################################################
        # output of CFn
        ######################################################################
        kibanaurl = f'https://{es_endpoint}/_dashboards/'
        kibanaadmin = aes_domain.get_att('kibanaadmin').to_string()
        kibanapass = aes_domain.get_att('kibanapass').to_string()

        core.CfnOutput(self, 'RoleDeploy', export_name='role-deploy',
                       value=aes_siem_deploy_role_for_lambda.role_arn)
        core.CfnOutput(self, 'DashboardsUrl', export_name='dashboards-url',
                       value=kibanaurl)
        core.CfnOutput(self, 'DashboardsPassword',
                       export_name='dashboards-pass', value=kibanapass,
                       description=('Please change the password in OpenSearch '
                                    'Dashboards ASAP'))
        core.CfnOutput(self, 'DashboardsAdminID',
                       export_name='dashboards-admin', value=kibanaadmin)
Exemple #19
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Initializes the stack

        :param scope: parent of this stack, scope within which resources defined here are accessible
        :type scope: Optional[aws_cdk.core.Construct]
        :param id: the id of the stack
        :type id: Optional[str]
        :param **kwargs: additional optional arguments
            ``description``:
                a description of the stack (`Optional[str]`).
            ``env``
                AWS environment (account/region) where this stack will be deployed (`Optional[aws_cdk.core.Environment]`).
            ``stack_name``
                name with which to deploy the stack (`Optional[str]`).
            ``synthesizer``
                synthesis method to use while deploying this stack (`Optional[aws_cdk.core.IStackSynthesizer]`).
            ``tags``
                stack tags that will be applied to all taggable resources as well as the stack (`Optional[Mapping[str, str]]`).
            ``termination_protection``
                whether to enable termination protection for this stack (`Optional[bool]`).
        """
        super().__init__(scope, id, **kwargs)
        metric_handler_dict, webhook_creator_dict = self.handle_parameters()
        # timeout used for lambda and sqs, in seconds
        lambda_timeout = 300
        if self.node.try_get_context('lambda_timeout'):
            lambda_timeout = int(self.node.try_get_context('lambda_timeout'))
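        # The context value can be supplied from cdk.json or at synth/deploy
        # time, e.g.: cdk deploy -c lambda_timeout=120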

        dead_letter_queue = sqs.Queue(self,
                                      'DeadLetterQueue',
                                      queue_name='DeadLetterQueue')
        webhook_queue = sqs.Queue(
            self,
            'WebhookQueue',
            queue_name='WebhookQueue',
            visibility_timeout=core.Duration.seconds(lambda_timeout),
            dead_letter_queue=sqs.DeadLetterQueue(max_receive_count=3,
                                                  queue=dead_letter_queue))
        metric_handler_dict['queue_url'] = webhook_queue.queue_url

        metric_handler_management_role = self.create_lambda_role_and_policy(
            'MetricHandlerManagementRole', [
                'cloudwatch:GetDashboard', 'cloudwatch:GetMetricData',
                'cloudwatch:ListDashboards', 'cloudwatch:PutDashboard',
                'cloudwatch:PutMetricData', 'logs:CreateLogGroup',
                'logs:CreateLogStream', 'logs:PutLogEvents',
                'secretsmanager:GetSecretValue'
            ])
        metric_handler_timeout = lambda_timeout
        metric_handler_function = _lambda.Function(
            self,
            'MetricsHandler',
            function_name='MetricsHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda_dir'),
            handler='cloudwatch_dashboard_handler.handler',
            role=metric_handler_management_role,
            environment=metric_handler_dict,
            timeout=core.Duration.seconds(metric_handler_timeout))
        self.create_event_with_permissions(metric_handler_function)

        # Connect SQS to Lambda
        sqs_event_source = lambda_event_source.SqsEventSource(webhook_queue)
        metric_handler_function.add_event_source(sqs_event_source)

        apigw_webhook_url = self.create_and_integrate_apigw(
            webhook_queue, metric_handler_dict['dashboard_name_prefix'])
        webhook_creator_dict['apigw_endpoint'] = apigw_webhook_url

        webhook_role = self.create_lambda_role_and_policy(
            'WebhookCreatorRole', ['secretsmanager:GetSecretValue'])
        _lambda.Function(self,
                         'WebhookCreator',
                         function_name='WebhookCreator',
                         runtime=_lambda.Runtime.PYTHON_3_7,
                         code=_lambda.Code.asset('lambda_dir'),
                         handler='webhook_creator.handler',
                         role=webhook_role,
                         environment=webhook_creator_dict,
                         timeout=core.Duration.seconds(5))
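        # Usage sketch (hypothetical wiring; the stack class name and the
        # account/region are placeholders, not part of this snippet):
        #
        #   app = core.App()
        #   WebhookMetricsStack(app, 'webhook-metrics',
        #                       env=core.Environment(account='111111111111',
        #                                            region='us-east-1'),
        #                       description='Webhook-driven dashboard stack')
        #   app.synth()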
Exemple #20
0
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        entrypoint: Optional[List] = None,
        cpu: Union[int, float] = 256,
        memory: Union[int, float] = 512,
        mincount: int = 0,
        maxcount: int = 50,
        scaling_steps: int = 5,
        permissions: Optional[List[iam.PolicyStatement]] = None,
        vpc_id: Optional[str] = None,
        vpc_is_default: Optional[bool] = None,
        environment: dict = {},
        **kwargs: Any,
    ) -> None:
        """Define stack."""
        super().__init__(scope, id, **kwargs)

        permissions = permissions or []

        vpc = ec2.Vpc.from_lookup(self,
                                  "vpc",
                                  vpc_id=vpc_id,
                                  is_default=vpc_is_default)

        cluster = ecs.Cluster(self, f"{id}-cluster", vpc=vpc)

        topic = sns.Topic(self,
                          "ecsTopic",
                          display_name="ECS Watchbot SNS Topic")
        core.CfnOutput(
            self,
            "SNSTopic",
            value=topic.topic_arn,
            description="SNS Topic ARN",
            export_name=f"{id}-SNSTopic",
        )

        dlqueue = sqs.Queue(self, "ecsDeadLetterQueue")
        core.CfnOutput(
            self,
            "DeadSQSQueueURL",
            value=dlqueue.queue_url,
            description="DeadLetter SQS URL",
            export_name=f"{id}-DeadSQSQueueURL",
        )

        queue = sqs.Queue(
            self,
            "ecsQueue",
            dead_letter_queue=sqs.DeadLetterQueue(queue=dlqueue,
                                                  max_receive_count=3),
        )
        core.CfnOutput(
            self,
            "SQSQueueURL",
            value=queue.queue_url,
            description="SQS URL",
            export_name=f"{id}-SQSQueueURL",
        )

        environment.update({
            "REGION": self.region,
            "QUEUE_NAME": queue.queue_name
        })

        topic.add_subscription(sns_sub.SqsSubscription(queue))

        fargate_task_definition = ecs.FargateTaskDefinition(
            self,
            "FargateTaskDefinition",
            memory_limit_mib=memory,
            cpu=cpu,
        )
        log_driver = ecs.AwsLogDriver(
            stream_prefix=f"/ecs/tilebot/{id}",
            log_retention=aws_logs.RetentionDays.ONE_WEEK,
        )

        fargate_task_definition.add_container(
            "FargateContainer",
            image=ecs.ContainerImage.from_asset(directory="./"),
            entry_point=entrypoint,
            environment=environment,
            logging=log_driver,
        )

        fargate_service = ecs.FargateService(
            self,
            "FargateService",
            cluster=cluster,
            task_definition=fargate_task_definition,
            desired_count=mincount,
            enable_ecs_managed_tags=True,
            assign_public_ip=True,
        )
        permissions.append(
            iam.PolicyStatement(
                actions=["sqs:*"],
                resources=[queue.queue_arn],
            ))
        for perm in permissions:
            fargate_service.task_definition.task_role.add_to_policy(perm)

        total_number_of_message_lambda = aws_lambda.Function(
            self,
            f"{id}-TotalMessagesLambda",
            description="Create TotalNumberOfMessage metrics",
            code=aws_lambda.Code.from_inline("""const AWS = require('aws-sdk');
    exports.handler = function(event, context, callback) {
    const sqs = new AWS.SQS({ region: process.env.AWS_DEFAULT_REGION });
    const cw = new AWS.CloudWatch({ region: process.env.AWS_DEFAULT_REGION });
    return sqs.getQueueAttributes({
        QueueUrl: process.env.SQS_QUEUE_URL,
        AttributeNames: ['ApproximateNumberOfMessagesNotVisible', 'ApproximateNumberOfMessages']
    }).promise()
    .then((attrs) => {
        return cw.putMetricData({
            Namespace: 'AWS/SQS',
            MetricData: [{
            MetricName: 'TotalNumberOfMessages',
            Dimensions: [{ Name: 'QueueName', Value: process.env.SQS_QUEUE_NAME }],
            Value: Number(attrs.Attributes.ApproximateNumberOfMessagesNotVisible) +
                    Number(attrs.Attributes.ApproximateNumberOfMessages)
            }]
        }).promise();
    })
    .then((metric) => callback(null, metric))
    .catch((err) => callback(err));
};"""),
            handler="index.handler",
            runtime=aws_lambda.Runtime.NODEJS_10_X,
            timeout=core.Duration.seconds(60),
            environment={
                "SQS_QUEUE_URL": queue.queue_url,
                "SQS_QUEUE_NAME": queue.queue_name,
            },
        )
        total_number_of_message_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["sqs:GetQueueAttributes"],
                resources=[queue.queue_arn],
            ))
        total_number_of_message_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["cloudwatch:PutMetricData"],
                resources=["*"],
            ))
        total_number_of_message_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["logs:*"],
                resources=["arn:aws:logs:*:*:*"],
            ))

        rule = aws_events.Rule(
            self,
            "TotalMessagesSchedule",
            schedule=aws_events.Schedule.rate(core.Duration.seconds(60)),
        )
        rule.add_target(
            aws_events_targets.LambdaFunction(total_number_of_message_lambda))

        scalable_target = auto_scale.ScalableTarget(
            self,
            "AutoScallingTarget",
            min_capacity=mincount,
            max_capacity=maxcount,
            service_namespace=auto_scale.ServiceNamespace.ECS,
            resource_id="/".join([
                "service", cluster.cluster_name, fargate_service.service_name
            ]),
            scalable_dimension="ecs:service:DesiredCount",
        )
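        # resource_id must follow the ECS Application Auto Scaling format:
        # "service/<cluster-name>/<service-name>".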
        scalable_target.node.add_dependency(fargate_service)

        scale_up = auto_scale.CfnScalingPolicy(
            self,
            "ScaleUp",
            policy_name="PolicyScaleUp",
            policy_type="StepScaling",
            scaling_target_id=scalable_target.scalable_target_id,
            step_scaling_policy_configuration=auto_scale.CfnScalingPolicy.
            StepScalingPolicyConfigurationProperty(
                adjustment_type="ChangeInCapacity",
                cooldown=120,
                metric_aggregation_type="Maximum",
                step_adjustments=[
                    auto_scale.CfnScalingPolicy.StepAdjustmentProperty(
                        scaling_adjustment=scaling_steps,
                        metric_interval_lower_bound=0,
                    ),
                ],
            ),
        )
        scale_up_trigger = aws_cloudwatch.CfnAlarm(  # noqa
            self,
            "ScaleUpTrigger",
            alarm_description="Scale up due to visible messages in queue",
            dimensions=[
                aws_cloudwatch.CfnAlarm.DimensionProperty(
                    name="QueueName",
                    value=queue.queue_name,
                ),
            ],
            metric_name="ApproximateNumberOfMessagesVisible",
            namespace="AWS/SQS",
            evaluation_periods=1,
            comparison_operator="GreaterThanThreshold",
            period=60,
            statistic="Maximum",
            threshold=0,
            alarm_actions=[scale_up.ref],
        )

        scale_down = auto_scale.CfnScalingPolicy(
            self,
            "ScaleDown",
            policy_name="PolicyScaleDown",
            policy_type="StepScaling",
            scaling_target_id=scalable_target.scalable_target_id,
            step_scaling_policy_configuration=auto_scale.CfnScalingPolicy.
            StepScalingPolicyConfigurationProperty(
                adjustment_type="ExactCapacity",
                cooldown=60,
                step_adjustments=[
                    auto_scale.CfnScalingPolicy.StepAdjustmentProperty(
                        scaling_adjustment=mincount,
                        metric_interval_upper_bound=0,
                    ),
                ],
            ),
        )

        scale_down_trigger = aws_cloudwatch.CfnAlarm(  # noqa
            self,
            "ScaleDownTrigger",
            alarm_description=
            "Scale down due to lack of in-flight messages in queue",
            dimensions=[
                aws_cloudwatch.CfnAlarm.DimensionProperty(
                    name="QueueName",
                    value=queue.queue_name,
                ),
            ],
            metric_name="TotalNumberOfMessages",
            namespace="AWS/SQS",
            evaluation_periods=1,
            comparison_operator="LessThanThreshold",
            period=120,
            statistic="Maximum",
            threshold=1,
            alarm_actions=[scale_down.ref],
        )
    def __init__(self, scope: core.Construct, construct_id: str,
                 stack_log_level: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below

        # Maximum number of times a message can be received from a queue before it is moved to the next queue (retry or DLQ)
        self.max_msg_receive_cnt = 5
        self.max_msg_receive_cnt_at_retry = 3

        # Define Dead Letter Queue
        self.reliable_q_dlq = _sqs.Queue(
            self,
            "DeadLetterQueue",
            delivery_delay=core.Duration.seconds(100),
            queue_name=f"reliable_q_dlq",
            retention_period=core.Duration.days(2),
            visibility_timeout=core.Duration.seconds(10),
            receive_message_wait_time=core.Duration.seconds(10))

        # Define Retry Queue for Reliable Q
        self.reliable_q_retry_1 = _sqs.Queue(
            self,
            "reliableQueueRetry1",
            delivery_delay=core.Duration.seconds(10),
            queue_name=f"reliable_q_retry_1",
            retention_period=core.Duration.days(2),
            visibility_timeout=core.Duration.seconds(10),
            receive_message_wait_time=core.Duration.seconds(10),
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=self.max_msg_receive_cnt_at_retry,
                queue=self.reliable_q_dlq))

        # Primary Source Queue
        self.reliable_q = _sqs.Queue(
            self,
            "reliableQueue",
            delivery_delay=core.Duration.seconds(5),
            queue_name=f"reliable_q",
            retention_period=core.Duration.days(2),
            visibility_timeout=core.Duration.seconds(10),
            receive_message_wait_time=core.Duration.seconds(10),
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=self.max_msg_receive_cnt,
                queue=self.reliable_q_retry_1))
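        # Message flow: reliable_q --(after max_msg_receive_cnt receives)-->
        # reliable_q_retry_1 --(after max_msg_receive_cnt_at_retry
        # receives)--> reliable_q_dlq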

        ########################################
        #######                          #######
        #######     SQS Data Producer    #######
        #######                          #######
        ########################################

        # Read Lambda Code
        try:
            with open(
                    "stacks/back_end/serverless_sqs_producer_stack/lambda_src/sqs_data_producer.py",
                    encoding="utf-8",
                    mode="r") as f:
                data_producer_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        data_producer_fn = _lambda.Function(
            self,
            "sqsDataProducerFn",
            function_name=f"data_producer_fn_{construct_id}",
            description="Produce data events and push to SQS",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(data_producer_fn_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(5),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": f"{stack_log_level}",
                "APP_ENV": "Production",
                "RELIABLE_QUEUE_NAME": f"{self.reliable_q.queue_name}",
                "TRIGGER_RANDOM_FAILURES": "True"
            })

        # Grant our Lambda Producer privileges to write to SQS
        self.reliable_q.grant_send_messages(data_producer_fn)

        # Create Custom Loggroup for Producer
        data_producer_lg = _logs.LogGroup(
            self,
            "dataProducerLogGroup",
            log_group_name=f"/aws/lambda/{data_producer_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)

        # Restrict the producer Lambda to be invoked only from the stack owner's account
        data_producer_fn.add_permission(
            "restrictLambdaInvocationToFhInOwnAccount",
            principal=_iam.AccountRootPrincipal(),
            action="lambda:InvokeFunction",
            source_account=core.Aws.ACCOUNT_ID)

        # Monitoring for Queue
        reliable_q_alarm = _cw.Alarm(
            self,
            "reliableQueueAlarm",
            metric=self.reliable_q.metric(
                "ApproximateNumberOfMessagesVisible"),
            statistic="sum",
            threshold=10,
            period=core.Duration.minutes(5),
            evaluation_periods=1,
            comparison_operator=_cw.ComparisonOperator.GREATER_THAN_THRESHOLD)

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_1 = core.CfnOutput(
            self,
            "SqsDataProducer",
            value=
            f"https://console.aws.amazon.com/lambda/home?region={core.Aws.REGION}#/functions/{data_producer_fn.function_name}",
            description="Produce data events and push to SQS Queue.")

        output_2 = core.CfnOutput(
            self,
            "ReliableQueue",
            value=
            f"https://console.aws.amazon.com/sqs/v2/home?region={core.Aws.REGION}#/queues",
            description="Reliable Queue")
    def __init__(self, scope: core.Construct, _id: str, **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)

        ddb_file_list = ddb.Table(self, "ddb",
                                  partition_key=ddb.Attribute(name="Key", type=ddb.AttributeType.STRING),
                                  billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

        sqs_queue_DLQ = sqs.Queue(self, "sqs_DLQ",
                                  visibility_timeout=core.Duration.minutes(15),
                                  retention_period=core.Duration.days(14)
                                  )
        sqs_queue = sqs.Queue(self, "sqs_queue",
                              visibility_timeout=core.Duration.minutes(15),
                              retention_period=core.Duration.days(14),
                              dead_letter_queue=sqs.DeadLetterQueue(
                                  max_receive_count=100,
                                  queue=sqs_queue_DLQ
                              )
                              )
        handler = lam.Function(self, "lambdaFunction",
                               code=lam.Code.asset("./lambda"),
                               handler="lambda_function.lambda_handler",
                               runtime=lam.Runtime.PYTHON_3_8,
                               memory_size=1024,
                               timeout=core.Duration.minutes(15),
                               tracing=lam.Tracing.ACTIVE,
                               environment={
                                   'table_queue_name': ddb_file_list.table_name,
                                   'Des_bucket_default': Des_bucket_default,
                                   'Des_prefix_default': Des_prefix_default,
                                   'StorageClass': StorageClass,
                                   'aws_access_key_id': aws_access_key_id,
                                   'aws_secret_access_key': aws_secret_access_key,
                                   'aws_access_key_region': aws_access_key_region
                               })

        ddb_file_list.grant_read_write_data(handler)
        handler.add_event_source(SqsEventSource(sqs_queue))

        s3bucket = s3.Bucket(self, "s3bucket")
        s3bucket.grant_read(handler)
        s3bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                        s3n.SqsDestination(sqs_queue))

        # You can import an existing bucket and grant access to lambda
        # exist_s3bucket = s3.Bucket.from_bucket_name(self, "import_bucket",
        #                                             bucket_name="your_bucket_name")
        # exist_s3bucket.grant_read(handler)

        # But you have to add SQS as the imported bucket's event notification manually;
        # CloudFormation doesn't support configuring notifications on an imported bucket.
        # A workaround is to use on_cloud_trail_event for the bucket, but that requires a
        # CloudTrail trail logging the bucket's data events first.
        # (Because the bucket is imported, you must create the bucket-event-to-SQS trigger
        # by hand and grant SQS the permission that allows this bucket to send to it.)
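        # A rough sketch of that CloudTrail-based workaround (untested; assumes
        # aws_events_targets is imported as `targets`, a trail recording the
        # bucket's data events exists, and uses on_cloud_trail_write_object,
        # a more specific variant of on_cloud_trail_event):
        # exist_s3bucket.on_cloud_trail_write_object(
        #     "imported_bucket_object_created",
        #     paths=["*"],
        #     target=targets.SqsQueue(sqs_queue))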

        core.CfnOutput(self, "DynamoDB_Table", value=ddb_file_list.table_name)
        core.CfnOutput(self, "SQS_Job_Queue", value=sqs_queue.queue_name)
        core.CfnOutput(self, "SQS_Job_Queue_DLQ", value=sqs_queue_DLQ.queue_name)
        core.CfnOutput(self, "Worker_Lambda_Function", value=handler.function_name)
        core.CfnOutput(self, "New_S3_Bucket", value=s3bucket.bucket_name)

        # Create Lambda logs filter to create network traffic metric
        handler.log_group.add_metric_filter("Complete-bytes",
                                            metric_name="Complete-bytes",
                                            metric_namespace="s3_migrate",
                                            metric_value="$bytes",
                                            filter_pattern=logs.FilterPattern.literal(
                                                '[info, date, sn, p="--->Complete", bytes, key]'))
        handler.log_group.add_metric_filter("Uploading-bytes",
                                            metric_name="Uploading-bytes",
                                            metric_namespace="s3_migrate",
                                            metric_value="$bytes",
                                            filter_pattern=logs.FilterPattern.literal(
                                                '[info, date, sn, p="--->Uploading", bytes, key]'))
        handler.log_group.add_metric_filter("Downloading-bytes",
                                            metric_name="Downloading-bytes",
                                            metric_namespace="s3_migrate",
                                            metric_value="$bytes",
                                            filter_pattern=logs.FilterPattern.literal(
                                                '[info, date, sn, p="--->Downloading", bytes, key]'))
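        # The bracketed patterns above are CloudWatch Logs space-delimited
        # filter patterns; metric_value="$bytes" publishes the value of the
        # named `bytes` field from each matching log line.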
        lambda_metric_Complete = cw.Metric(namespace="s3_migrate",
                                           metric_name="Complete-bytes",
                                           statistic="Sum",
                                           period=core.Duration.minutes(1))
        lambda_metric_Upload = cw.Metric(namespace="s3_migrate",
                                         metric_name="Uploading-bytes",
                                         statistic="Sum",
                                         period=core.Duration.minutes(1))
        lambda_metric_Download = cw.Metric(namespace="s3_migrate",
                                           metric_name="Downloading-bytes",
                                           statistic="Sum",
                                           period=core.Duration.minutes(1))
        handler.log_group.add_metric_filter("ERROR",
                                            metric_name="ERROR-Logs",
                                            metric_namespace="s3_migrate",
                                            metric_value="1",
                                            filter_pattern=logs.FilterPattern.literal(
                                                '"ERROR"'))
        handler.log_group.add_metric_filter("WARNING",
                                            metric_name="WARNING-Logs",
                                            metric_namespace="s3_migrate",
                                            metric_value="1",
                                            filter_pattern=logs.FilterPattern.literal(
                                                '"WARNING"'))
        log_metric_ERROR = cw.Metric(namespace="s3_migrate",
                                     metric_name="ERROR-Logs",
                                     statistic="Sum",
                                     period=core.Duration.minutes(1))
        log_metric_WARNING = cw.Metric(namespace="s3_migrate",
                                       metric_name="WARNING-Logs",
                                       statistic="Sum",
                                       period=core.Duration.minutes(1))

        # Dashboard to monitor SQS and Lambda
        board = cw.Dashboard(self, "s3_migrate", dashboard_name="s3_migrate_serverless")

        board.add_widgets(cw.GraphWidget(title="Lambda-NETWORK",
                                         left=[lambda_metric_Download, lambda_metric_Upload, lambda_metric_Complete]),
                          # TODO: this monitors the concurrency of all Lambda functions, not just this one (a CDK limitation).
                          # Lambda now supports per-function concurrency metrics; switch to that once CDK supports it.
                          cw.GraphWidget(title="Lambda-all-concurrent",
                                         left=[handler.metric_all_concurrent_executions(period=core.Duration.minutes(1))]),

                          cw.GraphWidget(title="Lambda-invocations/errors/throttles",
                                         left=[handler.metric_invocations(period=core.Duration.minutes(1)),
                                               handler.metric_errors(period=core.Duration.minutes(1)),
                                               handler.metric_throttles(period=core.Duration.minutes(1))]),
                          cw.GraphWidget(title="Lambda-duration",
                                         left=[handler.metric_duration(period=core.Duration.minutes(1))]),
                          )

        board.add_widgets(cw.GraphWidget(title="SQS-Jobs",
                                         left=[sqs_queue.metric_approximate_number_of_messages_visible(
                                             period=core.Duration.minutes(1)
                                         ),
                                               sqs_queue.metric_approximate_number_of_messages_not_visible(
                                                   period=core.Duration.minutes(1)
                                               )]),
                          cw.GraphWidget(title="SQS-DeadLetterQueue",
                                         left=[sqs_queue_DLQ.metric_approximate_number_of_messages_visible(
                                             period=core.Duration.minutes(1)
                                         ),
                                               sqs_queue_DLQ.metric_approximate_number_of_messages_not_visible(
                                                   period=core.Duration.minutes(1)
                                               )]),
                          cw.GraphWidget(title="ERROR/WARNING Logs",
                                         left=[log_metric_ERROR],
                                         right=[log_metric_WARNING]),
                          cw.SingleValueWidget(title="Running/Waiting and Dead Jobs",
                                               metrics=[sqs_queue.metric_approximate_number_of_messages_not_visible(
                                                   period=core.Duration.minutes(1)
                                               ),
                                                        sqs_queue.metric_approximate_number_of_messages_visible(
                                                            period=core.Duration.minutes(1)
                                                        ),
                                                        sqs_queue_DLQ.metric_approximate_number_of_messages_not_visible(
                                                            period=core.Duration.minutes(1)
                                                        ),
                                                        sqs_queue_DLQ.metric_approximate_number_of_messages_visible(
                                                            period=core.Duration.minutes(1)
                                                        )],
                                               height=6)
                          )
        # Alarm for queue - DLQ
        alarm_DLQ = cw.Alarm(self, "SQS_DLQ",
                             alarm_name="s3-migration-serverless-SQS Dead Letter Queue",
                             metric=sqs_queue_DLQ.metric_approximate_number_of_messages_visible(),
                             threshold=0,
                             comparison_operator=cw.ComparisonOperator.GREATER_THAN_THRESHOLD,
                             evaluation_periods=1,
                             datapoints_to_alarm=1)
        alarm_topic = sns.Topic(self, "SQS queue-DLQ has dead letter")
        alarm_topic.add_subscription(subscription=sub.EmailSubscription(alarm_email))
        alarm_DLQ.add_alarm_action(action.SnsAction(alarm_topic))

        # Alarm for empty queue, i.e. no visible messages and no in-flight (not-visible) messages
        # metric_all_message = cw.MathExpression(
        #     expression="a + b",
        #     label="empty_queue_expression",
        #     using_metrics={
        #         "a": sqs_queue.metric_approximate_number_of_messages_visible(),
        #         "b": sqs_queue.metric_approximate_number_of_messages_not_visible()
        #     }
        # )
        # alarm_0 = cw.Alarm(self, "SQSempty",
        #                    alarm_name="SQS queue empty-Serverless",
        #                    metric=metric_all_message,
        #                    threshold=0,
        #                    comparison_operator=cw.ComparisonOperator.LESS_THAN_OR_EQUAL_TO_THRESHOLD,
        #                    evaluation_periods=3,
        #                    datapoints_to_alarm=3,
        #                    treat_missing_data=cw.TreatMissingData.IGNORE
        #                    )
        # alarm_topic = sns.Topic(self, "SQS queue empty-Serverless")
        # alarm_topic.add_subscription(subscription=sub.EmailSubscription(alarm_email))
        # alarm_0.add_alarm_action(action.SnsAction(alarm_topic))

        # core.CfnOutput(self, "Alarm", value="CloudWatch SQS queue empty Alarm for Serverless: " + alarm_email)
        core.CfnOutput(self, "Dashboard", value="CloudWatch Dashboard name s3_migrate_serverless")
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Tag all constructs with the project for easy billing drilldown,
        # filtering, and organization.
        core.Tags.of(self).add('project', 'MediaTranscription')

        # Media files bucket
        media_bucket = s3.Bucket(
            self,
            'media-transcription-bucket',
            encryption=s3.BucketEncryption.S3_MANAGED,
        )

        # SQS queue for media files bucket event notifications
        media_bucket_event_queue = sqs.Queue(
            self,
            'media-transcription-event-notification-queue',
            queue_name='media-transcription-event-notification-queue',
            visibility_timeout=core.Duration.seconds(60),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3,
                queue=sqs.Queue(
                    self,
                    'media-transcription-event-notifications-dlq',
                    queue_name='media-transcription-event-notifications-dlq',
                )),
        )

        # S3 object created notifications sent to SQS queue
        media_bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            s3n.SqsDestination(media_bucket_event_queue),
            *[s3.NotificationKeyFilter(prefix='media-input/')],
        )

        # Lambda function to create/submit Transcribe jobs
        transcribe_job_init_fn = lambda_.Function(
            self,
            'transcribe-job-init-fn',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset(
                '../lambdas/transcribe-job-init-fn',
                # The following is just dumb.
                # The Lambda runtime doesn't use the latest boto3 by default.
                # In order to use the latest boto3, we have to pip install
                # and bundle locally using Docker.
                # Q: Why do we need the latest boto3?
                # A: https://github.com/boto/boto3/issues/2630
                # I'll have to delete the ECR containers to avoid cost.
                # TODO: Revert back to normal in like a month I guess.
                bundling={
                    'image':
                    lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                    'command': [
                        'bash', '-c',
                        '\n        pip install -r requirements.txt -t /asset-output &&\n        cp -au . /asset-output\n        '
                    ]
                }),
            handler='fn.handler',
            reserved_concurrent_executions=1,  # Effectively single-threaded
        )
        # Triggered by SQS messages created for media file puts
        transcribe_job_init_fn.add_event_source(
            les.SqsEventSource(
                queue=media_bucket_event_queue,
                batch_size=5,
                enabled=True,
            ))
        # Grant access to start transcription jobs
        transcribe_job_init_fn.add_to_role_policy(
            statement=iam.PolicyStatement(
                actions=[
                    'transcribe:StartTranscriptionJob',
                ],
                resources=['*'],
                effect=iam.Effect.ALLOW,
            ))

        # Grant Lambda role to read and write to input and output portions of
        # the S3 bucket.
        # Q: Why grant Lambda the permissions instead of Transcribe service?
        # A: Two-fold:
        #   -  i) https://amzn.to/321Nx5I
        #   - ii) Granting just to this Lambda means other Transcribe jobs
        #         across the account cannot use this bucket (least privilege).
        media_bucket.grant_read(
            identity=transcribe_job_init_fn.grant_principal,
            objects_key_pattern='media-input/*')
        # Cannot specify a prefix for writes as Transcribe will not accept
        # a job unless it has write permission on the whole bucket.
        # Edit: The above statement was from when I had to use '*' for writes.
        #       Now that I've granted access to .write_access_check_file.temp,
        #       it all seems to work.
        media_bucket.grant_write(
            identity=transcribe_job_init_fn.grant_principal,
            objects_key_pattern='transcribe-output-raw/*')
        # This is just as frustrating to you as it is to me.
        media_bucket.grant_write(
            identity=transcribe_job_init_fn.grant_principal,
            objects_key_pattern='.write_access_check_file.temp')

        # DynamoDB table for Jobs metadata
        jobs_metadata_table = ddb.Table(
            self,
            'MediaTranscription-TranscriptionJobs',
            table_name='MediaTranscription-TranscriptionJobs',
            partition_key=ddb.Attribute(
                name='Bucket-Key-ETag',
                type=ddb.AttributeType.STRING,
            ),
            billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
        )
        jobs_metadata_table.grant(transcribe_job_init_fn.grant_principal, *[
            'dynamodb:GetItem',
            'dynamodb:PutItem',
        ])

        # Create IAM Group with read/write permissions to S3 bucket
        # TODO: Make this more federated and robust
        console_users_group = iam.Group(self, 'MediaTranscriptionConsoleUsers')
        console_users_group.attach_inline_policy(policy=iam.Policy(
            self,
            'MediaTranscriptionConsoleUserS3Access',
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        's3:ListBucket',
                    ],
                    resources=[
                        media_bucket.bucket_arn,
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        's3:GetObject',
                        's3:PutObject',
                    ],
                    resources=[
                        media_bucket.arn_for_objects('media-input/*'),
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        's3:GetObject',
                    ],
                    resources=[
                        media_bucket.arn_for_objects(
                            'transcribe-output-raw/*'),
                    ],
                ),
            ],
        ))
    def __init__(self, scope: core.Construct, id: str, instance_id: str,
                 contact_flow_id: str, source_phone_number: str, timeout: int,
                 **kwargs):
        super().__init__(scope, id, **kwargs)

        web_bucket = _s3.Bucket(self,
                                "StaticWebBucket",
                                website_index_document="index.html",
                                website_error_document="index.html",
                                removal_policy=core.RemovalPolicy.DESTROY,
                                public_read_access=True)

        core.CfnOutput(self,
                       'WebBucketUrl',
                       value=web_bucket.bucket_domain_name)

        web_distribution = _clf.CloudFrontWebDistribution(
            self,
            'StaticWebDistribution',
            origin_configs=[
                _clf.SourceConfiguration(
                    s3_origin_source=_clf.S3OriginConfig(
                        s3_bucket_source=web_bucket),
                    behaviors=[_clf.Behavior(is_default_behavior=True)])
            ],
            viewer_protocol_policy=_clf.ViewerProtocolPolicy.REDIRECT_TO_HTTPS)

        _s3d.BucketDeployment(
            self,
            "S3StaticWebContentDeploymentWithInvalidation",
            sources=[
                _s3d.Source.asset(
                    f"{pathlib.Path(__file__).parent.absolute()}/site-content/build"
                )
            ],
            destination_bucket=web_bucket,
            distribution=web_distribution,
            distribution_paths=["/*"])

        file_bucket = _s3.Bucket(self,
                                 "FileBucket",
                                 removal_policy=core.RemovalPolicy.DESTROY)

        call_dead_letter_queue = _sqs.Queue(self,
                                            "CallDeadLetterQueue",
                                            fifo=True,
                                            content_based_deduplication=True)

        call_sqs_queue = _sqs.Queue(
            self,
            "CallSqsQueue",
            fifo=True,
            content_based_deduplication=True,
            visibility_timeout=core.Duration.seconds(120),
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=1, queue=call_dead_letter_queue))

        async_call_dead_letter_queue = _sqs.Queue(
            self,
            "AsyncCallDeadLetterQueue",
            fifo=True,
            content_based_deduplication=True)

        async_callout_queue = _sqs.Queue(
            self,
            "AsyncCalloutQueue",
            fifo=True,
            content_based_deduplication=True,
            visibility_timeout=core.Duration.seconds(120),
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=1, queue=async_call_dead_letter_queue))

        call_job_complete_sns_topic = _sns.Topic(
            self, "CallJobCompleteSnsTopic", display_name="CallJobCompletion")

        call_result_table = _dynamodb.Table(
            self,
            "CallResultDynamodbTable",
            billing_mode=_dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=_dynamodb.Attribute(
                name="task_id", type=_dynamodb.AttributeType.STRING),
            sort_key=_dynamodb.Attribute(name="receiver_id",
                                         type=_dynamodb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)

        callout_record_table = _dynamodb.Table(
            self,
            "CallTaskDynamodbTable",
            billing_mode=_dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=_dynamodb.Attribute(
                name="task_id", type=_dynamodb.AttributeType.STRING),
            sort_key=_dynamodb.Attribute(name="created_at",
                                         type=_dynamodb.AttributeType.NUMBER),
            removal_policy=core.RemovalPolicy.DESTROY)
        callout_record_table.add_global_secondary_index(
            partition_key=_dynamodb.Attribute(
                name='call_type', type=_dynamodb.AttributeType.STRING),
            sort_key=_dynamodb.Attribute(name='created_at',
                                         type=_dynamodb.AttributeType.NUMBER),
            index_name='CallTypeCreatedAtGlobalIndex',
            projection_type=_dynamodb.ProjectionType.ALL)

        python_function_layer = _lambda.LayerVersion(
            self,
            "LambdaPythonFunctionLayer",
            code=_lambda.Code.asset("aws_callouts_cdk/layer/_python"),
            compatible_runtimes=[
                _lambda.Runtime.PYTHON_3_7, _lambda.Runtime.PYTHON_3_8
            ],
            license="Available under the MIT-0 license")

        nodejs_function_layer = _lambda.LayerVersion(
            self,
            "LambdaNodeJsFunctionLayer",
            code=_lambda.Code.asset("aws_callouts_cdk/layer/_nodejs"),
            compatible_runtimes=[
                _lambda.Runtime.NODEJS_10_X, _lambda.Runtime.NODEJS_12_X
            ],
            license="Available under the MIT-0 license")

        global_python_function_arguments = {
            "code": _lambda.Code.asset("aws_callouts_cdk/src/python"),
            "layers": [python_function_layer],
            "runtime": _lambda.Runtime.PYTHON_3_7
        }

        global_nodeje_function_arguments = {
            "code": _lambda.Code.asset("aws_callouts_cdk/src/nodejs"),
            "layers": [nodejs_function_layer],
            "runtime": _lambda.Runtime.NODEJS_12_X
        }

        get_callout_job_function = _lambda.Function(
            self,
            "GetCalloutJobFunction",
            handler="get_call_job.lambda_handler",
            **global_python_function_arguments)
        get_callout_job_function.add_environment(key="S3Bucket",
                                                 value=file_bucket.bucket_name)
        file_bucket.grant_read(get_callout_job_function)

        callout_function = _lambda.Function(self,
                                            "CalloutFunction",
                                            handler="send_call.lambda_handler",
                                            **global_python_function_arguments)
        callout_function.add_environment(
            key="ContactFlowArn",
            value=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}/contact-flow/{contact_flow_id}"
        )
        callout_function.add_environment(key="SourcePhoneNumber",
                                         value=source_phone_number)
        callout_function.add_environment(key="ExcelFileBucket",
                                         value=file_bucket.bucket_name)
        callout_function.add_environment(key="AsynCalloutQueueUrl",
                                         value=async_callout_queue.queue_url)
        callout_function.add_to_role_policy(statement=_iam.PolicyStatement(
            resources=[
                f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}/*"
            ],
            actions=["connect:StartOutboundVoiceContact"]))
        callout_function.add_event_source(source=_les.SqsEventSource(
            queue=async_callout_queue, batch_size=1))
        file_bucket.grant_read_write(callout_function)

        response_handler_function = _lambda.Function(
            self,
            "ResponseHandlerFunction",
            handler="response_handler.lambda_handler",
            **global_python_function_arguments)
        response_handler_function.add_permission(
            id="ResponseHandlerFunctionLambdaInvokePermission",
            principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_account=self.account,
            source_arn=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
        )

        send_task_success_function = _lambda.Function(
            self,
            "SendTaskSuccessFunction",
            handler="send_task_success.lambda_handler",
            **global_python_function_arguments)
        send_task_success_function.add_permission(
            id="SendTaskSuccessFunctionLambdaInvokePermission",
            principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_account=self.account,
            source_arn=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
        )

        get_call_result_function = _lambda.Function(
            self,
            "GetCallResultFunction",
            handler="get_call_result.lambda_handler",
            memory_size=512,
            **global_python_function_arguments)
        get_call_result_function.add_environment(
            key="CallResultDynamoDBTable", value=call_result_table.table_name)
        get_call_result_function.add_environment(key="S3Bucket",
                                                 value=file_bucket.bucket_name)
        call_result_table.grant_read_data(grantee=get_call_result_function)
        file_bucket.grant_read_write(get_call_result_function)

        iterator_function = _lambda.Function(
            self,
            "IteratorFunction",
            handler="iterator.lambda_handler",
            **global_python_function_arguments)
        iterator_function.add_permission(
            id="IteratorFunctionLambdaInvokePermission",
            principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_account=self.account,
            source_arn=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
        )

        create_appsync_call_task_function = _lambda.Function(
            self,
            "CreateAppSyncCallTaskFunction",
            handler="create_appsync_call_task.lambda_handler",
            **global_nodejs_function_arguments)
        create_appsync_call_task_function.add_environment(
            key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
        create_appsync_call_task_function.add_environment(
            key="CallRecordTableName", value=callout_record_table.table_name)
        call_sqs_queue.grant_send_messages(create_appsync_call_task_function)
        callout_record_table.grant_write_data(
            create_appsync_call_task_function)

        create_call_report_record_function = _lambda.Function(
            self,
            "CreateCallReportRecordFunction",
            handler="create_call_report_record.lambda_handler",
            **global_nodejs_function_arguments)

        create_excel_call_task_function = _lambda.Function(
            self,
            "CreateExcelCallTaskFunction",
            handler="create_excel_call_task.lambda_handler",
            **global_python_function_arguments)
        create_excel_call_task_function.add_environment(
            key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
        call_sqs_queue.grant_send_messages(create_excel_call_task_function)

        create_excel_call_task_function.add_event_source(
            source=_les.S3EventSource(bucket=file_bucket,
                                      events=[_s3.EventType.OBJECT_CREATED],
                                      filters=[
                                          _s3.NotificationKeyFilter(
                                              prefix="call_task",
                                              suffix=".xlsx")
                                      ]))

        start_callout_flow_function = _lambda.Function(
            self,
            "StartCalloutFlowFunction",
            handler="start_call_out_flow.lambda_handler",
            reserved_concurrent_executions=1,
            **global_python_function_arguments)
        start_callout_flow_function.add_environment(
            key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
        start_callout_flow_function.add_environment(
            key="ResponseHandlerFunctionArn",
            value=response_handler_function.function_arn)
        start_callout_flow_function.add_environment(
            key="IteratorFunctionArn", value=iterator_function.function_arn)
        start_callout_flow_function.add_environment(
            key="SendTaskSuccessFunctionArn",
            value=send_task_success_function.function_arn)
        start_callout_flow_function.add_environment(
            key="S3Bucket", value=file_bucket.bucket_name)
        start_callout_flow_function.add_event_source(
            source=_les.SqsEventSource(queue=call_sqs_queue, batch_size=1))
        file_bucket.grant_read_write(start_callout_flow_function)
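
        # State machine shape (definition below): a Pass state hands the batch of call
        # messages to a Map state; each iteration fetches the call-out job, sends a message
        # to the async callout queue via the sqs:sendMessage.waitForTaskToken service
        # integration (pausing until SendTaskSuccess is called or TimeoutSeconds elapses),
        # then writes the call result to DynamoDB. After the Map, results are collected,
        # a report record is created, and a completion message is published to SNS.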

        call_state_machine_definition = {
            "Comment":
            "Reading messages from an SQS queue and iteratively processing each message.",
            "StartAt": "Start",
            "States": {
                "Start": {
                    "Type": "Pass",
                    "Next": "Process Call Messages"
                },
                "Process Call Messages": {
                    "Type": "Map",
                    "Next": "Get Call Result",
                    "InputPath": "$",
                    "ItemsPath": "$",
                    "OutputPath": "$.[0]",
                    "Iterator": {
                        "StartAt": "Get Call out job",
                        "States": {
                            "Get Call out job": {
                                "Type": "Task",
                                "Resource":
                                get_callout_job_function.function_arn,
                                "Next": "Callout with AWS Connect"
                            },
                            "Callout with AWS Connect": {
                                "Type":
                                "Task",
                                "Resource":
                                "arn:aws:states:::sqs:sendMessage.waitForTaskToken",
                                "TimeoutSeconds":
                                timeout,
                                "Parameters": {
                                    "QueueUrl": async_callout_queue.queue_url,
                                    "MessageGroupId": "1",
                                    "MessageBody": {
                                        "Message.$": "$",
                                        "TaskToken.$": "$$.Task.Token"
                                    }
                                },
                                "Catch": [{
                                    "ErrorEquals": ["States.Timeout"],
                                    "ResultPath": None,
                                    "Next": "Call Timeout"
                                }],
                                "Next":
                                "Save call result"
                            },
                            "Call Timeout": {
                                "Type": "Pass",
                                "ResultPath": None,
                                "Next": "Save call result"
                            },
                            "Save call result": {
                                "Type": "Task",
                                "Resource":
                                "arn:aws:states:::dynamodb:putItem",
                                "Parameters": {
                                    "TableName": call_result_table.table_name,
                                    "Item": {
                                        "receiver_id": {
                                            "S.$": "$.receiver_id"
                                        },
                                        "task_id": {
                                            "S.$": "$.task_id"
                                        },
                                        "username": {
                                            "S.$": "$.username"
                                        },
                                        "phone_number": {
                                            "S.$": "$.phone_number"
                                        },
                                        "status": {
                                            "S.$": "$.status"
                                        },
                                        "answers": {
                                            "S.$": "$.answers"
                                        },
                                        "error": {
                                            "S.$": "$.error"
                                        },
                                        "call_at": {
                                            "S.$": "$.call_at"
                                        }
                                    }
                                },
                                "ResultPath": "$.Result",
                                "OutputPath": "$.task_id",
                                "End": True
                            }
                        }
                    }
                },
                "Get Call Result": {
                    "Type": "Task",
                    "Resource": get_call_result_function.function_arn,
                    "Next": "Create Call Report Record"
                },
                "Create Call Report Record": {
                    "Type": "Task",
                    "Resource":
                    create_call_report_record_function.function_arn,
                    "Next": "Send Completion message to SNS"
                },
                "Send Completion message to SNS": {
                    "Type": "Task",
                    "Resource": "arn:aws:states:::sns:publish",
                    "Parameters": {
                        "TopicArn": call_job_complete_sns_topic.topic_arn,
                        "Message.$": "$"
                    },
                    "Next": "Finish"
                },
                "Finish": {
                    "Type": "Succeed"
                }
            }
        }
        callout_state_machine_role = _iam.Role(
            self,
            "CalloutStatesExecutionRole",
            assumed_by=_iam.ServicePrincipal(
                f"states.{self.region}.amazonaws.com"))
        callout_state_machine_role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    "sqs:SendMessage", "dynamodb:PutItem",
                    "lambda:InvokeFunction", "SNS:Publish"
                ],
                resources=[
                    async_callout_queue.queue_arn, call_result_table.table_arn,
                    get_callout_job_function.function_arn,
                    get_call_result_function.function_arn,
                    call_job_complete_sns_topic.topic_arn,
                    create_appsync_call_task_function.function_arn,
                    create_call_report_record_function.function_arn
                ]))
        callout_state_machine = _sfn.CfnStateMachine(
            self,
            "CalloutStateMachine",
            role_arn=callout_state_machine_role.role_arn,
            definition_string=json.dumps(call_state_machine_definition))
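        # Ref on the L1 CfnStateMachine resolves to the state machine ARN, so it can be
        # used directly as an IAM policy resource and as an environment variable value.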
        send_task_success_function.add_to_role_policy(
            _iam.PolicyStatement(actions=["states:SendTaskSuccess"],
                                 resources=[callout_state_machine.ref]))

        start_callout_flow_function.add_environment(
            key="CalloutStateMachineArn", value=callout_state_machine.ref)
        start_callout_flow_function.add_to_role_policy(
            _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                                 resources=[callout_state_machine.ref],
                                 actions=['states:StartExecution']))

        user_pool = _cognito.UserPool(
            self, "UserPool", sign_in_type=_cognito.SignInType.USERNAME)

        user_pool_client = _cognito.UserPoolClient(self,
                                                   "UserPoolClient",
                                                   user_pool=user_pool)

        appsync_api = _appsync.GraphQLApi(
            self,
            "AppSyncApi",
            name="AWSCalloutApi",
            user_pool_config=_appsync.UserPoolConfig(
                user_pool=user_pool,
                default_action=_appsync.UserPoolDefaultAction.ALLOW),
            log_config=_appsync.LogConfig(
                field_log_level=_appsync.FieldLogLevel.ALL),
            schema_definition_file=
            f"{pathlib.Path(__file__).parent.absolute()}/schema.graphql")

        callout_record_ddb_ds = appsync_api.add_dynamo_db_data_source(
            name="CalloutRecordDdb",
            description="Callout Record DynamoDB Data Source",
            table=callout_record_table)
        callout_record_ddb_ds.create_resolver(
            type_name="Query",
            field_name="getLatestCallTaskRecords",
            request_mapping_template=_appsync.MappingTemplate.from_string(
                '{"version":"2017-02-28","operation":"Query","index":"CallTypeCreatedAtGlobalIndex","query":{"expression":"call_type = :call_type","expressionValues":{":call_type":{"S":"TASK"}}},"scanIndexForward":false,"limit":${ctx.args.limit}}'
            ),
            response_mapping_template=_appsync.MappingTemplate.
            dynamo_db_result_list())
        callout_record_ddb_ds.create_resolver(
            type_name="Query",
            field_name="getLatestCallReportRecords",
            request_mapping_template=_appsync.MappingTemplate.from_string(
                '{"version":"2017-02-28","operation":"Query","index":"CallTypeCreatedAtGlobalIndex","query":{"expression":"call_type = :call_type","expressionValues":{":call_type":{"S":"REPORT"}}},"scanIndexForward":false,"limit":${ctx.args.limit}}'
            ),
            response_mapping_template=_appsync.MappingTemplate.
            dynamo_db_result_list())
        callout_record_ddb_ds.create_resolver(
            type_name="Mutation",
            field_name="createCallReport",
            request_mapping_template=_appsync.MappingTemplate.from_string(
                '{"version":"2017-02-28","operation":"PutItem","key":{"task_id":{"S":"${ctx.args.report.task_id}"},"created_at":{"N":"${ctx.args.report.created_at}"}},"attributeValues":$util.dynamodb.toMapValuesJson($ctx.args.report)}'
            ),
            response_mapping_template=_appsync.MappingTemplate.
            dynamo_db_result_item())

        call_task_lambda_ds = appsync_api.add_lambda_data_source(
            name="CallTaskLambda",
            description="Call Task Lambda Data Source",
            lambda_function=create_appsync_call_task_function)
        call_task_lambda_ds.create_resolver(
            type_name="Mutation",
            field_name="createCallTask",
            request_mapping_template=_appsync.MappingTemplate.lambda_request(
                "$utils.toJson($ctx.args)"),
            response_mapping_template=_appsync.MappingTemplate.lambda_result())
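        # The query resolvers and createCallReport resolve directly against DynamoDB through
        # VTL mapping templates; createCallTask is proxied to the call task Lambda above.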

        create_call_report_record_function.add_environment(
            value=appsync_api.graph_ql_url, key="AppSyncGraphQlApiUrl")

        create_call_report_record_function.add_to_role_policy(
            statement=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=['appsync:GraphQL'],
                resources=[
                    f"{appsync_api.arn}/types/Mutation/fields/createCallReport"
                ]))

        core.CfnOutput(self,
                       id="OutputCallSqsQueue",
                       value=call_sqs_queue.queue_arn)
        core.CfnOutput(self,
                       id="OutputCallJobCompletionSNSTopic",
                       value=call_job_complete_sns_topic.topic_arn)
        core.CfnOutput(self,
                       id="OutputExcelFileS3Bucket",
                       value=file_bucket.bucket_name)
        core.CfnOutput(self,
                       id="OutputStaticWebS3Bucket",
                       value=web_bucket.bucket_name)
        core.CfnOutput(self,
                       id="OutputStaticWebUrl",
                       value=web_bucket.bucket_website_url)

        identity_pool = _cognito.CfnIdentityPool(
            self,
            "IdentityPool",
            allow_unauthenticated_identities=True,
            cognito_identity_providers=[
                _cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
                    provider_name=user_pool.user_pool_provider_name,
                    client_id=user_pool_client.user_pool_client_id)
            ])
        identity_pool_unauthorized_role = _iam.Role(
            self,
            'IdentityPoolUnAuthorizedRole',
            assumed_by=_iam.FederatedPrincipal(
                federated="cognito-identity.amazonaws.com",
                assume_role_action="sts:AssumeRoleWithWebIdentity",
                conditions={
                    "StringEquals": {
                        "cognito-identity.amazonaws.com:aud": identity_pool.ref
                    },
                    "ForAnyValue:StringLike": {
                        "cognito-identity.amazonaws.com:amr": "unauthenticated"
                    }
                }))
        identity_pool_unauthorized_role.add_to_policy(
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["appsync:GraphQL"],
                resources=[
                    f"{appsync_api.arn}/types/*",
                    # f"{appsync_api.arn}/types/Query/fields/getLatestCallTaskRecords",
                    # f"{appsync_api.arn}/types/Query/fields/getLatestCallReportRecords",
                    # f"{appsync_api.arn}/types/Mutation/fields/createCallRecord",
                    # f"{appsync_api.arn}/types/Subscription/fields/createCallTask",
                    # f"{appsync_api.arn}/types/Subscription/fields/createCallReport"
                ]))
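        # "{arn}/types/*" grants the unauthenticated role access to every GraphQL field;
        # the commented-out ARNs show how to narrow this to specific queries, mutations
        # and subscriptions.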

        _cognito.CfnIdentityPoolRoleAttachment(
            self,
            "CognitoIdentityPoolRoleAttachment",
            identity_pool_id=identity_pool.ref,
            roles={
                "unauthenticated": identity_pool_unauthorized_role.role_arn
            })

        core.CfnOutput(self, id="UserPoolId", value=user_pool.user_pool_id)
        core.CfnOutput(self,
                       id="UserPoolClientId",
                       value=user_pool_client.user_pool_client_id)
        core.CfnOutput(self, id="IdentityPoolId", value=identity_pool.ref)
Exemple #25
0
    def __init__(self, scope: core.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.poc_config = {'api_poc': dict()}
        self.read_config()

        # shared stuff
        self._vpc = ec2.Vpc(
            self,
            'api_poc-vpc',
            cidr='10.0.0.0/23',
            max_azs=1,
            nat_gateways=1,
        )

        self._private_subnet_selection = self._vpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE)
        self._security_group = ec2.SecurityGroup.from_security_group_id(
            self,
            'default_sg',
            security_group_id=self._vpc.vpc_default_security_group)

        self._security_group.add_ingress_rule(description='redis',
                                              peer=self._security_group,
                                              connection=ec2.Port.tcp_range(
                                                  start_port=6379,
                                                  end_port=6379))
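        # Self-referencing rule: resources in the VPC default security group (the Lambdas
        # and the cache cluster below) may reach Redis on port 6379.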

        self._python3_lib_layer = _lambda.LayerVersion(
            self,
            'python3-lib-layer',
            description="python3 module dependencies",
            compatible_runtimes=[
                _lambda.Runtime.PYTHON_3_7, _lambda.Runtime.PYTHON_3_6
            ],
            code=_lambda.Code.from_asset('layers/python3-lib-layer.zip'))

        # redis cache cluster
        self._cache_subnet_group = elasticache.CfnSubnetGroup(
            self,
            'cache_subnet_group',
            description='elasticache subnet group',
            subnet_ids=self._private_subnet_selection.subnet_ids,
            cache_subnet_group_name='cache-subnet-group')

        self._redis_cache = elasticache.CfnCacheCluster(
            self,
            'cache',
            cache_node_type='cache.t2.micro',
            num_cache_nodes=1,
            engine='redis',
            cache_subnet_group_name='cache-subnet-group',
            vpc_security_group_ids=[self._security_group.security_group_id],
        )
        self._redis_cache.add_depends_on(self._cache_subnet_group)
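        # The cluster refers to the subnet group by its literal name, so CloudFormation
        # cannot infer the dependency; declare it explicitly to ensure creation order.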

        # external API simulator lambda
        api_handler = _lambda.Function(
            self,
            "external-api",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='external_api.handler',
            layers=[self._python3_lib_layer],
            vpc=self._vpc,
            vpc_subnets=self._private_subnet_selection,
            security_group=self._security_group,
            log_retention=logs.RetentionDays.FIVE_DAYS,
            tracing=_lambda.Tracing.ACTIVE)
        api_handler.add_environment('REDIS_ADDRESS', self.redis_address)
        api_handler.add_environment('REDIS_PORT', self.redis_port)

        # API Gateway frontend to simulator lambda
        self._api_gateway = apigw.LambdaRestApi(
            self,
            'external_api',
            description='external API emulator',
            options=apigw.StageOptions(stage_name='dev'),
            handler=api_handler,
            proxy=True)

        job_dlq = sqs.Queue(self, 'job-dlq')

        job_queue = sqs.Queue(self,
                              'job-queue',
                              dead_letter_queue=sqs.DeadLetterQueue(
                                  queue=job_dlq, max_receive_count=3))

        throttle_event_topic = sns.Topic(self, 'throttle-events-topic')

        self.add_sns_subscriptions(throttle_event_topic)

        worker = _lambda.Function(self,
                                  'worker',
                                  runtime=_lambda.Runtime.PYTHON_3_7,
                                  code=_lambda.Code.asset('lambda'),
                                  handler='worker.handler',
                                  layers=[self._python3_lib_layer],
                                  reserved_concurrent_executions=20,
                                  timeout=core.Duration.minutes(1),
                                  vpc=self._vpc,
                                  vpc_subnets=self._private_subnet_selection,
                                  security_group=self._security_group,
                                  log_retention=logs.RetentionDays.FIVE_DAYS,
                                  tracing=_lambda.Tracing.ACTIVE,
                                  dead_letter_queue_enabled=False)
        worker.add_environment('API_KEY', '212221848ab214821de993a9d')
        worker.add_environment('JOB_QUEUE_URL', job_queue.queue_url)
        worker.add_environment('THROTTLE_EVENTS_TOPIC',
                               throttle_event_topic.topic_arn)
        worker.add_environment('REDIS_ADDRESS', self.redis_address)
        worker.add_environment('REDIS_PORT', self.redis_port)
        job_queue.grant_send_messages(worker)
        throttle_event_topic.grant_publish(worker)

        orchestrator = _lambda.Function(
            self,
            'orchestrator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='orchestrator.handler',
            layers=[self._python3_lib_layer],
            reserved_concurrent_executions=1,
            timeout=core.Duration.minutes(2),
            vpc=self._vpc,
            vpc_subnets=self._private_subnet_selection,
            security_group=self._security_group,
            log_retention=logs.RetentionDays.FIVE_DAYS,
            tracing=_lambda.Tracing.ACTIVE,
        )
        orchestrator.add_environment('API_HOST_URL', self._api_gateway.url)
        orchestrator.add_environment('JOB_QUEUE_URL', job_queue.queue_url)
        orchestrator.add_environment('JOB_DLQ_URL', job_dlq.queue_url)
        orchestrator.add_environment('THROTTLE_EVENTS_TOPIC',
                                     throttle_event_topic.topic_arn)
        orchestrator.add_environment('REDIS_ADDRESS', self.redis_address)
        orchestrator.add_environment('REDIS_PORT', self.redis_port)
        orchestrator.add_environment('WORKER_FUNCTION_ARN',
                                     worker.function_arn)
        job_queue.grant_consume_messages(orchestrator)
        job_dlq.grant_send_messages(orchestrator)
        throttle_event_topic.grant_publish(orchestrator)
        worker.grant_invoke(orchestrator)

        task_master = _lambda.Function(
            self,
            'task_master',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='task_master.handler',
            layers=[self._python3_lib_layer],
            reserved_concurrent_executions=1,
            vpc=self._vpc,
            vpc_subnets=self._private_subnet_selection,
            security_group=self._security_group,
            log_retention=logs.RetentionDays.FIVE_DAYS,
            tracing=_lambda.Tracing.ACTIVE,
        )
        task_master.add_environment('SQS_URL', job_queue.queue_url)
        task_master.add_environment('REDIS_ADDRESS', self.redis_address)
        task_master.add_environment('REDIS_PORT', self.redis_port)
        task_master.add_environment('API_HOST_URL', self._api_gateway.url)
        job_queue.grant_send_messages(task_master)

        slack_notify = _lambda.Function(
            self,
            'slack-notify',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('lambda'),
            handler='slack_notify.lambda_handler',
            log_retention=logs.RetentionDays.FIVE_DAYS,
            tracing=_lambda.Tracing.ACTIVE,
        )
        # lambda uses ssm parameter store to retrieve values
        slack_notify.add_environment('encryptedHookUrlKey',
                                     '/api_poc/notify/slack/hook_url')
        slack_notify.add_environment('slackChannelKey',
                                     '/api_poc/notify/slack/channel')
        slack_notify.add_environment('notifySlack', 'false')
        slack_notify.add_event_source(
            event_sources.SnsEventSource(throttle_event_topic))
        slack_notify.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                # TODO fix least privilege
                # actions=['ssm:GetParameter'],
                # resources=['arn:aws:ssm:::parameter/api_poc/notify/slack/*'],
                actions=['ssm:*'],
                resources=['*'],
            ))

        # kick off lambda(s) once per interval
        rule = events.Rule(self,
                           'orchestrator_rule',
                           schedule=events.Schedule.rate(
                               core.Duration.hours(1)))
        # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
        rule.add_target(targets.LambdaFunction(orchestrator))
        rule.add_target(targets.LambdaFunction(task_master))
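        # The orchestrator and the task_master are both invoked on the same hourly schedule.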

        # stack outputs
        core.CfnOutput(self,
                       'Redis_Address',
                       value=self._redis_cache.attr_redis_endpoint_address +
                       ':' + self._redis_cache.attr_redis_endpoint_port)
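
The Lambda sources under lambda/ are not part of this stack definition; as a rough sketch only, a task_master handler driven by the SQS_URL and API_HOST_URL variables set above might enqueue work like this (the job payload shape is an assumption):

    import json
    import os

    import boto3

    sqs = boto3.client("sqs")


    def handler(event, context):
        queue_url = os.environ["SQS_URL"]
        api_host = os.environ["API_HOST_URL"]
        # Hypothetical: one message per job id; the real task source is not shown.
        for job_id in range(10):
            sqs.send_message(
                QueueUrl=queue_url,
                MessageBody=json.dumps({"job_id": job_id, "api_host": api_host}),
            )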
Exemple #26
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ###########################################################################
        # AWS LAMBDA FUNCTIONS
        ###########################################################################
        parse_image_list_file = aws_lambda.Function(
            self,
            'parse_image_list_file',
            handler='parse_image_list_file.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('parse_image_list_file'),
            memory_size=10240,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        list_objects = aws_lambda.Function(
            self,
            'list_objects',
            handler='list_objects.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('list_objects'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        get_size_and_store = aws_lambda.Function(
            self,
            'get_size_and_store',
            handler='get_size_and_store.lambda_handler',
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset('get_size_and_store'),
            memory_size=4096,
            timeout=core.Duration.seconds(300),
            log_retention=aws_logs.RetentionDays.ONE_DAY)

        ###########################################################################
        # AMAZON S3 BUCKETS
        ###########################################################################
        images_bucket = aws_s3.Bucket(self, "images_bucket")

        ###########################################################################
        # LAMBDA SUPPLEMENTAL POLICIES
        ###########################################################################
        lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["s3:Get*", "s3:Head*", "s3:List*", "sqs:*", "es:*"],
            resources=["*"])

        parse_image_list_file.add_to_role_policy(
            lambda_supplemental_policy_statement)
        list_objects.add_to_role_policy(lambda_supplemental_policy_statement)
        get_size_and_store.add_to_role_policy(
            lambda_supplemental_policy_statement)

        ###########################################################################
        # AWS SNS TOPICS
        ###########################################################################
        # notification_topic = aws_sns.Topic(self, "notification_topic")

        ###########################################################################
        # ADD AMAZON S3 BUCKET NOTIFICATIONS
        ###########################################################################
        images_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED,
            aws_s3_notifications.LambdaDestination(parse_image_list_file))

        ###########################################################################
        # AWS SQS QUEUES
        ###########################################################################
        comprehend_queue_iqueue = aws_sqs.Queue(self,
                                                "comprehend_queue_iqueue")
        comprehend_queue_iqueue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=comprehend_queue_iqueue)
        comprehend_queue = aws_sqs.Queue(
            self,
            "comprehend_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=comprehend_queue_iqueue_dlq)

        rekognition_queue_iqueue = aws_sqs.Queue(self,
                                                 "rekognition_queue_iqueue")
        rekognition_queue_dlq = aws_sqs.DeadLetterQueue(
            max_receive_count=10, queue=rekognition_queue_iqueue)
        rekognition_queue = aws_sqs.Queue(
            self,
            "rekognition_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=rekognition_queue_dlq)

        object_queue_iqueue = aws_sqs.Queue(self, "object_queue_iqueue")
        object_queue_dlq = aws_sqs.DeadLetterQueue(max_receive_count=10,
                                                   queue=object_queue_iqueue)
        object_queue = aws_sqs.Queue(
            self,
            "object_queue",
            visibility_timeout=core.Duration.seconds(301),
            dead_letter_queue=object_queue_dlq)

        ###########################################################################
        # AWS LAMBDA SQS EVENT SOURCE
        ###########################################################################
        get_size_and_store.add_event_source(
            SqsEventSource(object_queue, batch_size=10))
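        # The queues' 301-second visibility timeout slightly exceeds the 300-second Lambda
        # timeout, so a message is not made visible again while an invocation is in flight.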

        ###########################################################################
        # AWS ELASTICSEARCH DOMAIN
        ###########################################################################
        s3workflow_domain = aws_elasticsearch.Domain(
            self,
            "s3workflow_domain",
            version=aws_elasticsearch.ElasticsearchVersion.V7_1,
            capacity={
                "master_nodes": 3,
                "data_nodes": 4
            },
            ebs={"volume_size": 100},
            zone_awareness={"availability_zone_count": 2},
            logging={
                "slow_search_log_enabled": True,
                "app_log_enabled": True,
                "slow_index_log_enabled": True
            })

        ###########################################################################
        # AMAZON COGNITO USER POOL
        ###########################################################################
        s3workflow_pool = aws_cognito.UserPool(
            self,
            "s3workflow-pool",
            account_recovery=None,
            auto_verify=None,
            custom_attributes=None,
            email_settings=None,
            enable_sms_role=None,
            lambda_triggers=None,
            mfa=None,
            mfa_second_factor=None,
            password_policy=None,
            self_sign_up_enabled=None,
            sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                      phone=None,
                                                      preferred_username=None,
                                                      username=True),
            sign_in_case_sensitive=None,
            sms_role=None,
            sms_role_external_id=None,
            standard_attributes=None,
            user_invitation=None,
            user_pool_name=None,
            user_verification=None)

        ###########################################################################
        # AMAZON VPC
        ###########################################################################
        vpc = aws_ec2.Vpc(self, "s3workflowVPC",
                          max_azs=3)  # default is all AZs in region

        ###########################################################################
        # AMAZON ECS CLUSTER
        ###########################################################################
        cluster = aws_ecs.Cluster(self, "s3", vpc=vpc)

        ###########################################################################
        # AMAZON ECS Repositories
        ###########################################################################
        rekognition_repository = aws_ecr.Repository(
            self,
            "rekognition_repository",
            image_scan_on_push=True,
            removal_policy=core.RemovalPolicy("DESTROY"))
        comprehend_repository = aws_ecr.Repository(
            self,
            "comprehend_repository",
            image_scan_on_push=True,
            removal_policy=core.RemovalPolicy("DESTROY"))

        ###########################################################################
        # AMAZON ECS Roles and Policies
        ###########################################################################
        task_execution_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "logs:*", "ecs:*", "ec2:*", "elasticloadbalancing:*", "ecr:*"
            ],
            resources=["*"])
        task_execution_policy_document = aws_iam.PolicyDocument()
        task_execution_policy_document.add_statements(
            task_execution_policy_statement)
        task_execution_policy = aws_iam.Policy(
            self,
            "task_execution_policy",
            document=task_execution_policy_document)
        task_execution_role = aws_iam.Role(
            self,
            "task_execution_role",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
        task_execution_role.attach_inline_policy(task_execution_policy)

        task_policy_statement = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "logs:*", "xray:*", "sqs:*", "s3:*", "rekognition:*",
                "comprehend:*", "es:*"
            ],
            resources=["*"])
        task_policy_document = aws_iam.PolicyDocument()
        task_policy_document.add_statements(task_policy_statement)
        task_policy = aws_iam.Policy(self,
                                     "task_policy",
                                     document=task_policy_document)
        task_role = aws_iam.Role(
            self,
            "task_role",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
        task_role.attach_inline_policy(task_policy)

        ###########################################################################
        # AMAZON ECS Task definitions
        ###########################################################################
        rekognition_task_definition = aws_ecs.TaskDefinition(
            self,
            "rekognition_task_definition",
            compatibility=aws_ecs.Compatibility("FARGATE"),
            cpu="1024",
            # ipc_mode=None,
            memory_mib="2048",
            network_mode=aws_ecs.NetworkMode("AWS_VPC"),
            # pid_mode=None,                                      #Not supported in Fargate and Windows containers
            # placement_constraints=None,
            execution_role=task_execution_role,
            # family=None,
            # proxy_configuration=None,
            task_role=task_role
            # volumes=None
        )

        comprehend_task_definition = aws_ecs.TaskDefinition(
            self,
            "comprehend_task_definition",
            compatibility=aws_ecs.Compatibility("FARGATE"),
            cpu="1024",
            # ipc_mode=None,
            memory_mib="2048",
            network_mode=aws_ecs.NetworkMode("AWS_VPC"),
            # pid_mode=None,                                      #Not supported in Fargate and Windows containers
            # placement_constraints=None,
            execution_role=task_execution_role,
            # family=None,
            # proxy_configuration=None,
            task_role=task_role
            # volumes=None
        )

        ###########################################################################
        # AMAZON ECS Images
        ###########################################################################
        rekognition_ecr_image = aws_ecs.EcrImage(
            repository=rekognition_repository, tag="latest")
        comprehend_ecr_image = aws_ecs.EcrImage(
            repository=comprehend_repository, tag="latest")

        ###########################################################################
        # ENVIRONMENT VARIABLES
        ###########################################################################
        environment_variables = {
            "COMPREHEND_QUEUE": comprehend_queue.queue_url,
            "REKOGNITION_QUEUE": rekognition_queue.queue_url,
            "IMAGES_BUCKET": images_bucket.bucket_name,
            "ELASTICSEARCH_HOST": s3workflow_domain.domain_endpoint,
        }

        parse_image_list_file.add_environment(
            "ELASTICSEARCH_HOST", s3workflow_domain.domain_endpoint)
        parse_image_list_file.add_environment("QUEUEURL",
                                              rekognition_queue.queue_url)
        parse_image_list_file.add_environment("DEBUG", "False")
        parse_image_list_file.add_environment("BUCKET", "-")
        parse_image_list_file.add_environment("KEY", "-")

        list_objects.add_environment("QUEUEURL", object_queue.queue_url)
        list_objects.add_environment("ELASTICSEARCH_HOST",
                                     s3workflow_domain.domain_endpoint)
        list_objects.add_environment("S3_BUCKET_NAME",
                                     images_bucket.bucket_name)
        list_objects.add_environment("S3_BUCKET_PREFIX", "images/")
        list_objects.add_environment("S3_BUCKET_SUFFIX", "")
        list_objects.add_environment("LOGGING_LEVEL", "INFO")

        get_size_and_store.add_environment("QUEUEURL", object_queue.queue_url)
        get_size_and_store.add_environment("ELASTICSEARCH_HOST",
                                           s3workflow_domain.domain_endpoint)
        get_size_and_store.add_environment("S3_BUCKET_NAME",
                                           images_bucket.bucket_name)
        get_size_and_store.add_environment("S3_BUCKET_PREFIX", "images/")
        get_size_and_store.add_environment("S3_BUCKET_SUFFIX", "")
        get_size_and_store.add_environment("LOGGING_LEVEL", "INFO")

        ###########################################################################
        # ECS Log Drivers
        ###########################################################################
        rekognition_task_log_driver = aws_ecs.LogDriver.aws_logs(
            stream_prefix="s3workflow",
            log_retention=aws_logs.RetentionDays("ONE_DAY"))
        comprehend_task_log_driver = aws_ecs.LogDriver.aws_logs(
            stream_prefix="s3workflow",
            log_retention=aws_logs.RetentionDays("ONE_DAY"))

        ###########################################################################
        # ECS Task Definitions
        ###########################################################################
        rekognition_task_definition.add_container(
            "rekognition_task_definition",
            image=rekognition_ecr_image,
            memory_reservation_mib=1024,
            environment=environment_variables,
            logging=rekognition_task_log_driver)

        comprehend_task_definition.add_container(
            "comprehend_task_definition",
            image=comprehend_ecr_image,
            memory_reservation_mib=1024,
            environment=environment_variables,
            logging=comprehend_task_log_driver)

        ###########################################################################
        # AWS ROUTE53 HOSTED ZONE
        ###########################################################################
        hosted_zone = aws_route53.HostedZone(
            self,
            "hosted_zone",
            zone_name="s3workflow.com",
            comment="private hosted zone for s3workflow system")
        hosted_zone.add_vpc(vpc)
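
The container images pushed to the two ECR repositories are built outside this stack; a minimal sketch of what the Rekognition worker loop might do with the REKOGNITION_QUEUE and IMAGES_BUCKET variables (the message shape is an assumption, and the Elasticsearch indexing step is omitted):

    import json
    import os

    import boto3

    sqs = boto3.client("sqs")
    rekognition = boto3.client("rekognition")


    def main():
        queue_url = os.environ["REKOGNITION_QUEUE"]
        bucket = os.environ["IMAGES_BUCKET"]
        while True:
            response = sqs.receive_message(
                QueueUrl=queue_url, MaxNumberOfMessages=1, WaitTimeSeconds=20
            )
            for message in response.get("Messages", []):
                key = json.loads(message["Body"])["key"]  # assumed message shape
                labels = rekognition.detect_labels(
                    Image={"S3Object": {"Bucket": bucket, "Name": key}}
                )
                print([label["Name"] for label in labels["Labels"]])  # indexing omitted
                sqs.delete_message(
                    QueueUrl=queue_url, ReceiptHandle=message["ReceiptHandle"]
                )


    if __name__ == "__main__":
        main()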
Exemple #27
0
    def __init__(
        self,
        scope: Construct,
        stack_id: str,
        *,
        botocore_lambda_layer: aws_lambda_python.PythonLayerVersion,
        env_name: str,
        storage_bucket: aws_s3.Bucket,
        validation_results_table: Table,
    ) -> None:
        # pylint: disable=too-many-locals, too-many-statements

        super().__init__(scope, stack_id)

        ############################################################################################
        # PROCESSING ASSETS TABLE
        processing_assets_table = Table(
            self,
            f"{env_name}-processing-assets",
            env_name=env_name,
            parameter_name=ParameterName.PROCESSING_ASSETS_TABLE_NAME,
            sort_key=aws_dynamodb.Attribute(name="sk", type=aws_dynamodb.AttributeType.STRING),
        )

        ############################################################################################
        # BATCH JOB DEPENDENCIES
        batch_job_queue = BatchJobQueue(
            self,
            "batch-job-queue",
            env_name=env_name,
            processing_assets_table=processing_assets_table,
        ).job_queue

        s3_read_only_access_policy = aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonS3ReadOnlyAccess"
        )

        ############################################################################################
        # UPDATE CATALOG UPDATE MESSAGE QUEUE

        dead_letter_queue = aws_sqs.Queue(
            self,
            "dead-letter-queue",
            visibility_timeout=LAMBDA_TIMEOUT,
        )

        self.message_queue = aws_sqs.Queue(
            self,
            "update-catalog-message-queue",
            visibility_timeout=LAMBDA_TIMEOUT,
            dead_letter_queue=aws_sqs.DeadLetterQueue(max_receive_count=3, queue=dead_letter_queue),
        )
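        # The visibility timeout matches the Lambda timeout so a message is not redelivered
        # while it is still being processed; after three failed receives it lands on the DLQ.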
        self.message_queue_name_parameter = aws_ssm.StringParameter(
            self,
            "update-catalog-message-queue-name",
            string_value=self.message_queue.queue_name,
            description=f"Update Catalog Message Queue Name for {env_name}",
            parameter_name=ParameterName.UPDATE_CATALOG_MESSAGE_QUEUE_NAME.value,
        )

        populate_catalog_lambda = BundledLambdaFunction(
            self,
            "populate-catalog-bundled-lambda-function",
            directory="populate_catalog",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
            botocore_lambda_layer=botocore_lambda_layer,
        )

        self.message_queue.grant_consume_messages(populate_catalog_lambda)
        populate_catalog_lambda.add_event_source(
            SqsEventSource(self.message_queue, batch_size=1)  # type: ignore[arg-type]
        )

        ############################################################################################
        # STATE MACHINE TASKS

        check_stac_metadata_task = LambdaTask(
            self,
            "check-stac-metadata-task",
            directory="check_stac_metadata",
            botocore_lambda_layer=botocore_lambda_layer,
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )
        assert check_stac_metadata_task.lambda_function.role
        check_stac_metadata_task.lambda_function.role.add_managed_policy(
            policy=s3_read_only_access_policy
        )

        for table in [processing_assets_table, validation_results_table]:
            table.grant_read_write_data(check_stac_metadata_task.lambda_function)
            table.grant(
                check_stac_metadata_task.lambda_function,
                "dynamodb:DescribeTable",
            )

        content_iterator_task = LambdaTask(
            self,
            "content-iterator-task",
            directory="content_iterator",
            botocore_lambda_layer=botocore_lambda_layer,
            result_path=f"$.{CONTENT_KEY}",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )

        check_files_checksums_directory = "check_files_checksums"
        check_files_checksums_default_payload_object = {
            f"{DATASET_ID_KEY}.$": f"$.{DATASET_ID_KEY}",
            f"{VERSION_ID_KEY}.$": f"$.{VERSION_ID_KEY}",
            f"{METADATA_URL_KEY}.$": f"$.{METADATA_URL_KEY}",
            f"{FIRST_ITEM_KEY}.$": f"$.{CONTENT_KEY}.{FIRST_ITEM_KEY}",
            f"{ASSETS_TABLE_NAME_KEY}.$": f"$.{CONTENT_KEY}.{ASSETS_TABLE_NAME_KEY}",
            f"{RESULTS_TABLE_NAME_KEY}.$": f"$.{CONTENT_KEY}.{RESULTS_TABLE_NAME_KEY}",
        }
        check_files_checksums_single_task = BatchSubmitJobTask(
            self,
            "check-files-checksums-single-task",
            env_name=env_name,
            directory=check_files_checksums_directory,
            s3_policy=s3_read_only_access_policy,
            job_queue=batch_job_queue,
            payload_object=check_files_checksums_default_payload_object,
            container_overrides_command=[
                "--dataset-id",
                f"Ref::{DATASET_ID_KEY}",
                "--version-id",
                f"Ref::{VERSION_ID_KEY}",
                "--first-item",
                f"Ref::{FIRST_ITEM_KEY}",
                "--assets-table-name",
                f"Ref::{ASSETS_TABLE_NAME_KEY}",
                "--results-table-name",
                f"Ref::{RESULTS_TABLE_NAME_KEY}",
            ],
        )
        array_size = int(
            aws_stepfunctions.JsonPath.number_at(f"$.{CONTENT_KEY}.{ITERATION_SIZE_KEY}")
        )
        check_files_checksums_array_task = BatchSubmitJobTask(
            self,
            "check-files-checksums-array-task",
            env_name=env_name,
            directory=check_files_checksums_directory,
            s3_policy=s3_read_only_access_policy,
            job_queue=batch_job_queue,
            payload_object=check_files_checksums_default_payload_object,
            container_overrides_command=[
                "--dataset-id",
                f"Ref::{DATASET_ID_KEY}",
                "--version-id",
                f"Ref::{VERSION_ID_KEY}",
                "--first-item",
                f"Ref::{FIRST_ITEM_KEY}",
                "--assets-table-name",
                f"Ref::{ASSETS_TABLE_NAME_KEY}",
                "--results-table-name",
                f"Ref::{RESULTS_TABLE_NAME_KEY}",
            ],
            array_size=array_size,
        )
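        # Identical container command to the single-item task, but submitted as an AWS Batch
        # array job sized to the number of items in the current iteration.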

        for reader in [
            content_iterator_task.lambda_function,
            check_files_checksums_single_task.job_role,
            check_files_checksums_array_task.job_role,
        ]:
            processing_assets_table.grant_read_data(reader)  # type: ignore[arg-type]
            processing_assets_table.grant(
                reader, "dynamodb:DescribeTable"  # type: ignore[arg-type]
            )

        for writer in [
            check_files_checksums_single_task.job_role,
            check_files_checksums_array_task.job_role,
        ]:
            validation_results_table.grant_read_write_data(writer)  # type: ignore[arg-type]
            validation_results_table.grant(
                writer, "dynamodb:DescribeTable"  # type: ignore[arg-type]
            )

        validation_summary_task = LambdaTask(
            self,
            "validation-summary-task",
            directory="validation_summary",
            botocore_lambda_layer=botocore_lambda_layer,
            result_path=f"$.{VALIDATION_KEY}",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )
        validation_results_table.grant_read_data(validation_summary_task.lambda_function)
        validation_results_table.grant(
            validation_summary_task.lambda_function, "dynamodb:DescribeTable"
        )

        import_dataset_role = aws_iam.Role(
            self,
            "import-dataset",
            assumed_by=aws_iam.ServicePrincipal(  # type: ignore[arg-type]
                "batchoperations.s3.amazonaws.com"
            ),
        )

        import_asset_file_function = ImportFileFunction(
            self,
            directory="import_asset_file",
            invoker=import_dataset_role,
            env_name=env_name,
            botocore_lambda_layer=botocore_lambda_layer,
        )
        import_metadata_file_function = ImportFileFunction(
            self,
            directory="import_metadata_file",
            invoker=import_dataset_role,
            env_name=env_name,
            botocore_lambda_layer=botocore_lambda_layer,
        )

        import_dataset_task = LambdaTask(
            self,
            "import-dataset-task",
            directory="import_dataset",
            botocore_lambda_layer=botocore_lambda_layer,
            result_path=f"$.{IMPORT_DATASET_KEY}",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )

        import_dataset_task.lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(
                resources=[import_dataset_role.role_arn],
                actions=["iam:PassRole"],
            ),
        )
        import_dataset_task.lambda_function.add_to_role_policy(
            aws_iam.PolicyStatement(resources=["*"], actions=["s3:CreateJob"])
        )
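        # The import-dataset Lambda starts an S3 Batch Operations job (s3:CreateJob) and must
        # be allowed to pass import_dataset_role, which the job assumes, to that service.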

        for table in [processing_assets_table]:
            table.grant_read_data(import_dataset_task.lambda_function)
            table.grant(import_dataset_task.lambda_function, "dynamodb:DescribeTable")

        # Import status check
        wait_before_upload_status_check = Wait(
            self,
            "wait-before-upload-status-check",
            time=WaitTime.duration(Duration.seconds(10)),
        )
        upload_status_task = LambdaTask(
            self,
            "upload-status",
            directory="upload_status",
            botocore_lambda_layer=botocore_lambda_layer,
            result_path="$.upload_status",
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )
        validation_results_table.grant_read_data(upload_status_task.lambda_function)
        validation_results_table.grant(upload_status_task.lambda_function, "dynamodb:DescribeTable")

        upload_status_task.lambda_function.add_to_role_policy(ALLOW_DESCRIBE_ANY_S3_JOB)

        # Parameters
        import_asset_file_function_arn_parameter = aws_ssm.StringParameter(
            self,
            "import asset file function arn",
            string_value=import_asset_file_function.function_arn,
            description=f"Import asset file function ARN for {env_name}",
            parameter_name=ParameterName.PROCESSING_IMPORT_ASSET_FILE_FUNCTION_TASK_ARN.value,
        )
        import_metadata_file_function_arn_parameter = aws_ssm.StringParameter(
            self,
            "import metadata file function arn",
            string_value=import_metadata_file_function.function_arn,
            description=f"Import metadata file function ARN for {env_name}",
            parameter_name=ParameterName.PROCESSING_IMPORT_METADATA_FILE_FUNCTION_TASK_ARN.value,
        )

        import_dataset_role_arn_parameter = aws_ssm.StringParameter(
            self,
            "import dataset role arn",
            string_value=import_dataset_role.role_arn,
            description=f"Import dataset role ARN for {env_name}",
            parameter_name=ParameterName.PROCESSING_IMPORT_DATASET_ROLE_ARN.value,
        )

        update_dataset_catalog = LambdaTask(
            self,
            "update-dataset-catalog",
            directory="update_dataset_catalog",
            botocore_lambda_layer=botocore_lambda_layer,
            extra_environment={ENV_NAME_VARIABLE_NAME: env_name},
        )
        self.message_queue.grant_send_messages(update_dataset_catalog.lambda_function)

        for storage_writer in [
            import_dataset_role,
            import_dataset_task.lambda_function,
            import_asset_file_function,
            import_metadata_file_function,
            populate_catalog_lambda,
            update_dataset_catalog.lambda_function,
        ]:
            storage_bucket.grant_read_write(storage_writer)  # type: ignore[arg-type]

        grant_parameter_read_access(
            {
                import_asset_file_function_arn_parameter: [import_dataset_task.lambda_function],
                import_dataset_role_arn_parameter: [import_dataset_task.lambda_function],
                import_metadata_file_function_arn_parameter: [import_dataset_task.lambda_function],
                processing_assets_table.name_parameter: [
                    check_stac_metadata_task.lambda_function,
                    content_iterator_task.lambda_function,
                    import_dataset_task.lambda_function,
                ],
                validation_results_table.name_parameter: [
                    check_stac_metadata_task.lambda_function,
                    content_iterator_task.lambda_function,
                    validation_summary_task.lambda_function,
                    upload_status_task.lambda_function,
                ],
                self.message_queue_name_parameter: [update_dataset_catalog.lambda_function],
            }
        )

        success_task = aws_stepfunctions.Succeed(self, "success")
        upload_failure = aws_stepfunctions.Fail(self, "upload failure")
        validation_failure = aws_stepfunctions.Succeed(self, "validation failure")

        ############################################################################################
        # STATE MACHINE
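        # Flow: validate the STAC metadata, iterate content in batches, run the checksum
        # checks as a single Batch job or an array job, loop until iteration is finished,
        # summarise validation, then either start the S3 Batch Operations import and poll
        # its status until both uploads complete (or fail), update the dataset catalog,
        # or stop at "validation failure".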
        dataset_version_creation_definition = (
            check_stac_metadata_task.next(content_iterator_task)
            .next(
                aws_stepfunctions.Choice(  # type: ignore[arg-type]
                    self, "check_files_checksums_maybe_array"
                )
                .when(
                    aws_stepfunctions.Condition.number_equals(
                        f"$.{CONTENT_KEY}.{ITERATION_SIZE_KEY}", 1
                    ),
                    check_files_checksums_single_task.batch_submit_job,
                )
                .otherwise(check_files_checksums_array_task.batch_submit_job)
                .afterwards()
            )
            .next(
                aws_stepfunctions.Choice(self, "content_iteration_finished")
                .when(
                    aws_stepfunctions.Condition.number_equals(
                        f"$.{CONTENT_KEY}.{NEXT_ITEM_KEY}", -1
                    ),
                    validation_summary_task.next(
                        aws_stepfunctions.Choice(  # type: ignore[arg-type]
                            self, "validation_successful"
                        )
                        .when(
                            aws_stepfunctions.Condition.boolean_equals(
                                f"$.{VALIDATION_KEY}.{SUCCESS_KEY}", True
                            ),
                            import_dataset_task.next(
                                wait_before_upload_status_check  # type: ignore[arg-type]
                            )
                            .next(upload_status_task)
                            .next(
                                aws_stepfunctions.Choice(
                                    self, "import_completed"  # type: ignore[arg-type]
                                )
                                .when(
                                    aws_stepfunctions.Condition.and_(
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{ASSET_UPLOAD_KEY}.status", "Complete"
                                        ),
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{METADATA_UPLOAD_KEY}.status",
                                            "Complete",
                                        ),
                                    ),
                                    update_dataset_catalog.next(
                                        success_task  # type: ignore[arg-type]
                                    ),
                                )
                                .when(
                                    aws_stepfunctions.Condition.or_(
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{ASSET_UPLOAD_KEY}.status",
                                            "Cancelled",
                                        ),
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{ASSET_UPLOAD_KEY}.status", "Failed"
                                        ),
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{METADATA_UPLOAD_KEY}.status",
                                            "Cancelled",
                                        ),
                                        aws_stepfunctions.Condition.string_equals(
                                            f"$.upload_status.{METADATA_UPLOAD_KEY}.status",
                                            "Failed",
                                        ),
                                    ),
                                    upload_failure,  # type: ignore[arg-type]
                                )
                                .otherwise(
                                    wait_before_upload_status_check  # type: ignore[arg-type]
                                )
                            ),
                        )
                        .otherwise(validation_failure)  # type: ignore[arg-type]
                    ),
                )
                .otherwise(content_iterator_task)
            )
        )

        self.state_machine = aws_stepfunctions.StateMachine(
            self,
            f"{env_name}-dataset-version-creation",
            definition=dataset_version_creation_definition,  # type: ignore[arg-type]
        )

        self.state_machine_parameter = aws_ssm.StringParameter(
            self,
            "state machine arn",
            description=f"State machine ARN for {env_name}",
            parameter_name=ParameterName.PROCESSING_DATASET_VERSION_CREATION_STEP_FUNCTION_ARN.value,  # pylint:disable=line-too-long
            string_value=self.state_machine.state_machine_arn,
        )

        Tags.of(self).add("ApplicationLayer", "processing")  # type: ignore[arg-type]
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id)

        lambda_dir = kwargs["lambda_dir"]

        # Note: A dead-letter queue is optional but it helps capture any failed messages
        dlq = sqs.Queue(self,
                        id="dead_letter_queue_id",
                        retention_period=Duration.days(7))
        dead_letter_queue = sqs.DeadLetterQueue(max_receive_count=1, queue=dlq)

        upload_queue = sqs.Queue(self,
                                 id="sample_queue_id",
                                 visibility_timeout=Duration.seconds(30),
                                 dead_letter_queue=dead_letter_queue)

        sqs_subscription = sns_subs.SqsSubscription(upload_queue,
                                                    raw_message_delivery=True)

        upload_event_topic = sns.Topic(self, id="sample_sns_topic_id")

        # This binds the SNS Topic to the SQS Queue
        upload_event_topic.add_subscription(sqs_subscription)

        # Note: Lifecycle Rules are optional but are included here to keep costs
        #       low by cleaning up old files or moving them to lower cost storage options
        s3_bucket = s3.Bucket(
            self,
            id="sample_bucket_id",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            versioned=True,
            lifecycle_rules=[
                s3.LifecycleRule(
                    enabled=True,
                    expiration=Duration.days(365),
                    transitions=[
                        s3.Transition(
                            storage_class=s3.StorageClass.INFREQUENT_ACCESS,
                            transition_after=Duration.days(30)),
                        s3.Transition(storage_class=s3.StorageClass.GLACIER,
                                      transition_after=Duration.days(90)),
                    ])
            ])

        # Note: If you don't specify a filter, all uploads will trigger an event.
        #       You can also modify the event type to handle other object operations.
        # This binds the S3 bucket to the SNS Topic
        s3_bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED_PUT,
            s3n.SnsDestination(upload_event_topic),
            s3.NotificationKeyFilter(prefix="uploads", suffix=".csv"))

        function = _lambda.Function(
            self,
            "lambda_function",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="lambda_function.handler",
            code=_lambda.Code.from_asset(path=lambda_dir))

        # This binds the lambda to the SQS Queue
        invoke_event_source = lambda_events.SqsEventSource(upload_queue)
        function.add_event_source(invoke_event_source)

        # Examples of CloudFormation outputs
        CfnOutput(
            self,
            "UploadFileToS3Example",
            value="aws s3 cp <local-path-to-file> s3://{}/uploads/".format(
                s3_bucket.bucket_name),
            description=
            "Upload a .csv file under the uploads/ prefix (using AWS CLI) to trigger the SQS chain",
        )
        CfnOutput(
            self,
            "UploadSqsQueueUrl",
            value=upload_queue.queue_url,
            description="Link to the SQS Queue triggered on S3 uploads",
        )
        CfnOutput(
            self,
            "LambdaFunctionName",
            value=function.function_name,
        )
        CfnOutput(
            self,
            "LambdaFunctionLogGroupName",
            value=function.log_group.log_group_name,
        )
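The handler code under `lambda_dir` is not part of this listing. A minimal sketch of what it might look like, assuming the raw message delivery configured above, so each SQS record body is the S3 notification JSON rather than an SNS envelope; the print call stands in for real processing.

import json


def handler(event, context):
    # Hypothetical handler for the SQS-triggered function above.
    for record in event["Records"]:
        s3_event = json.loads(record["body"])
        for s3_record in s3_event.get("Records", []):
            bucket = s3_record["s3"]["bucket"]["name"]
            key = s3_record["s3"]["object"]["key"]
            print(f"New upload: s3://{bucket}/{key}")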
Example #29
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        # Image Bucket
        image_bucket = s3.Bucket(self,
                                 IMG_BUCKET_NAME,
                                 removal_policy=cdk.RemovalPolicy.DESTROY)
        cdk.CfnOutput(self, "imageBucket", value=image_bucket.bucket_name)

        image_bucket.add_cors_rule(
            allowed_methods=[s3.HttpMethods.GET, s3.HttpMethods.PUT],
            allowed_origins=["*"],
            allowed_headers=["*"],
            max_age=3000,
        )

        # Thumbnail Bucket
        resized_image_bucket = s3.Bucket(
            self,
            RESIZED_IMG_BUCKET_NAME,
            removal_policy=cdk.RemovalPolicy.DESTROY)
        cdk.CfnOutput(self,
                      "resizedBucket",
                      value=resized_image_bucket.bucket_name)

        resized_image_bucket.add_cors_rule(
            allowed_methods=[s3.HttpMethods.GET, s3.HttpMethods.PUT],
            allowed_origins=["*"],
            allowed_headers=["*"],
            max_age=3000,
        )
        # S3 Static bucket for website code
        web_bucket = s3.Bucket(
            self,
            WEBSITE_BUCKET_NAME,
            website_index_document="index.html",
            website_error_document="index.html",
            removal_policy=cdk.RemovalPolicy.DESTROY,
            # uncomment this and delete the policy statement below to allow public access to our
            # static website
            # public_read_access=True
        )

        web_policy_statement = iam.PolicyStatement(
            actions=["s3:GetObject"],
            resources=[web_bucket.arn_for_objects("*")],
            principals=[iam.AnyPrincipal()],
            conditions={"IpAddress": {
                "aws:SourceIp": ["139.138.203.36"]
            }},
        )

        web_bucket.add_to_resource_policy(web_policy_statement)

        cdk.CfnOutput(self,
                      "bucketURL",
                      value=web_bucket.bucket_website_domain_name)

        # Deploy site contents to S3 Bucket
        s3_dep.BucketDeployment(
            self,
            "DeployWebsite",
            sources=[s3_dep.Source.asset("./public")],
            destination_bucket=web_bucket,
        )

        # DynamoDB to store image labels
        partition_key = dynamodb.Attribute(name="image",
                                           type=dynamodb.AttributeType.STRING)
        table = dynamodb.Table(
            self,
            "ImageLabels",
            partition_key=partition_key,
            removal_policy=cdk.RemovalPolicy.DESTROY,
        )
        cdk.CfnOutput(self, "ddbTable", value=table.table_name)

        # Lambda layer for Pillow library
        layer = lb.LayerVersion(
            self,
            "pil",
            code=lb.Code.from_asset("reklayer"),
            compatible_runtimes=[lb.Runtime.PYTHON_3_7],
            license="Apache-2.0",
            description=
            "A layer to enable the PIL library in our Rekognition Lambda",
        )

        # Lambda function
        rek_fn = lb.Function(
            self,
            "rekognitionFunction",
            code=lb.Code.from_asset("rekognitionFunction"),
            runtime=lb.Runtime.PYTHON_3_7,
            handler="index.handler",
            timeout=cdk.Duration.seconds(30),
            memory_size=1024,
            layers=[layer],
            environment={
                "TABLE": table.table_name,
                "BUCKET": image_bucket.bucket_name,
                "THUMBBUCKET": resized_image_bucket.bucket_name,
            },
        )

        image_bucket.grant_read(rek_fn)
        resized_image_bucket.grant_write(rek_fn)
        table.grant_write_data(rek_fn)

        rek_fn.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=["rekognition:DetectLabels"],
                                resources=["*"]))

        # Lambda for Synchronous front end
        serviceFn = lb.Function(
            self,
            "serviceFunction",
            code=lb.Code.from_asset("servicelambda"),
            runtime=lb.Runtime.PYTHON_3_7,
            handler="index.handler",
            environment={
                "TABLE": table.table_name,
                "BUCKET": image_bucket.bucket_name,
                "RESIZEDBUCKET": resized_image_bucket.bucket_name,
            },
        )

        image_bucket.grant_write(serviceFn)
        resized_image_bucket.grant_write(serviceFn)
        table.grant_read_write_data(serviceFn)

        # Cognito User Pool Auth
        auto_verified_attrs = cognito.AutoVerifiedAttrs(email=True)
        sign_in_aliases = cognito.SignInAliases(email=True, username=True)
        user_pool = cognito.UserPool(
            self,
            "UserPool",
            self_sign_up_enabled=True,
            auto_verify=auto_verified_attrs,
            sign_in_aliases=sign_in_aliases,
        )

        user_pool_client = cognito.UserPoolClient(self,
                                                  "UserPoolClient",
                                                  user_pool=user_pool,
                                                  generate_secret=False)

        identity_pool = cognito.CfnIdentityPool(
            self,
            "ImageRekognitionIdentityPool",
            allow_unauthenticated_identities=False,
            cognito_identity_providers=[{
                "clientId":
                user_pool_client.user_pool_client_id,
                "providerName":
                user_pool.user_pool_provider_name,
            }],
        )

        # API Gateway
        cors_options = apigw.CorsOptions(allow_origins=apigw.Cors.ALL_ORIGINS,
                                         allow_methods=apigw.Cors.ALL_METHODS)
        api = apigw.LambdaRestApi(
            self,
            "imageAPI",
            default_cors_preflight_options=cors_options,
            handler=serviceFn,
            proxy=False,
        )

        auth = apigw.CfnAuthorizer(
            self,
            "ApiGatewayAuthorizer",
            name="customer-authorizer",
            identity_source="method.request.header.Authorization",
            provider_arns=[user_pool.user_pool_arn],
            rest_api_id=api.rest_api_id,
            # type=apigw.AuthorizationType.COGNITO,
            type="COGNITO_USER_POOLS",
        )

        assumed_by = iam.FederatedPrincipal(
            "cognito-identity.amazon.com",
            conditions={
                "StringEquals": {
                    "cognito-identity.amazonaws.com:aud": identity_pool.ref
                },
                "ForAnyValue:StringLike": {
                    "cognito-identity.amazonaws.com:amr": "authenticated"
                },
            },
            assume_role_action="sts:AssumeRoleWithWebIdentity",
        )
        authenticated_role = iam.Role(
            self,
            "ImageRekognitionAuthenticatedRole",
            assumed_by=assumed_by,
        )
        # IAM policy granting users permission to get and put their pictures
        policy_statement = iam.PolicyStatement(
            actions=["s3:GetObject", "s3:PutObject"],
            effect=iam.Effect.ALLOW,
            resources=[
                image_bucket.bucket_arn +
                "/private/${cognito-identity.amazonaws.com:sub}/*",
                image_bucket.bucket_arn +
                "/private/${cognito-identity.amazonaws.com:sub}/",
                resized_image_bucket.bucket_arn +
                "/private/${cognito-identity.amazonaws.com:sub}/*",
                resized_image_bucket.bucket_arn +
                "/private/${cognito-identity.amazonaws.com:sub}/",
            ],
        )

        # IAM policy granting users permission to list their pictures
        list_policy_statement = iam.PolicyStatement(
            actions=["s3:ListBucket"],
            effect=iam.Effect.ALLOW,
            resources=[
                image_bucket.bucket_arn, resized_image_bucket.bucket_arn
            ],
            conditions={
                "StringLike": {
                    "s3:prefix":
                    ["private/${cognito-identity.amazonaws.com:sub}/*"]
                }
            },
        )

        authenticated_role.add_to_policy(policy_statement)
        authenticated_role.add_to_policy(list_policy_statement)

        # Attach role to our Identity Pool
        cognito.CfnIdentityPoolRoleAttachment(
            self,
            "IdentityPoolRoleAttachment",
            identity_pool_id=identity_pool.ref,
            roles={"authenticated": authenticated_role.role_arn},
        )

        # Get some outputs from cognito
        cdk.CfnOutput(self, "UserPoolId", value=user_pool.user_pool_id)
        cdk.CfnOutput(self,
                      "AppClientId",
                      value=user_pool_client.user_pool_client_id)
        cdk.CfnOutput(self, "IdentityPoolId", value=identity_pool.ref)

        # New Amazon API Gateway with AWS Lambda Integration
        success_response = apigw.IntegrationResponse(
            status_code="200",
            response_parameters={
                "method.response.header.Access-Control-Allow-Origin": "'*'"
            },
        )
        error_response = apigw.IntegrationResponse(
            selection_pattern="(\n|.)+",
            status_code="500",
            response_parameters={
                "method.response.header.Access-Control-Allow-Origin": "'*'"
            },
        )

        request_template = json.dumps({
            "action":
            "$util.escapeJavaScript($input.params('action'))",
            "key":
            "$util.escapeJavaScript($input.params('key'))",
        })

        lambda_integration = apigw.LambdaIntegration(
            serviceFn,
            proxy=False,
            request_parameters={
                "integration.request.querystring.action":
                "method.request.querystring.action",
                "integration.request.querystring.key":
                "method.request.querystring.key",
            },
            request_templates={"application/json": request_template},
            passthrough_behavior=apigw.PassthroughBehavior.WHEN_NO_TEMPLATES,
            integration_responses=[success_response, error_response],
        )

        imageAPI = api.root.add_resource("images")

        success_resp = apigw.MethodResponse(
            status_code="200",
            response_parameters={
                "method.response.header.Access-Control-Allow-Origin": True
            },
        )
        error_resp = apigw.MethodResponse(
            status_code="500",
            response_parameters={
                "method.response.header.Access-Control-Allow-Origin": True
            },
        )

        # GET /images
        get_method = imageAPI.add_method(
            "GET",
            lambda_integration,
            authorization_type=apigw.AuthorizationType.COGNITO,
            request_parameters={
                "method.request.querystring.action": True,
                "method.request.querystring.key": True,
            },
            method_responses=[success_resp, error_resp],
        )
        # DELETE /images
        delete_method = imageAPI.add_method(
            "DELETE",
            lambda_integration,
            authorization_type=apigw.AuthorizationType.COGNITO,
            request_parameters={
                "method.request.querystring.action": True,
                "method.request.querystring.key": True,
            },
            method_responses=[success_resp, error_resp],
        )

        # Override the authorizer id because it doesn't work when defining it
        # as a param in add_method
        get_method_resource = get_method.node.find_child("Resource")
        get_method_resource.add_property_override("AuthorizerId", auth.ref)
        delete_method_resource = delete_method.node.find_child("Resource")
        delete_method_resource.add_property_override("AuthorizerId", auth.ref)

        # Building SQS queue and DeadLetter Queue
        dl_queue = sqs.Queue(
            self,
            "ImageDLQueue",
            queue_name="ImageDLQueue",
        )

        dl_queue_opts = sqs.DeadLetterQueue(max_receive_count=2,
                                            queue=dl_queue)

        queue = sqs.Queue(
            self,
            "ImageQueue",
            queue_name="ImageQueue",
            visibility_timeout=cdk.Duration.seconds(30),
            receive_message_wait_time=cdk.Duration.seconds(20),
            dead_letter_queue=dl_queue_opts,
        )

        # S3 Bucket Create Notification to SQS
        # Whenever an image is uploaded add it to the queue

        image_bucket.add_object_created_notification(
            s3n.SqsDestination(queue),
            s3.NotificationKeyFilter(prefix="private/"))
Example #30
    def create_all_queues(self) -> None:
        """
        Create all STACK queues, attach subscriptions and alarms
        """

        # General DLQs for lambdas (not API)
        self.create_queue(id="dead_letter_queue")
        general_dlq_alarm = cloudwatch.Alarm(
            self,
            "DLQAlarm",
            metric=self.queues_["dead_letter_queue"].metric(
                "ApproximateNumberOfMessagesVisible"),
            evaluation_periods=1,
            threshold=0.0,
            comparison_operator=ComparisonOperator.GREATER_THAN_THRESHOLD,
        )
        general_dlq_alarm.add_alarm_action(
            cw_actions.SnsAction(self.topics_["alarm_topic"]))

        # DLQ for API lambdas
        self.create_queue(id="api_dead_letter_queue")
        api_dlq_alarm = cloudwatch.Alarm(
            self,
            "APIDLQAlarm",
            metric=self.queues_["api_dead_letter_queue"].metric(
                "ApproximateNumberOfMessagesVisible"),
            evaluation_periods=1,
            threshold=0.0,
            comparison_operator=ComparisonOperator.GREATER_THAN_THRESHOLD,
        )
        api_dlq_alarm.add_alarm_action(
            cw_actions.SnsAction(self.topics_["alarm_topic"]))

        # The new_scenes_queue subscribes to CBERS 4/4A quicklook notifications.
        # The STAC items are generated from the original INPE metadata file as
        # soon as the quicklooks are created in the PDS bucket.
        # This code fragment creates the queue and the associated DLQ, and
        # subscribes to the CBERS 4/4A quicklook notification topics.
        self.create_queue(
            id="process_new_scenes_queue_dlq",
            retention_period=core.Duration.seconds(1209600),
        )
        process_new_scenes_queue_alarm = cloudwatch.Alarm(
            self,
            "ProcessNewScenesQueueAlarm",
            metric=self.queues_["process_new_scenes_queue_dlq"].metric(
                "ApproximateNumberOfMessagesVisible"),
            evaluation_periods=1,
            threshold=0.0,
            comparison_operator=ComparisonOperator.GREATER_THAN_THRESHOLD,
        )
        process_new_scenes_queue_alarm.add_alarm_action(
            cw_actions.SnsAction(self.topics_["alarm_topic"]))
        self.create_queue(
            id="new_scenes_queue",
            visibility_timeout=core.Duration.seconds(385),
            retention_period=core.Duration.seconds(1209600),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=1,
                queue=self.queues_["process_new_scenes_queue_dlq"]),
        )
        # Add subscriptions for each CB4 camera
        sns.Topic.from_topic_arn(
            self,
            id="CB4MUX",
            topic_arn="arn:aws:sns:us-east-1:599544552497:NewCB4MUXQuicklook",
        ).add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["new_scenes_queue"]))
        sns.Topic.from_topic_arn(
            self,
            id="CB4AWFI",
            topic_arn="arn:aws:sns:us-east-1:599544552497:NewCB4AWFIQuicklook",
        ).add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["new_scenes_queue"]))
        sns.Topic.from_topic_arn(
            self,
            id="CB4PAN10M",
            topic_arn=
            "arn:aws:sns:us-east-1:599544552497:NewCB4PAN10MQuicklook",
        ).add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["new_scenes_queue"]))
        sns.Topic.from_topic_arn(
            self,
            id="CBPAN5M",
            topic_arn="arn:aws:sns:us-east-1:599544552497:NewCB4PAN5MQuicklook",
        ).add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["new_scenes_queue"]))
        # Subscription for CB4A (all cameras)
        sns.Topic.from_topic_arn(
            self,
            id="CB4A",
            topic_arn="arn:aws:sns:us-east-1:599544552497:NewCB4AQuicklook",
        ).add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["new_scenes_queue"]))

        self.create_queue(
            id="catalog_prefix_update_queue",
            visibility_timeout=core.Duration.seconds(60),
            retention_period=core.Duration.seconds(1209600),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3, queue=self.queues_["dead_letter_queue"]),
        )

        # Reconcile queue for INPE's XML metadata
        self.create_queue(
            id="consume_reconcile_queue_dlq",
            retention_period=core.Duration.seconds(1209600),
        )
        consume_reconcile_queue_alarm = cloudwatch.Alarm(
            self,
            "ConsumeReconcileQueueAlarm",
            metric=self.queues_["consume_reconcile_queue_dlq"].metric(
                "ApproximateNumberOfMessagesVisible"),
            evaluation_periods=1,
            threshold=0.0,
            comparison_operator=ComparisonOperator.GREATER_THAN_THRESHOLD,
        )
        consume_reconcile_queue_alarm.add_alarm_action(
            cw_actions.SnsAction(self.topics_["alarm_topic"]))
        self.create_queue(
            id="reconcile_queue",
            visibility_timeout=core.Duration.seconds(1000),
            retention_period=core.Duration.seconds(1209600),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3,
                queue=self.queues_["consume_reconcile_queue_dlq"]),
        )

        # Reconcile queue for STAC items
        self.create_queue(
            id="consume_stac_reconcile_queue_dlq",
            retention_period=core.Duration.seconds(1209600),
        )
        consume_stac_reconcile_queue_alarm = cloudwatch.Alarm(
            self,
            "ConsumeStacReconcileQueueAlarm",
            metric=self.queues_["consume_stac_reconcile_queue_dlq"].metric(
                "ApproximateNumberOfMessagesVisible"),
            evaluation_periods=1,
            threshold=0.0,
            comparison_operator=ComparisonOperator.GREATER_THAN_THRESHOLD,
        )
        consume_stac_reconcile_queue_alarm.add_alarm_action(
            cw_actions.SnsAction(self.topics_["alarm_topic"]))
        self.create_queue(
            id="stac_reconcile_queue",
            visibility_timeout=core.Duration.seconds(1000),
            retention_period=core.Duration.seconds(1209600),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3,
                queue=self.queues_["consume_stac_reconcile_queue_dlq"],
            ),
        )

        # Queue for STAC items to be inserted into Elasticsearch. Subscribes to
        # the topics that publish new and reconciled STAC items.
        self.create_queue(
            id="insert_into_elasticsearch_queue",
            visibility_timeout=core.Duration.seconds(180),
            retention_period=core.Duration.seconds(1209600),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3, queue=self.queues_["dead_letter_queue"]),
        )
        # Subscription for new item topics
        self.topics_["stac_item_topic"].add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["insert_into_elasticsearch_queue"]))
        # Subscription for reconciled item topics
        self.topics_["reconcile_stac_item_topic"].add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["insert_into_elasticsearch_queue"]))

        # Backup queue for STAC items inserted into Elasticsearch.
        # This holds the same items received by "insert_into_elasticsearch_queue",
        # simply retaining them for some time to allow recovery from ES
        # cluster failures (see #78).
        # This queue subscribes only to the new item topic.
        self.create_queue(
            id="backup_insert_into_elasticsearch_queue",
            visibility_timeout=core.Duration.seconds(180),
            retention_period=core.Duration.days(
                settings.backup_queue_retention_days),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3, queue=self.queues_["dead_letter_queue"]),
        )
        # Subscription for new item topics
        self.topics_["stac_item_topic"].add_subscription(
            sns_subscriptions.SqsSubscription(
                self.queues_["backup_insert_into_elasticsearch_queue"]))