Example #1
    def __init__(self, scope: core.Construct, id: str, *, prefix: str,
                 environment: str, configuration, **kwargs):
        """
        :param scope: Stack class, used by CDK.
        :param id: ID of the construct, used by CDK.
        :param prefix: Prefix of the construct, used for naming purposes.
        :param environment: Environment of the construct, used for naming purposes.
        :param configuration: Configuration of the construct. In this case SQS_CONFIG_SCHEMA.
        :param kwargs: Other parameters that could be used by the construct.
        """
        super().__init__(scope, id, **kwargs)
        self.prefix = prefix
        self.environment_ = environment
        self._configuration = configuration

        # Validating that the payload passed is correct
        validate_configuration(configuration_schema=SQS_CONFIG_SCHEMA,
                               configuration_received=self._configuration)

        # Defining SQS Queue
        queue_data = deepcopy(self._configuration["queue"])
        self._sqs_queue = base_queue(construct=self, **queue_data)

        # Defining Lambda functions and attaching the SQS event source
        functions_data = self._configuration["lambda_handlers"]
        self._lambda_functions = list()
        for lambda_function in functions_data:
            _lambda_function = base_lambda_function(self, **lambda_function)
            self._lambda_functions.append(_lambda_function)

            _lambda_function.add_event_source(
                lambda_sources.SqsEventSource(queue=self._sqs_queue,
                                              batch_size=10))
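
For reference, a minimal sketch of a handler such an event source would invoke. The entry-point name and the JSON message bodies are assumptions; the snippet above only wires the queue to the function:

import json

def handler(event, context):
    # SqsEventSource delivers up to batch_size (10 here) messages per invocation
    for record in event["Records"]:
        payload = json.loads(record["body"])  # assumes JSON message bodies
        print(f"Processing message {record['messageId']}: {payload}")
    # Raising an exception instead would return the whole batch to the queue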
Example #2
    def catalog(self, bucket, notification_queue):
        catalog_table = _ddb.Table(
            self,
            "CatalogTable",
            billing_mode=_ddb.BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
            partition_key=_ddb.Attribute(name='Source',
                                         type=_ddb.AttributeType.STRING),
            sort_key=_ddb.Attribute(name='Timestamp',
                                    type=_ddb.AttributeType.STRING))

        event_recorder = _lambda.Function(
            self,
            "EventRecorder",
            handler='lambda_function.lambda_handler',
            # https://github.com/aws/aws-cdk/issues/5491
            # pylint: disable=no-value-for-parameter
            code=_lambda.Code.asset('src/event_recorder'),
            runtime=_lambda.Runtime.PYTHON_3_7,
            log_retention=_logs.RetentionDays.ONE_MONTH,
            environment={
                'BUCKET_NAME': bucket.bucket_name,
                'TABLE_NAME': catalog_table.table_name,
                'TOPIC_SSM_PREFIX':
                "/{}/DistributionTopics/".format(self.stack_id)
            })
        bucket.grant_read(event_recorder)
        catalog_table.grant_write_data(event_recorder)
        event_recorder.add_event_source(
            _lambda_event_sources.SqsEventSource(notification_queue))
        return catalog_table, event_recorder
Example #3
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = _sqs.Queue(self,
                           "VsamToDynamoQueue",
                           visibility_timeout=Duration.seconds(300),
                           queue_name='VsamToDynamoQueue')

        dynamoTable = _dyn.Table(
            self,
            "CLIENT",
            partition_key=_dyn.Attribute(name="CLIENT-KEY",
                                         type=_dyn.AttributeType.STRING),
            table_name="CLIENT",
        )

        # Create the Lambda function to subscribe to SQS and store the record in DynamoDB
        # The source code is in the './lambda_fns' directory
        lambda_fn = _lambda.Function(
            self,
            "SQSToDynamoFunction",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="insertRecord.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
        )

        dynamoTable.grant_write_data(lambda_fn)

        queue.grant_consume_messages(lambda_fn)
        lambda_fn.add_event_source(_event.SqsEventSource(queue))
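
A minimal sketch of what insertRecord.handler might look like, assuming each SQS message body is a JSON object that already contains the table's CLIENT-KEY partition key; the message format is an assumption, not shown above:

import json

import boto3

dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table("CLIENT")  # matches the table_name in the CDK code above

def handler(event, context):
    for record in event["Records"]:
        item = json.loads(record["body"])  # assumed JSON with a "CLIENT-KEY" field
        table.put_item(Item=item)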
Example #4
    def __init__(self, scope: core.Construct, id: str, sns_topic_arn: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Queue Setup
        sqs_queue = sqs.Queue(self, 'RDSPublishQueue', visibility_timeout=core.Duration.seconds(300))

        sqs_lambda = _lambda.Function(self, "sqsLambdaHandler",
                                      runtime=_lambda.Runtime.NODEJS_12_X,
                                      handler="sqs.handler",
                                      code=_lambda.Code.from_asset("lambda_fns"),
                                      tracing=_lambda.Tracing.ACTIVE,
                                      environment={
                                          "SQS_URL": sqs_queue.queue_url
                                      }
                                      )
        sqs_queue.grant_send_messages(sqs_lambda)

        topic = sns.Topic.from_topic_arn(self, 'SNSTopic', sns_topic_arn)
        topic.add_subscription(subscriptions.LambdaSubscription(sqs_lambda))

        sqs_subscribe_lambda = _lambda.Function(self, "sqsSubscribeLambdaHandler",
                                                runtime=_lambda.Runtime.NODEJS_12_X,
                                                handler="sqs_subscribe.handler",
                                                code=_lambda.Code.from_asset("lambda_fns"),
                                                tracing=_lambda.Tracing.ACTIVE
                                                )
        sqs_queue.grant_consume_messages(sqs_subscribe_lambda)
        sqs_subscribe_lambda.add_event_source(lambda_event.SqsEventSource(sqs_queue))
Example #5
    def setLambdaTriggers(self):
        ## **************** Event Sources & Triggers ****************
        # Read the Comprehend completion notification and prepare triples for Neptune
        self.sqs_queue = sqs.Queue.from_queue_arn(
            self, "CDKQueue",
            self.comprehend_complete_sqs.get_att("Arn").to_string()  # f'arn:aws:sqs:us-east-1:{core.Aws.ACCOUNT_ID}:{self.comprehend_complete_sqs.queue_name}'
        )
        self.generate_triples_lambda.add_event_source(aws_lambda_event_sources.SqsEventSource(self.sqs_queue, batch_size=1))
Example #6
  def add_reviewer(self):
    reviewer = AnalyzerLambda(self,'review-queued-repo',
      project_name='review-queued-repo',
      concurrency= 50,
      datalake=self.datalake)

    queue = sqs.Queue(self,'PendingReviewQueue', 
      visibility_timeout= core.Duration.minutes(15))
    reviewer.function.add_event_source(events.SqsEventSource(queue=queue, batch_size=1))
Example #7
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        queue = _sqs.Queue(self,
                           "MyQueue",
                           visibility_timeout=Duration.seconds(300))

        # Create the AWS Lambda function to subscribe to Amazon SQS queue
        # The source code is in './lambda' directory
        lambda_function = _lambda.Function(
            self,
            "MyLambdaFunction",
            runtime=_lambda.Runtime.PYTHON_3_9,
            handler="get_messages.handler",
            code=_lambda.Code.from_asset("lambda"),
        )

        # Set Lambda Logs Retention and Removal Policy
        logs.LogGroup(
            self,
            'logs',
            log_group_name=f"/aws/lambda/{lambda_function.function_name}",
            removal_policy=RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.ONE_DAY)
        #Grant permission to AWS Lambda function to consume messages from the Amazon SQS queue
        queue.grant_consume_messages(lambda_function)

        #Configure the Amazon SQS queue to trigger the AWS Lambda function
        lambda_function.add_event_source(_event.SqsEventSource(queue))

        CfnOutput(self,
                  "FunctionName",
                  value=lambda_function.function_name,
                  export_name='FunctionName',
                  description='Function name')

        CfnOutput(self,
                  "QueueName",
                  value=queue.queue_name,
                  export_name='QueueName',
                  description='SQS queue name')

        CfnOutput(self,
                  "QueueArn",
                  value=queue.queue_arn,
                  export_name='QueueArn',
                  description='SQS queue ARN')

        CfnOutput(self,
                  "QueueUrl",
                  value=queue.queue_url,
                  export_name='QueueUrl',
                  description='SQS queue URL')
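
Since each output sets export_name, another stack in the same account and region could consume these values; a sketch, assuming CDK v2 and that the code runs inside that other stack's __init__:

from aws_cdk import Fn, aws_sqs as sqs

# Resolve the exported ARN and wrap the existing queue without recreating it
queue_arn = Fn.import_value("QueueArn")  # matches the export_name above
imported_queue = sqs.Queue.from_queue_arn(self, "ImportedQueue", queue_arn)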
Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        queue = _sqs.Queue(self, 'HelloQueue')

        my_lambda = _lambda.Function(self,
                                     'HelloFunction',
                                     runtime=_lambda.Runtime.PYTHON_3_7,
                                     code=_lambda.Code.asset('lambda'),
                                     handler='hello.handler')

        my_lambda.add_event_source(event_sources.SqsEventSource(queue))
Example #9
    def create_events(self, services):
        # kickoff_notification = aws_s3_notifications.LambdaDestination(services["lambda"]["kickoff"])
        # S3 suffix filters are case-sensitive, so cover the common case permutations
        extensions = [
            "pdf", "pDf", "pDF", "pdF", "PDF", "Pdf",
            "png", "pNg", "pNG", "pnG", "PNG", "Png",
            "jpg", "jPg", "jPG", "jpG", "JPG", "Jpg"
        ]
        for extension in extensions:
            services["main_s3_bucket"].add_event_notification(
                aws_s3.EventType.OBJECT_CREATED,  
                aws_s3_notifications.SqsDestination(services["sf_sqs"]),
                aws_s3.NotificationKeyFilter(prefix="uploads/", suffix=extension)
            )    
        
        services["lambda"]["kickoff"].add_event_source(
            aws_lambda_event_sources.SqsEventSource(
                services["sf_sqs"], 
                batch_size=1
            )
        )
        
        services["lambda"]["analyzepdf"].add_event_source(
            aws_lambda_event_sources.SqsEventSource(
                services["textract_sqs"], 
                batch_size=1
            )
        )

        human_complete_target = aws_events_targets.LambdaFunction(services["lambda"]["humancomplete"])

        human_review_event_pattern = aws_events.EventPattern(
            source=["aws.sagemaker"],
            detail_type=["SageMaker A2I HumanLoop Status Change"]
        )

        aws_events.Rule(self, 
            "multipadepdfa2i_HumanReviewComplete", 
            event_pattern=human_review_event_pattern,
            targets=[human_complete_target]
        )
Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # DynamoDB Table
        # This is standing in for what is RDS on the diagram due to simpler/cheaper setup
        table = dynamo_db.Table(self,
                                "Messages",
                                partition_key=dynamo_db.Attribute(
                                    name="id",
                                    type=dynamo_db.AttributeType.STRING))

        # Queue Setup
        sqs_queue = sqs.Queue(self,
                              'RDSPublishQueue',
                              visibility_timeout=core.Duration.seconds(300))

        # defines an AWS Lambda resource to publish to our sqs_queue
        sqs_publish_lambda = _lambda.Function(
            self,
            "SQSPublishLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,  # execution environment
            handler="lambda.handler",  # file is "lambda", function is "handler"
            code=_lambda.Code.from_asset(
                "lambda_fns/publish"
            ),  # Code loaded from the lambda_fns/publish dir
            environment={'queueURL': sqs_queue.queue_url})
        sqs_queue.grant_send_messages(sqs_publish_lambda)

        # defines an AWS Lambda resource to pull from our sqs_queue
        sqs_subscribe_lambda = _lambda.Function(
            self,
            "SQSSubscribeLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,  # execution environment
            handler="lambda.handler",  # file is "lambda", function is "handler"
            code=_lambda.Code.from_asset(
                "lambda_fns/subscribe"
            ),  # Code loaded from the lambda_fns/subscribe dir
            environment={
                'queueURL': sqs_queue.queue_url,
                'tableName': table.table_name
            },
            reserved_concurrent_executions=2)
        sqs_queue.grant_consume_messages(sqs_subscribe_lambda)
        sqs_subscribe_lambda.add_event_source(
            lambda_event.SqsEventSource(sqs_queue))
        table.grant_read_write_data(sqs_subscribe_lambda)

        # defines an API Gateway REST API resource backed by our "sqs_publish_lambda" function.
        api_gw.LambdaRestApi(self, 'Endpoint', handler=sqs_publish_lambda)
Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create Primary S3 Bucket
        main_bucket = aws_s3.Bucket(self, "multipagepdfa2i", removal_policy=core.RemovalPolicy.DESTROY)
        
        # Create sqs queue
        page_sqs = aws_sqs.Queue(
            self, "multipagepdfa2i_page_sqs",
            queue_name = "multipagepdfa2i_page_sqs",
            visibility_timeout=core.Duration.minutes(3)
        )
        
        # Create all of the Lambda Functions
        lambda_functions = self.create_lambda_functions(page_sqs)

        # Create a notification that triggers the kickoff Lambda when a PDF is uploaded under uploads/
        kickoff_notification = aws_s3_notifications.LambdaDestination(lambda_functions["kickoff"])
        
        lambda_functions["analyzepdf"].add_event_source(aws_lambda_event_sources.SqsEventSource(page_sqs, batch_size=3))

        main_bucket.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED,  
            kickoff_notification,
            aws_s3.NotificationKeyFilter(prefix="uploads/", suffix="pdf")
        )

        self.configure_dynamo_table("multia2ipdf_callback", "jobid", "callback_token")
        self.configure_dynamo_table("multipagepdfa2i_upload_ids", "id", "key")       

        self.create_state_machine(lambda_functions, page_sqs)

        human_complete_target = aws_events_targets.LambdaFunction(lambda_functions["humancomplete"])

        human_review_event_pattern = aws_events.EventPattern(
            source=["aws.sagemaker"],
            detail_type=["SageMaker A2I HumanLoop Status Change"]
        )

        aws_events.Rule(self, 
            "multipadepdfa2i_HumanReviewComplete", 
            event_pattern=human_review_event_pattern,
            targets=[human_complete_target]
        )

        
Example #12
    def __init__(self, scope: core.Construct, id: str, publisher_lambda,
                 suscriber_lambda, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        queue_fail = sqs.Queue(self,
                               "queue_fail",
                               visibility_timeout=core.Duration.seconds(20))
        dlq = sqs.DeadLetterQueue(max_receive_count=100, queue=queue_fail)
        self.queue = sqs.Queue(self,
                               "base_queue",
                               visibility_timeout=core.Duration.seconds(20),
                               dead_letter_queue=dlq)

        # Configure the Lambda to consume messages from the queue
        self.queue.grant_consume_messages(suscriber_lambda)
        event_source = aws_lambda_event_sources.SqsEventSource(self.queue,
                                                               batch_size=1)
        suscriber_lambda.add_event_source(event_source)

        # The Lambda that publishes messages
        self.queue.grant_send_messages(publisher_lambda)
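
With this wiring, a message whose processing fails is returned to base_queue and retried; only after 100 failed receives does SQS move it to queue_fail. A handler signals failure simply by raising, for example (a sketch; the JSON bodies and the required_field check are hypothetical):

import json

def handler(event, context):
    for record in event["Records"]:
        body = json.loads(record["body"])  # assumes JSON message bodies
        if "required_field" not in body:  # hypothetical validation
            # Raising returns the message to the queue; after max_receive_count
            # receives, SQS moves it to the dead-letter queue (queue_fail)
            raise ValueError(f"malformed message {record['messageId']}")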
Example #13
    def __init__(self, scope: core.Construct, id: str, *, prefix: str, environment: str, configuration, **kwargs):
        """
        :param scope: Stack class, used by CDK.
        :param id: ID of the construct, used by CDK.
        :param prefix: Prefix of the construct, used for naming purposes.
        :param environment: Environment of the construct, used for naming purposes.
        :param configuration: Configuration of the construct. In this case IOT_SNS_CONFIG_SCHEMA.
        :param kwargs: Other parameters that could be used by the construct.
        """
        super().__init__(scope, id, **kwargs)
        self.prefix = prefix
        self.environment_ = environment
        self._configuration = configuration

        # Validating that the payload passed is correct
        validate_configuration(configuration_schema=IOT_SQS_CONFIG_SCHEMA, configuration_received=self._configuration)

        # Defining SQS Queue
        queue_data = deepcopy(self._configuration["queue"])
        self._sqs_queue = base_queue(construct=self, **queue_data)

        # Defining IAM Role
        role = base_sqs_role(self, resource_name=queue_data["queue_name"], principal_resource="iot")

        # Defining Lambda functions and attaching the SQS event source
        functions_data = self._configuration["lambda_handlers"]
        self._lambda_functions = list()
        for lambda_function in functions_data:
            _lambda_function = base_lambda_function(self, **lambda_function)
            self._lambda_functions.append(_lambda_function)

            _lambda_function.add_event_source(lambda_sources.SqsEventSource(queue=self._sqs_queue, batch_size=10))

        # Defining Topic Rule properties
        action = iot.CfnTopicRule.SqsActionProperty(queue_url=self._sqs_queue.queue_url, role_arn=role.role_arn)
        action_property = iot.CfnTopicRule.ActionProperty(sqs=action)

        rule_data = self._configuration["iot_rule"]
        self._iot_rule = base_iot_rule(self, action_property=action_property, **rule_data)
Example #14
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Creates Queue
        requestqueue = sqs.Queue(
            self,
            "RequestsQueue",
            visibility_timeout=core.Duration.seconds(300),
        )

        # Create DDB Table
        requests_table = ddb.Table(self,
                                   "requests_table",
                                   partition_key=ddb.Attribute(
                                       name="id",
                                       type=ddb.AttributeType.STRING))

        # Defines an AWS Lambda resource
        hello_lambda = _lambda.Function(
            self,
            'HelloHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='hello.handler',
        )

        # Add the DynamoDB table name as an environment variable
        hello_lambda.add_environment("TABLE_NAME", requests_table.table_name)

        # grant permission to lambda to write to demo table
        requests_table.grant_write_data(hello_lambda)

        # SQS Event Source for Lambda
        sqs_event_source = lambda_event_source.SqsEventSource(requestqueue)

        # SQS event source to Function
        hello_lambda.add_event_source(sqs_event_source)
Example #15
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)

        # Policies
        s3_access_policy = iam.ManagedPolicy.from_managed_policy_arn(
            self,
            id="s3_access_policy",
            managed_policy_arn="arn:aws:iam::aws:policy/AmazonS3FullAccess")

        lambda_access_policy = iam.ManagedPolicy.from_managed_policy_arn(
            self,
            id="lambda_access_policy",
            managed_policy_arn="arn:aws:iam::aws:policy/AWSLambda_FullAccess")

        logs_policy = iam.ManagedPolicy.from_managed_policy_arn(
            self,
            id="logs_policy",
            managed_policy_arn=
            "arn:aws:iam::aws:policy/CloudWatchLogsFullAccess")

        # Roles
        lambda_role = iam.Role(
            self,
            id="lambda_role",
            assumed_by=iam.ServicePrincipal(service="lambda.amazonaws.com"),
            managed_policies=[
                s3_access_policy, lambda_access_policy, logs_policy
            ],
            role_name=f"midi-to-mp3-lambda-role")

        # SQS
        conversion_sqs = sqs.Queue(self,
                                   id=f"conversion_sqs",
                                   queue_name=f"conversion_sqs",
                                   visibility_timeout=core.Duration.hours(12),
                                   retention_period=core.Duration.days(1))

        kickoff_sqs = sqs.Queue(self,
                                id=f"kickoff_sqs",
                                queue_name=f"kickoff_sqs",
                                visibility_timeout=core.Duration.hours(12),
                                retention_period=core.Duration.days(1))

        # S3

        midi_file_dropoff_bucket = s3.Bucket(
            self,
            id="midi_files_dropoff",
            bucket_name=MIDI_FILE_DROPOFF_BUCKET,
            auto_delete_objects=True,
            removal_policy=core.RemovalPolicy.DESTROY)

        midi_file_dropoff_bucket.add_event_notification(
            event=s3.EventType.OBJECT_CREATED,
            dest=s3n.SqsDestination(kickoff_sqs))

        created_mp3_files_bucket = s3.Bucket(
            self,
            id="created_mp3_files",
            bucket_name=CREATED_MP3_FILES_BUCKET,
            auto_delete_objects=True,
            removal_policy=core.RemovalPolicy.DESTROY)

        component_midi_files_bucket = s3.Bucket(
            self,
            id="component_midi_files",
            bucket_name=COMPONENT_MIDI_FILES_BUCKET,
            auto_delete_objects=True,
            removal_policy=core.RemovalPolicy.DESTROY)

        component_midi_files_bucket.add_event_notification(
            event=s3.EventType.OBJECT_CREATED,
            dest=s3n.SqsDestination(conversion_sqs))

        # Lambdas
        lambda_code = lambda_.DockerImageCode.from_image_asset(
            directory='./midi_to_mp3_lambda/',
            file="Dockerfile",
            build_args={
                "AWS_ACCESS_KEY_ID": os.environ.get("AWS_ACCESS_KEY_ID"),
                "AWS_SECRET_ACCESS_KEY":
                os.environ.get("AWS_SECRET_ACCESS_KEY")
            })

        midi_to_mp3_lambda = lambda_.DockerImageFunction(
            self,
            id="midi_to_mp3_lambda",
            role=lambda_role,
            function_name="midi-to-mp3",
            memory_size=1024,
            timeout=core.Duration.minutes(5),
            code=lambda_code)

        midi_split_lambda = alg.GoFunction(
            self,
            id="midi_split_lambda",
            entry="./midi_split_lambda/midi_split_lambda.go",
            timeout=core.Duration.minutes(15),
            runtime=lambda_.Runtime.GO_1_X,
            role=lambda_role,
            function_name=f"midi-split-lambda",
            memory_size=512,
            bundling={"environment": {
                "GO111MODULE": "off"
            }})

        s3_cleanup_lambda = lambda_.Function(
            self,
            id="s3_cleanup_lambda",
            runtime=lambda_.Runtime.PYTHON_3_8,
            role=lambda_role,
            function_name="s3-cleanup-lambda",
            memory_size=256,
            timeout=core.Duration.minutes(5),
            environment={"NUM_WEEKS_TO_KEEP_FILES": "1"},
            handler="s3_cleanup_lambda.handler",
            code=lambda_.Code.from_asset(os.path.join(".",
                                                      "s3_cleanup_lambda")))

        weekly_on_sunday_cron = events.Rule(
            self,
            "Rule",
            schedule=events.Schedule.cron(minute='0',
                                          hour='0',
                                          week_day="SUN",
                                          month='*',
                                          year='*'),
        )
        weekly_on_sunday_cron.add_target(
            targets.LambdaFunction(s3_cleanup_lambda))

        # Event Sources

        midi_to_mp3_lambda.add_event_source(
            eventsources.SqsEventSource(queue=conversion_sqs))

        midi_split_lambda.add_event_source(
            eventsources.SqsEventSource(queue=kickoff_sqs))
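
For context, a sketch of what s3_cleanup_lambda.handler could look like. NUM_WEEKS_TO_KEEP_FILES comes from the stack above; the BUCKET_NAME variable is a hypothetical addition, since the stack does not show which bucket is cleaned:

import os
from datetime import datetime, timedelta, timezone

import boto3

s3 = boto3.client("s3")

def handler(event, context):
    weeks = int(os.environ["NUM_WEEKS_TO_KEEP_FILES"])
    bucket = os.environ["BUCKET_NAME"]  # hypothetical, not set in the stack above
    cutoff = datetime.now(timezone.utc) - timedelta(weeks=weeks)
    paginator = s3.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=bucket):
        for obj in page.get("Contents", []):
            if obj["LastModified"] < cutoff:
                s3.delete_object(Bucket=bucket, Key=obj["Key"])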
Example #16
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #Create the SQS queue
        queue = sqs.Queue(self, "SQSQueue")

        #Create the API GW service role with permissions to call SQS
        rest_api_role = iam.Role(
            self,
            "RestAPIRole",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSQSFullAccess")
            ])

        #Create an API GW Rest API
        base_api = apigw.RestApi(self, 'ApiGW', rest_api_name='TestAPI')
        base_api.root.add_method("ANY")

        #Create a resource named "example" on the base API
        api_resource = base_api.root.add_resource('example')

        #Create API Integration Response object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html
        integration_response = apigw.IntegrationResponse(
            status_code="200",
            response_templates={"application/json": ""},
        )

        #Create API Integration Options object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html
        api_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[integration_response],
            request_templates={
                "application/json":
                "Action=SendMessage&MessageBody=$input.body"
            },
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                "integration.request.header.Content-Type":
                "'application/x-www-form-urlencoded'"
            },
        )

        #Create AWS Integration Object for SQS: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/AwsIntegration.html
        api_resource_sqs_integration = apigw.AwsIntegration(
            service="sqs",
            integration_http_method="POST",
            path="{}/{}".format(Aws.ACCOUNT_ID, queue.queue_name),
            options=api_integration_options)

        #Create a Method Response Object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/MethodResponse.html
        method_response = apigw.MethodResponse(status_code="200")

        #Add the API GW Integration to the "example" API GW Resource
        api_resource.add_method("POST",
                                api_resource_sqs_integration,
                                method_responses=[method_response])

        #Creating Lambda function that will be triggered by the SQS Queue
        sqs_lambda = _lambda.Function(
            self,
            'SQSTriggerLambda',
            handler='lambda-handler.handler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset('lambda'),
        )

        #Create an SQS event source for Lambda
        sqs_event_source = lambda_event_source.SqsEventSource(queue)

        #Add SQS event source to the Lambda function
        sqs_lambda.add_event_source(sqs_event_source)
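
Once deployed, the mapping template turns a POST to /example into an SQS SendMessage call, so the raw request body becomes the message body. A usage sketch; the invoke URL is a placeholder for the one cdk deploy prints:

import requests  # third-party: pip install requests

# Placeholder: substitute the stage invoke URL from the deploy output
api_url = "https://abc123.execute-api.us-east-1.amazonaws.com/prod/example"
resp = requests.post(api_url, json={"hello": "world"})
print(resp.status_code)  # 200, with the empty response template from above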
Example #17
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        env_name = self.node.try_get_context('env')

        #Create the SQS queue
        queue = sqs.Queue(self,
                          id=f"{env_name}-SQSQueue",
                          queue_name=f"{env_name}-queue")

        #Create the API GW service role with permissions to call SQS
        rest_api_role = iam.Role(
            self,
            id=f"{env_name}-RestAPISQSRole",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSQSFullAccess")
            ])

        #Create an API GW Rest API
        base_api = apigw.RestApi(
            self,
            id=f'{env_name}-ApiGW',
            rest_api_name=f'{env_name}SQSTestAPI',
            api_key_source_type=apigw.ApiKeySourceType.HEADER)

        usage_api_key_value = ''.join(
            random.choice(string.ascii_uppercase + string.ascii_lowercase +
                          string.digits) for _ in range(40))

        usage_api_key = base_api.add_api_key(id=f'{env_name}-apikey',
                                             value=usage_api_key_value)
        usage_plan = base_api.add_usage_plan(id=f'{env_name}-usageplan',
                                             name=f'{env_name}-usageplan',
                                             api_key=usage_api_key,
                                             throttle=apigw.ThrottleSettings(
                                                 rate_limit=10, burst_limit=2))
        usage_plan.add_api_stage(stage=base_api.deployment_stage)

        #Create a resource named "example" on the base API
        api_resource = base_api.root.add_resource('sqstest')

        #Create API Integration Response object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationResponse.html
        integration_response = apigw.IntegrationResponse(
            status_code="200",
            response_templates={"application/json": ""},
        )

        #Create API Integration Options object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/IntegrationOptions.html
        api_integration_options = apigw.IntegrationOptions(
            credentials_role=rest_api_role,
            integration_responses=[integration_response],
            request_templates={
                "application/json":
                "Action=SendMessage&MessageBody=$input.body"
            },
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                "integration.request.header.Content-Type":
                "'application/x-www-form-urlencoded'"
            },
        )

        #Create AWS Integration Object for SQS: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/AwsIntegration.html
        api_resource_sqs_integration = apigw.AwsIntegration(
            service="sqs",
            integration_http_method="POST",
            # must be ACCOUNT_ID. Just the way URL to SQS is created
            path="{}/{}".format(core.Aws.ACCOUNT_ID, queue.queue_name),
            options=api_integration_options)

        #Create a Method Response Object: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_apigateway/MethodResponse.html
        method_response = apigw.MethodResponse(status_code="200")

        #Add the API GW Integration to the "example" API GW Resource
        api_resource.add_method("POST",
                                api_resource_sqs_integration,
                                method_responses=[method_response],
                                api_key_required=True)

        #Creating Lambda function that will be triggered by the SQS Queue
        sqs_lambda = _lambda.Function(
            self,
            'SQSTriggerLambda',
            handler='sqs_lambda.handler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset('pr_sqs_lambda'),
        )

        #Create an SQS event source for Lambda
        sqs_event_source = lambda_event_source.SqsEventSource(queue)

        #Add SQS event source to the Lambda function
        sqs_lambda.add_event_source(sqs_event_source)

        # https://67ixnggm81.execute-api.us-east-1.amazonaws.com/prod/sqstest
        region = core.Aws.REGION
        core.CfnOutput(self,
                       'api-gw-url',
                       value='https://' + base_api.rest_api_id +
                       '.execute-api.' + region +
                       '.amazonaws.com/prod/sqstest',
                       export_name='api-sqs-gw-url')
        print(f'API Key: {usage_api_key_value}')
        """
Example #18
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        create_dependency_layer: Callable[[], None],
        **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)

        # create dependency layer zip for lambda function
        create_dependency_layer()

        api_secret = secretsmanager.Secret(
            self,
            "ActualApiSecret",
            description="Secrets required to communicate with Aarogya Setu OpenAPI",
        )

        # create cognito user pool for authentication
        user_pool = cognito.UserPool(
            self,
            "AppUserPool",
            self_sign_up_enabled=True,
            account_recovery=cognito.AccountRecovery.PHONE_AND_EMAIL,
            user_verification=cognito.VerificationEmailStyle.CODE,
            auto_verify={"email": True},
            standard_attributes={"email": {"required": True, "mutable": True}},
        )

        user_pool_client = cognito.UserPoolClient(
            self, "UserPoolClient", user_pool=user_pool
        )

        # Create storage and queue
        bulk_request_queue = sqs.Queue(
            self,
            "BulkRequestQueue",
        )

        user_status_table = ddb.Table(
            self,
            "UserStatusTable",
            partition_key={"name": "mobile_number", "type": ddb.AttributeType.STRING},
            time_to_live_attribute="expdate",
        )
        self._user_status_table = user_status_table

        requests_table = ddb.Table(
            self,
            "RequestsTable",
            partition_key={"name": "mobile_number", "type": ddb.AttributeType.STRING},
            time_to_live_attribute="expdate",
        )

        # Create layer for lambda run time dependencies
        dependency_layer = _lambda.LayerVersion(
            self,
            "PythonDependencies",
            code=_lambda.Code.from_asset(path.join("lambda", "dependency-layer.zip")),
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
            description="The layer contains requests and pyjwt dependencies",
        )

        # Create Lambda functions
        single_request = _lambda.Function(
            self,
            "SingleRequesetHandler",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset("lambda"),
            handler="single_request.handler",
            timeout=core.Duration.seconds(10),
            layers=[dependency_layer],
            environment={
                "USER_STATUS_TABLE": user_status_table.table_name,
                "REQUESTS_TABLE": requests_table.table_name,
                "API_SECRET_ARN": api_secret.secret_full_arn,
            },
        )

        # give lambda access permissions to ddb tables and secrets
        user_status_table.grant_read_write_data(single_request)
        requests_table.grant_read_write_data(single_request)
        api_secret.grant_read(single_request)

        bulk_request = _lambda.Function(
            self,
            "BulkRequestHandler",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset("lambda"),
            handler="bulk_request.handler",
            timeout=core.Duration.seconds(30),
            environment={
                "QUEUE_URL": bulk_request_queue.queue_url,
            },
        )

        # give lambda access to write to queue
        bulk_request_queue.grant_send_messages(bulk_request)

        queue_receiver = _lambda.Function(
            self,
            "QueueReceiverHandler",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset("lambda"),
            handler="queue_receiver.handler",
            timeout=core.Duration.seconds(10),
            layers=[dependency_layer],
            environment={
                "USER_STATUS_TABLE": user_status_table.table_name,
                "REQUESTS_TABLE": requests_table.table_name,
                "QUEUE_URL": bulk_request_queue.queue_url,
                "API_SECRET_ARN": api_secret.secret_full_arn,
            },
        )

        # lambda gets triggered by sqs queue and writes to both tables
        queue_receiver.add_event_source(
            events.SqsEventSource(bulk_request_queue, batch_size=1)
        )

        # give queue receiver access to tables, queue and secrets
        bulk_request_queue.grant_consume_messages(queue_receiver)
        user_status_table.grant_read_write_data(queue_receiver)
        requests_table.grant_read_write_data(queue_receiver)

        api_secret.grant_read(queue_receiver)

        scan_table = _lambda.Function(
            self,
            "ScanTableHandler",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset("lambda"),
            handler="scan_table.handler",
            timeout=core.Duration.seconds(30),
            environment={
                "USER_STATUS_TABLE": user_status_table.table_name,
            },
        )

        user_status_table.grant_read_data(scan_table)

        # create api endpoints with authorization
        api = apigw.RestApi(
            self,
            "ASetuApiGateway",
            default_cors_preflight_options=apigw.CorsOptions(
                allow_origins=apigw.Cors.ALL_ORIGINS
            ),
        )

        auth = apigw.CfnAuthorizer(
            self,
            "ApiCognitoAuthorizer",
            name="CognitoAuthorizer",
            type="COGNITO_USER_POOLS",
            authorizer_result_ttl_in_seconds=300,
            identity_source="method.request.header.Authorization",
            rest_api_id=api.rest_api_id,
            provider_arns=[user_pool.user_pool_arn],
        )

        single_request_integration = apigw.LambdaIntegration(single_request, proxy=True)
        single_request_resource = api.root.add_resource("status")
        single_method = single_request_resource.add_method(
            "POST",
            single_request_integration,
            api_key_required=False,
            authorizer=auth,
            authorization_type=apigw.AuthorizationType.COGNITO,
        )

        bulk_request_integration = apigw.LambdaIntegration(bulk_request, proxy=True)
        bulk_request_resource = api.root.add_resource("bulk_status")
        bulk_method = bulk_request_resource.add_method(
            "POST",
            bulk_request_integration,
            api_key_required=False,
            authorizer=auth,
            authorization_type=apigw.AuthorizationType.COGNITO,
        )

        scan_table_integration = apigw.LambdaIntegration(scan_table, proxy=True)
        scan_table_resource = api.root.add_resource("scan")
        scan_method = scan_table_resource.add_method(
            "GET",
            scan_table_integration,
            api_key_required=False,
            authorizer=auth,
            authorization_type=apigw.AuthorizationType.COGNITO,
        )

        # Override authorizer to use COGNITO to authorize apis
        # Solution from: https://github.com/aws/aws-cdk/issues/9023#issuecomment-658309644
        methods = [single_method, bulk_method, scan_method]
        for method in methods:
            method.node.find_child("Resource").add_property_override(
                "AuthorizationType", "COGNITO_USER_POOLS"
            )
            method.node.find_child("Resource").add_property_override(
                "AuthorizerId", {"Ref": auth.logical_id}
            )

        # Export output values for frontend application
        core.CfnOutput(
            self,
            "user-pool-id",
            value=user_pool.user_pool_id,
            export_name="USER-POOL-ID",
        )
        core.CfnOutput(
            self,
            "user-pool-web-client",
            value=user_pool_client.user_pool_client_id,
            export_name="WEB-CLIENT-ID",
        )
        core.CfnOutput(
            self, "api-endpoint-url", value=api.url, export_name="API-ENDPOINT-URL"
        )
        core.CfnOutput(
            self,
            "deployment-region",
            value=self.region,
            export_name="REGION",
        )
        core.CfnOutput(
            self, "stack-name", value=self.stack_name, export_name="STACK-NAME"
        )
        core.CfnOutput(
            self,
            "api-secret-arn",
            value=api_secret.secret_full_arn,
            export_name="API-SECRET-ARN",
        )
Example #19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create image bucket
        image_bucket = s3.Bucket(self, 'inbound_image_s3_bucket')

        # Create the image processing queue
        image_process_queue = sqs.Queue(
            self, "image_process_queue",
            visibility_timeout=core.Duration.seconds(300),
            retention_period=core.Duration.days(1)
        )

        # Create the image response queue
        response_queue = sqs.Queue(
            self, "results_queue",
            visibility_timeout=core.Duration.seconds(300),
            retention_period=core.Duration.days(1)
        )

        # Set the put object notification to the SQS Queue
        image_bucket.add_event_notification(event=s3.EventType.OBJECT_CREATED_PUT,
                                            dest=s3n.SqsDestination(image_process_queue))

        # Define the AWS Lambda to call Amazon Rekognition DetectFaces
        detect_faces_lambda = _lambda.Function(self, 'detect_faces',
                                               runtime=_lambda.Runtime.PYTHON_3_7,
                                               handler='detect_faces.lambda_handler',
                                               code=_lambda.Code.asset('./lambda'),
                                               timeout=core.Duration.seconds(30),
                                               environment={'SQS_RESPONSE_QUEUE': response_queue.queue_name},
                                               reserved_concurrent_executions=50
                                               )

        # Set SQS image_process_queue Queue as event source for detect_faces_lambda
        detect_faces_lambda.add_event_source(_lambda_events.SqsEventSource(image_process_queue,
                                                                           batch_size=1))

        # Allow response queue messages from lambda
        response_queue.grant_send_messages(detect_faces_lambda)

        # Allow lambda to call Rekognition by adding a IAM Policy Statement
        detect_faces_lambda.add_to_role_policy(iam.PolicyStatement(actions=['rekognition:*'],
                                                                   resources=['*']))
        # Allow lambda to read from S3
        image_bucket.grant_read(detect_faces_lambda)

        # Define the DynamoDB Table
        results_table = dynamodb.Table(self, 'detect_faces_results',
                                       table_name='detect_faces_results',
                                       partition_key=dynamodb.Attribute(name='id', type=dynamodb.AttributeType.STRING),
                                       read_capacity=200,
                                       write_capacity=200
                                       )

        # Define the AWS Lambda to write results into the DynamoDB results_table
        write_results_lambda = _lambda.Function(self, 'write_results',
                                                runtime=_lambda.Runtime.PYTHON_3_7,
                                                handler='write_results.lambda_handler',
                                                code=_lambda.Code.asset('./lambda'),
                                                timeout=core.Duration.seconds(30),
                                                environment={'TABLE_NAME': results_table.table_name}
                                                )

        # Set SQS response_queue Queue as event source for write_results_lambda results_table
        write_results_lambda.add_event_source(_lambda_events.SqsEventSource(response_queue,
                                                                            batch_size=1))

        # Allow AWS Lambda write_results_lambda to Write to Dynamodb
        results_table.grant_write_data(write_results_lambda)

        # Allow AWS Lambda write_results_lambda to read messages from the SQS response_queue Queue
        response_queue.grant_consume_messages(write_results_lambda)

        # Output to Amazon S3 Image Bucket
        core.CfnOutput(self, 'cdk_output',
                       value=image_bucket.bucket_name,
                       description='Input Amazon S3 Image Bucket')
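
A minimal sketch of detect_faces.lambda_handler, assuming each SQS record wraps the standard S3 event-notification JSON (the usual shape for S3-to-SQS notifications); everything beyond that shape is an assumption:

import json
import os
from urllib.parse import unquote_plus

import boto3

rekognition = boto3.client("rekognition")
sqs = boto3.resource("sqs")
# SQS_RESPONSE_QUEUE holds the queue name (see the environment above)
response_queue = sqs.get_queue_by_name(QueueName=os.environ["SQS_RESPONSE_QUEUE"])

def lambda_handler(event, context):
    for record in event["Records"]:
        s3_event = json.loads(record["body"])
        for s3_record in s3_event.get("Records", []):
            bucket = s3_record["s3"]["bucket"]["name"]
            key = unquote_plus(s3_record["s3"]["object"]["key"])  # keys arrive URL-encoded
            faces = rekognition.detect_faces(
                Image={"S3Object": {"Bucket": bucket, "Name": key}},
                Attributes=["DEFAULT"],
            )
            response_queue.send_message(MessageBody=json.dumps(
                {"image": key, "faces": faces["FaceDetails"]}, default=str))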
Example #20
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.current_dir = os.path.dirname(__file__)

        self.bucket = s3.Bucket(
            self,
            "qs-migration-bucket",
            bucket_name=f'quicksight-migration-{core.Aws.ACCOUNT_ID}',
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )

        self.quicksight_migration_lambda_role = iam.Role(
            self,
            'quicksight-migration-lambda-role',
            description='Role for the Quicksight dashboard migration Lambdas',
            role_name='quicksight-migration-lambda-role',
            max_session_duration=core.Duration.seconds(3600),
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={
                'AllowAccess':
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            'logs:CreateLogGroup', 'logs:CreateLogStream',
                            'logs:PutLogEvents'
                        ],
                        resources=[
                            f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:*'
                        ]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["sts:AssumeRole", "iam:ListRoles"],
                        resources=[
                            "arn:aws:iam::*:role/quicksight-migration-*-assume-role"
                        ]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["s3:PutObject", "s3:ListBucket"],
                        resources=[
                            self.bucket.bucket_arn,
                            f"{self.bucket.bucket_arn}/*"
                        ]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["secrets:GetSecretValue"],
                        resources=[
                            f"arn:aws:secretsmanager:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:secret:*"
                        ]),
                    iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                        actions=[
                                            "quicksight:*",
                                        ],
                                        resources=["*"])
                ])
            })

        # API Gateway to SQS
        self.apigw_sqs = ApiGatewayToSqs(
            self,
            "ApiGatewayToSQSqsMigration",
            allow_create_operation=True,
            allow_read_operation=False,
            allow_delete_operation=False,
            api_gateway_props=apigw.RestApiProps(
                rest_api_name="quicksight-migration-sqs",
                deploy=True,
                default_method_options=apigw.MethodOptions(
                    authorization_type=apigw.AuthorizationType.NONE),
                default_cors_preflight_options=apigw.CorsOptions(
                    allow_origins=apigw.Cors.ALL_ORIGINS,
                    allow_methods=apigw.Cors.ALL_METHODS,
                    allow_headers=[
                        'Access-Control-Allow-Origin',
                        'Access-Control-Allow-Headers', 'Content-Type'
                    ]),
                policy=iam.PolicyDocument(statements=[
                    iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                        actions=['execute-api:Invoke'],
                                        resources=["execute-api:/prod/*"],
                                        principals=[iam.ArnPrincipal("*")])
                ])),
            queue_props=sqs.QueueProps(
                queue_name="quicksight-migration-sqs",
                visibility_timeout=core.Duration.minutes(15)))

        self.quicksight_migration_lambda = _lambda.Function(
            self,
            'quicksight-migration-lambda',
            handler='quicksight_migration.lambda_function.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                os.path.join(self.current_dir,
                             '../lambda/quicksight_migration/')),
            function_name='quicksight_migration_lambda',
            role=self.quicksight_migration_lambda_role,
            timeout=core.Duration.minutes(15),
            memory_size=1024,
            environment={
                'BUCKET_NAME': self.bucket.bucket_name,
                'S3_KEY': 'None',
                'INFRA_CONFIG_PARAM': '/infra/config',
                'SQS_URL': self.apigw_sqs.sqs_queue.queue_url
            })

        self.quicksight_migration_lambda.add_event_source(
            event_sources.SqsEventSource(
                enabled=True,
                queue=self.apigw_sqs.sqs_queue,
            ))
Example #21
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        with open("stack/config.yml", 'r') as stream:
            configs = yaml.safe_load(stream)

        ### S3 core
        images_S3_bucket = _s3.Bucket(self, "ICS_IMAGES")

        images_S3_bucket.add_cors_rule(
            allowed_methods=[_s3.HttpMethods.POST],
            allowed_origins=["*"] # add API gateway web resource URL
        )

        ### SQS core
        image_deadletter_queue = _sqs.Queue(self, "ICS_IMAGES_DEADLETTER_QUEUE")
        image_queue = _sqs.Queue(self, "ICS_IMAGES_QUEUE",
            dead_letter_queue={
                "max_receive_count": configs["DeadLetterQueue"]["MaxReceiveCount"],
                "queue": image_deadletter_queue
            })

        ### api gateway core
        api_gateway = RestApi(self, 'ICS_API_GATEWAY', rest_api_name='ImageContentSearchApiGateway')
        api_gateway_resource = api_gateway.root.add_resource(configs["ProjectName"])
        api_gateway_landing_page_resource = api_gateway_resource.add_resource('web')
        api_gateway_get_signedurl_resource = api_gateway_resource.add_resource('signedUrl')
        api_gateway_image_search_resource = api_gateway_resource.add_resource('search')

        ### landing page function
        get_landing_page_function = Function(self, "ICS_GET_LANDING_PAGE",
            function_name="ICS_GET_LANDING_PAGE",
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.asset("./src/landingPage"))

        get_landing_page_integration = LambdaIntegration(
            get_landing_page_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_landing_page_resource.add_method('GET', get_landing_page_integration,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])

        ### cognito
        required_attribute = _cognito.StandardAttribute(required=True)

        users_pool = _cognito.UserPool(self, "ICS_USERS_POOL",
            auto_verify=_cognito.AutoVerifiedAttrs(email=True), #required for self sign-up
            standard_attributes=_cognito.StandardAttributes(email=required_attribute), #required for self sign-up
            self_sign_up_enabled=configs["Cognito"]["SelfSignUp"])

        user_pool_app_client = _cognito.CfnUserPoolClient(self, "ICS_USERS_POOL_APP_CLIENT", 
            supported_identity_providers=["COGNITO"],
            allowed_o_auth_flows=["implicit"],
            allowed_o_auth_scopes=configs["Cognito"]["AllowedOAuthScopes"],
            user_pool_id=users_pool.user_pool_id,
            callback_ur_ls=[api_gateway_landing_page_resource.url],
            allowed_o_auth_flows_user_pool_client=True,
            explicit_auth_flows=["ALLOW_REFRESH_TOKEN_AUTH"])

        user_pool_domain = _cognito.UserPoolDomain(self, "ICS_USERS_POOL_DOMAIN", 
            user_pool=users_pool, 
            cognito_domain=_cognito.CognitoDomainOptions(domain_prefix=configs["Cognito"]["DomainPrefix"]))

        ### get signed URL function
        get_signedurl_function = Function(self, "ICS_GET_SIGNED_URL",
            function_name="ICS_GET_SIGNED_URL",
            environment={
                "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
                "DEFAULT_SIGNEDURL_EXPIRY_SECONDS": configs["Functions"]["DefaultSignedUrlExpirySeconds"]
            },
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.asset("./src/getSignedUrl"))

        get_signedurl_integration = LambdaIntegration(
            get_signedurl_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_get_signedurl_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            rest_api_id=api_gateway_get_signedurl_resource.rest_api.rest_api_id,
            name="ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            type="COGNITO_USER_POOLS",
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_get_signedurl_resource.add_method('GET', get_signedurl_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]
            ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_get_signedurl_authorizer.ref)

        images_S3_bucket.grant_put(get_signedurl_function, objects_key_pattern="new/*")

        ### image massage function
        image_massage_function = Function(self, "ICS_IMAGE_MASSAGE",
            function_name="ICS_IMAGE_MASSAGE",
            timeout=core.Duration.seconds(6),
            runtime=Runtime.PYTHON_3_7,
            environment={"ICS_IMAGE_MASSAGE": image_queue.queue_name},
            handler="main.handler",
            code=Code.asset("./src/imageMassage"))

        images_S3_bucket.grant_write(image_massage_function, "processed/*")
        images_S3_bucket.grant_delete(image_massage_function, "new/*")
        images_S3_bucket.grant_read(image_massage_function, "new/*")
        
        new_image_added_notification = _s3notification.LambdaDestination(image_massage_function)

        images_S3_bucket.add_event_notification(_s3.EventType.OBJECT_CREATED, 
            new_image_added_notification, 
            _s3.NotificationKeyFilter(prefix="new/")
            )

        image_queue.grant_send_messages(image_massage_function)

        ### image analyzer function
        image_analyzer_function = Function(self, "ICS_IMAGE_ANALYSIS",
            function_name="ICS_IMAGE_ANALYSIS",
            runtime=Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(10),
            environment={
                "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
                "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
                "REGION": core.Aws.REGION,
                },
            handler="main.handler",
            code=Code.asset("./src/imageAnalysis")) 

        image_analyzer_function.add_event_source(_lambda_event_source.SqsEventSource(queue=image_queue, batch_size=10))
        image_queue.grant_consume_messages(image_analyzer_function)
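        # A minimal consumer sketch (illustrative, not the deployed handler):
        # each invocation receives up to batch_size SQS records, with the
        # message payload carried in record['body'] as a string.
        #
        #   import json
        #
        #   def handler(event, context):
        #       for record in event['Records']:
        #           message = json.loads(record['body'])
        #           ...  # run Rekognition on the referenced image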

        lambda_rekognition_access = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW, 
            actions=["rekognition:DetectLabels", "rekognition:DetectModerationLabels"],
            resources=["*"]                    
        )

        image_analyzer_function.add_to_role_policy(lambda_rekognition_access)
        images_S3_bucket.grant_read(image_analyzer_function, "processed/*")

        ### API gateway finalizing
        self.add_cors_options(api_gateway_get_signedurl_resource)
        self.add_cors_options(api_gateway_landing_page_resource)
        self.add_cors_options(api_gateway_image_search_resource)

        ### database 
        database_secret = _secrets_manager.Secret(self, "ICS_DATABASE_SECRET",
            secret_name="rds-db-credentials/image-content-search-rds-secret",
            generate_secret_string=_secrets_manager.SecretStringGenerator(
                generate_string_key='password',
                secret_string_template='{"username": "******"}',
                exclude_punctuation=True,
                exclude_characters='/@\" \\\'',
                require_each_included_type=True
            )
        )

        database = _rds.CfnDBCluster(self, "ICS_DATABASE",
            engine=_rds.DatabaseClusterEngine.aurora_mysql(version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
            engine_mode="serverless",
            database_name=configs["Database"]["Name"],
            enable_http_endpoint=True,
            deletion_protection=configs["Database"]["DeletionProtection"],
            master_username=database_secret.secret_value_from_json("username").to_string(),
            master_user_password=database_secret.secret_value_from_json("password").to_string(),
            scaling_configuration=_rds.CfnDBCluster.ScalingConfigurationProperty(
                auto_pause=configs["Database"]["Scaling"]["AutoPause"],
                min_capacity=configs["Database"]["Scaling"]["Min"],
                max_capacity=configs["Database"]["Scaling"]["Max"],
                seconds_until_auto_pause=configs["Database"]["Scaling"]["SecondsToAutoPause"]
            ),
        )

        database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(core.Aws.REGION, core.Aws.ACCOUNT_ID, database.ref)
   
        secret_target = _secrets_manager.CfnSecretTargetAttachment(self,"ICS_DATABASE_SECRET_TARGET",
            target_type="AWS::RDS::DBCluster",
            target_id=database.ref,
            secret_id=database_secret.secret_arn
        )

        secret_target.node.add_dependency(database)
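        # The explicit dependency forces CloudFormation to create the cluster
        # before attaching the secret to it; otherwise the two resources could
        # deploy in parallel and the attachment could race the cluster.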

        ### database function
        image_data_function_role = _iam.Role(self, "ICS_IMAGE_DATA_FUNCTION_ROLE",
            role_name="ICS_IMAGE_DATA_FUNCTION_ROLE",
            assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaVPCAccessExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSDataFullAccess")
            ]
        )
        
        image_data_function = Function(self, "ICS_IMAGE_DATA",
            function_name="ICS_IMAGE_DATA",
            runtime=Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(5),
            role=image_data_function_role,
            environment={
                "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
                "CLUSTER_ARN": database_cluster_arn,
                "CREDENTIALS_ARN": database_secret.secret_arn,
                "DB_NAME": database.database_name,
                "REGION": core.Aws.REGION
                },
            handler="main.handler",
            code=Code.asset("./src/imageData")
        ) 

        image_search_integration = LambdaIntegration(
            image_data_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_image_search_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            rest_api_id=api_gateway_image_search_resource.rest_api.rest_api_id,
            name="ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            type="COGNITO_USER_POOLS", 
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_image_search_resource.add_method('POST', image_search_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]
            ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_image_search_authorizer.ref)


        lambda_access_search = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW, 
            actions=["translate:TranslateText"],
            resources=["*"]            
        ) 

        image_data_function.add_to_role_policy(lambda_access_search)

        ### custom resource
        lambda_provider = Provider(self, 'ICS_IMAGE_DATA_PROVIDER', 
            on_event_handler=image_data_function
        )

        core.CustomResource(self, 'ICS_IMAGE_DATA_RESOURCE', 
            service_token=lambda_provider.service_token,
            pascal_case_properties=False,
            resource_type="Custom::SchemaCreation",
            properties={
                "source": "Cloudformation"
            }
        )
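        # Contract sketch (illustrative): on stack create/update/delete the
        # provider invokes image_data_function with a CloudFormation event, so
        # the handler is expected to branch on the request type, e.g.:
        #
        #   def handler(event, context):
        #       if event['RequestType'] == 'Create':
        #           ...  # create the database schema
        #       elif event['RequestType'] in ('Update', 'Delete'):
        #           ...  # migrate or clean up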

        ### event bridge
        event_bus = _events.EventBus(self, "ICS_IMAGE_CONTENT_BUS")

        event_rule = _events.Rule(self, "ICS_IMAGE_CONTENT_RULE",
            rule_name="ICS_IMAGE_CONTENT_RULE",
            description="The event from image analyzer to store the data",
            event_bus=event_bus,
            event_pattern=_events.EventPattern(resources=[image_analyzer_function.function_arn]),
        )

        event_rule.add_target(_event_targets.LambdaFunction(image_data_function))

        event_bus.grant_put_events(image_analyzer_function)
        image_analyzer_function.add_environment("EVENT_BUS", event_bus.event_bus_name)

        ### outputs
        core.CfnOutput(self, 'CognitoHostedUILogin',
            value='https://{}.auth.{}.amazoncognito.com/login?client_id={}&response_type=token&scope={}&redirect_uri={}'.format(user_pool_domain.domain_name, core.Aws.REGION, user_pool_app_client.ref, '+'.join(user_pool_app_client.allowed_o_auth_scopes), api_gateway_landing_page_resource.url),
            description='The Cognito Hosted UI Login Page'
        )
Пример #22
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Initializes the stack

        :param scope: parent of this stack, scope within which resources defined here are accessible
        :type scope: Optional[aws_cdk.core.Construct]
        :param id: the id of the stack
        :type id: Optional[str]
        :param **kwargs: additional optional arguments
            ``description``:
                a description of the stack (`Optional[str]`).
            ``env``
                AWS environment (account/region) where this stack will be deployed (`Optional[aws_cdk.core.Environment]`).
            ``stack_name``
                name with which to deploy the stack (`Optional[str]`).
            ``synthesizer``
                synthesis method to use while deploying this stack (`Optional[aws_cdk.core.IStackSynthesizer]`).
            ``tags``
                stack tags that will be applied to all taggable resources as well as the stack (`Optional[Mapping[str, str]]`).
            ``termination_protection``
                whether to enable termination protection for this stack (`Optional[bool]`).
        """
        super().__init__(scope, id, **kwargs)
        metric_handler_dict, webhook_creator_dict = self.handle_parameters()
        # timeout used for lambda and sqs, in seconds
        lambda_timeout = 300
        if self.node.try_get_context('lambda_timeout'):
            lambda_timeout = int(self.node.try_get_context('lambda_timeout'))

        dead_letter_queue = sqs.Queue(self,
                                      'DeadLetterQueue',
                                      queue_name='DeadLetterQueue')
        webhook_queue = sqs.Queue(
            self,
            'WebhookQueue',
            queue_name='WebhookQueue',
            visibility_timeout=core.Duration.seconds(lambda_timeout),
            dead_letter_queue=sqs.DeadLetterQueue(max_receive_count=3,
                                                  queue=dead_letter_queue))
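        # Redrive behaviour: a message received 3 times (max_receive_count)
        # without being deleted is moved to dead_letter_queue rather than
        # being retried again.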
        metric_handler_dict['queue_url'] = webhook_queue.queue_url

        metric_handler_management_role = self.create_lambda_role_and_policy(
            'MetricHandlerManagementRole', [
                'cloudwatch:GetDashboard', 'cloudwatch:GetMetricData',
                'cloudwatch:ListDashboards', 'cloudwatch:PutDashboard',
                'cloudwatch:PutMetricData', 'logs:CreateLogGroup',
                'logs:CreateLogStream', 'logs:PutLogEvents',
                'secretsmanager:GetSecretValue'
            ])
        metric_handler_timeout = lambda_timeout
        metric_handler_function = _lambda.Function(
            self,
            'MetricsHandler',
            function_name='MetricsHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda_dir'),
            handler='cloudwatch_dashboard_handler.handler',
            role=metric_handler_management_role,
            environment=metric_handler_dict,
            timeout=core.Duration.seconds(metric_handler_timeout))
        self.create_event_with_permissions(metric_handler_function)

        # Connect SQS to Lambda
        sqs_event_source = lambda_event_source.SqsEventSource(webhook_queue)
        metric_handler_function.add_event_source(sqs_event_source)

        apigw_webhook_url = self.create_and_integrate_apigw(
            webhook_queue, metric_handler_dict['dashboard_name_prefix'])
        webhook_creator_dict['apigw_endpoint'] = apigw_webhook_url

        webhook_role = self.create_lambda_role_and_policy(
            'WebhookCreatorRole', ['secretsmanager:GetSecretValue'])
        _lambda.Function(self,
                         'WebhookCreator',
                         function_name='WebhookCreator',
                         runtime=_lambda.Runtime.PYTHON_3_7,
                         code=_lambda.Code.asset('lambda_dir'),
                         handler='webhook_creator.handler',
                         role=webhook_role,
                         environment=webhook_creator_dict,
                         timeout=core.Duration.seconds(5))
Пример #23
0
    def __init__(self, app: core.App, id: str) -> None:
        super().__init__(app, id)

        ##################################
        # Lambda Timeouts (seconds) & Queue Redrive
        ##################################

        lambda_gatherer_timeout = 600
        lambda_joiner_timeout = 350
        # pa11y's timeout is set to 50, so the lambda is just a little longer
        lambda_a11y_scan_timeout = 55
        max_receive_count = 2

        ##################################
        # S3 Bucket with Domains
        ##################################

        asset = aws_s3_assets.Asset(
            self, 'domain-list', path=os.path.abspath('./domains/domains.csv'))

        ##################################
        # Domain Gatherer Lambda and Queue
        ##################################

        domain_queue = sqs.Queue(
            self,
            'domain-queue',
            visibility_timeout=core.Duration.seconds(
                (max_receive_count + 1) * lambda_gatherer_timeout),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=max_receive_count,
                queue=sqs.Queue(self,
                                'domain-queue-dlq',
                                retention_period=core.Duration.days(5))))
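        # Visibility timeout sizing, worked out: with max_receive_count=2 and
        # a 600 s gatherer timeout, (2 + 1) * 600 = 1800 s, so each delivery
        # attempt stays invisible to other consumers for the full Lambda run.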

        lambda_gatherer = lambda_.Function(
            self,
            "domain-gatherer",
            code=lambda_.Code.from_asset('./lambdas/domain_gatherer'),
            handler="handler.main",
            timeout=core.Duration.seconds(lambda_gatherer_timeout),
            runtime=lambda_.Runtime.PYTHON_3_7,
            memory_size=150)

        lambda_gatherer.add_environment('SQS_URL', domain_queue.queue_url)
        lambda_gatherer.add_environment('BUCKET_NAME', asset.s3_bucket_name)
        lambda_gatherer.add_environment('OBJECT_KEY', asset.s3_object_key)

        lambda_gatherer_sqs_exec_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'lambda:InvokeFunction', 'sqs:SendMessage',
                'sqs:DeleteMessage', 'sqs:SendMessageBatch',
                'sqs:SetQueueAttributes', 'sqs:GetQueueAttributes',
                'sqs:GetQueueUrl'
            ],
            resources=[domain_queue.queue_arn])
        lambda_gatherer.add_to_role_policy(lambda_gatherer_sqs_exec_policy)
        domain_queue.grant_send_messages(lambda_gatherer)

        # trigger for 1st and 15th of the month at 18:00 UTC (1pm EST)
        lambda_gatherer_rule = events.Rule(self,
                                           "Lambda Gatherer Rule",
                                           schedule=events.Schedule.cron(
                                               minute='0',
                                               hour='18',
                                               day="1,15",
                                               month='*',
                                               year='*'))
        lambda_gatherer_rule.add_target(
            targets.LambdaFunction(lambda_gatherer))
        asset.grant_read(lambda_gatherer)

        ##################################
        # A11y Scanner Lambda and S3
        ##################################

        layer = lambda_.LayerVersion(
            self,
            'chrome-aws-lambda',
            code=lambda_.Code.from_asset('./lambdas/chrome_aws_lambda.zip'),
            compatible_runtimes=[lambda_.Runtime.NODEJS_12_X],
            description='A layer of chrome-aws-lambda')

        lambda_a11y_scan = lambda_.Function(
            self,
            "a11y-scan",
            code=lambda_.Code.from_asset('./lambdas/a11y_scan'),
            handler="index.handler",
            timeout=core.Duration.seconds(lambda_a11y_scan_timeout),
            runtime=lambda_.Runtime.NODEJS_12_X,
            memory_size=1000,
            layers=[layer])

        lambda_a11y_scan.add_event_source(
            sources.SqsEventSource(domain_queue, batch_size=1))

        # create s3 bucket to put results
        results_bucket = s3.Bucket(self,
                                   'results-bucket',
                                   versioned=False,
                                   removal_policy=core.RemovalPolicy.DESTROY,
                                   block_public_access=s3.BlockPublicAccess(
                                       block_public_acls=True,
                                       ignore_public_acls=True,
                                       block_public_policy=True,
                                       restrict_public_buckets=True),
                                   lifecycle_rules=[
                                       s3.LifecycleRule(
                                           enabled=True,
                                           expiration=core.Duration.days(10))
                                   ])

        lambda_a11y_scan.add_environment('BUCKET_NAME',
                                         results_bucket.bucket_name)
        results_bucket.grant_put(lambda_a11y_scan)

        ##################################
        # Results Joiner Lambda
        ##################################

        # create s3 bucket to put site data
        data_bucket = s3.Bucket(self,
                                'data-bucket',
                                versioned=False,
                                removal_policy=core.RemovalPolicy.DESTROY,
                                block_public_access=s3.BlockPublicAccess(
                                    block_public_acls=True,
                                    ignore_public_acls=True,
                                    block_public_policy=True,
                                    restrict_public_buckets=True))

        lambda_joiner = lambda_.Function(
            self,
            "results-joiner",
            code=lambda_.Code.from_asset(
                './lambda-releases/results_joiner.zip'),
            handler="handler.main",
            timeout=core.Duration.seconds(lambda_joiner_timeout),
            runtime=lambda_.Runtime.PYTHON_3_7,
            memory_size=400)
        lambda_joiner.add_environment('DATA_BUCKET_NAME',
                                      data_bucket.bucket_name)
        lambda_joiner.add_environment('RESULTS_BUCKET_NAME',
                                      results_bucket.bucket_name)
        results_bucket.grant_read_write(lambda_joiner)
        data_bucket.grant_read_write(lambda_joiner)

        # trigger for 8th and 23rd of the month at 18:00 UTC (1pm EST)
        lambda_joiner_rule = events.Rule(self,
                                         "Lambda Joiner Rule",
                                         schedule=events.Schedule.cron(
                                             minute='0',
                                             hour='18',
                                             day="8,23",
                                             month='*',
                                             year='*'))
        lambda_joiner_rule.add_target(targets.LambdaFunction(lambda_joiner))
Пример #24
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        if self.node.try_get_context('vpc_type'):
            validate_cdk_json(self)

        ES_LOADER_TIMEOUT = 600
        ######################################################################
        # REGION mapping / ELB & Lambda Arch
        ######################################################################
        elb_id_temp = region_info.FactName.ELBV2_ACCOUNT
        elb_map_temp = region_info.RegionInfo.region_map(elb_id_temp)
        region_dict = {}
        for region in elb_map_temp:
            # ELB account ID
            region_dict[region] = {'ElbV2AccountId': elb_map_temp[region]}
            # Lambda Arch
            if region in ('us-east-1', 'us-east-2', 'us-west-2', 'ap-south-1',
                          'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1',
                          'eu-central-1', 'eu-west-1', 'eu-west-2'):
                region_dict[region]['LambdaArch'] = (
                    aws_lambda.Architecture.ARM_64.name)
            else:
                region_dict[region]['LambdaArch'] = (
                    aws_lambda.Architecture.X86_64.name)
        region_mapping = core.CfnMapping(
            scope=self, id='RegionMap', mapping=region_dict)
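        # Usage sketch: values are resolved per region at deploy time, e.g.
        #   arch = region_mapping.find_in_map(core.Aws.REGION, 'LambdaArch')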

        ######################################################################
        # get params
        ######################################################################
        allow_source_address = core.CfnParameter(
            self, 'AllowedSourceIpAddresses', allowed_pattern=r'^[0-9./\s]*',
            description='Space-delimited list of CIDR blocks',
            default='10.0.0.0/8 172.16.0.0/12 192.168.0.0/16')
        sns_email = core.CfnParameter(
            self, 'SnsEmail', allowed_pattern=r'^[0-9a-zA-Z@_\-\+\.]*',
            description=('Input your email as SNS topic, where Amazon '
                         'OpenSearch Service will send alerts to'),
            default='*****@*****.**')
        geoip_license_key = core.CfnParameter(
            self, 'GeoLite2LicenseKey', allowed_pattern=r'^[0-9a-zA-Z]{16}$',
            default='xxxxxxxxxxxxxxxx',
            description=("If you wolud like to enrich geoip locaiton such as "
                         "IP address's country, get a license key form MaxMind"
                         " and input the key. If you not, keep "
                         "xxxxxxxxxxxxxxxx"))
        reserved_concurrency = core.CfnParameter(
            self, 'ReservedConcurrency', default=10, type='Number',
            description=('Input reserved concurrency. Increase this value if '
                         'there are steady logs delay despite no errors'))
        aes_domain_name = self.node.try_get_context('aes_domain_name')
        bucket = f'{aes_domain_name}-{core.Aws.ACCOUNT_ID}'
        s3bucket_name_geo = f'{bucket}-geo'
        s3bucket_name_log = f'{bucket}-log'
        s3bucket_name_snapshot = f'{bucket}-snapshot'

        # organizations / multiaccount
        org_id = self.node.try_get_context('organizations').get('org_id')
        org_mgmt_id = self.node.try_get_context(
            'organizations').get('management_id')
        org_member_ids = self.node.try_get_context(
            'organizations').get('member_ids')
        no_org_ids = self.node.try_get_context(
            'no_organizations').get('aws_accounts')

        # Overwrite default S3 bucket name as customer name
        temp_geo = self.node.try_get_context('s3_bucket_name').get('geo')
        if temp_geo:
            s3bucket_name_geo = temp_geo
        else:
            print('Using default bucket names')
        temp_log = self.node.try_get_context('s3_bucket_name').get('log')
        if temp_log:
            s3bucket_name_log = temp_log
        elif org_id or no_org_ids:
            s3bucket_name_log = f'{aes_domain_name}-{self.account}-log'
        else:
            print('Using default bucket names')
        temp_snap = self.node.try_get_context('s3_bucket_name').get('snapshot')
        if temp_snap:
            s3bucket_name_snapshot = temp_snap
        else:
            print('Using default bucket names')
        kms_cmk_alias = self.node.try_get_context('kms_cmk_alias')
        if not kms_cmk_alias:
            kms_cmk_alias = 'aes-siem-key'
            print('Using default key alias')

        ######################################################################
        # deploy VPC when context is defined as using VPC
        ######################################################################
        # vpc_type is 'new' or 'import' or None
        vpc_type = self.node.try_get_context('vpc_type')

        if vpc_type == 'new':
            is_vpc = True
            vpc_cidr = self.node.try_get_context('new_vpc_nw_cidr_block')
            subnet_cidr_mask = int(
                self.node.try_get_context('new_vpc_subnet_cidr_mask'))
            # VPC
            vpc_aes_siem = aws_ec2.Vpc(
                self, 'VpcAesSiem', cidr=vpc_cidr,
                max_azs=3, nat_gateways=0,
                subnet_configuration=[
                    aws_ec2.SubnetConfiguration(
                        subnet_type=aws_ec2.SubnetType.ISOLATED,
                        name='aes-siem-subnet', cidr_mask=subnet_cidr_mask)])
            subnet1 = vpc_aes_siem.isolated_subnets[0]
            subnets = [{'subnet_type': aws_ec2.SubnetType.ISOLATED}]
            vpc_subnets = aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.ISOLATED)
            vpc_aes_siem_opt = vpc_aes_siem.node.default_child.cfn_options
            vpc_aes_siem_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
            for subnet in vpc_aes_siem.isolated_subnets:
                subnet_opt = subnet.node.default_child.cfn_options
                subnet_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
        elif vpc_type == 'import':
            vpc_id = self.node.try_get_context('imported_vpc_id')
            vpc_aes_siem = aws_ec2.Vpc.from_lookup(
                self, 'VpcAesSiem', vpc_id=vpc_id)

            subnet_ids = get_subnet_ids(self)
            subnets = []
            for number, subnet_id in enumerate(subnet_ids, 1):
                obj_id = 'Subnet' + str(number)
                subnet = aws_ec2.Subnet.from_subnet_id(self, obj_id, subnet_id)
                subnets.append(subnet)
            subnet1 = subnets[0]
            vpc_subnets = aws_ec2.SubnetSelection(subnets=subnets)

        if vpc_type:
            is_vpc = True
            # Security Group
            sg_vpc_noinbound_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcNoinboundSecurityGroup',
                security_group_name='aes-siem-noinbound-vpc-sg',
                vpc=vpc_aes_siem)

            sg_vpc_aes_siem = aws_ec2.SecurityGroup(
                self, 'AesSiemVpcSecurityGroup',
                security_group_name='aes-siem-vpc-sg',
                vpc=vpc_aes_siem)
            sg_vpc_aes_siem.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(vpc_aes_siem.vpc_cidr_block),
                connection=aws_ec2.Port.tcp(443),)
            sg_vpc_opt = sg_vpc_aes_siem.node.default_child.cfn_options
            sg_vpc_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

            # VPC Endpoint
            vpc_aes_siem.add_gateway_endpoint(
                'S3Endpoint', service=aws_ec2.GatewayVpcEndpointAwsService.S3,
                subnets=subnets)
            vpc_aes_siem.add_interface_endpoint(
                'SQSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.SQS,)
            vpc_aes_siem.add_interface_endpoint(
                'KMSEndpoint', security_groups=[sg_vpc_aes_siem],
                service=aws_ec2.InterfaceVpcEndpointAwsService.KMS,)
        else:
            is_vpc = False

        is_vpc = core.CfnCondition(
            self, 'IsVpc', expression=core.Fn.condition_equals(is_vpc, True))
        """
        CloudFormation実行時の条件式の書き方
        ClassのBasesが aws_cdk.core.Resource の時は、
        node.default_child.cfn_options.condition = is_vpc
        ClassのBasesが aws_cdk.core.CfnResource の時は、
        cfn_options.condition = is_vpc
        """

        ######################################################################
        # create cmk of KMS to encrypt S3 bucket
        ######################################################################
        kms_aes_siem = aws_kms.Key(
            self, 'KmsAesSiemLog', description='CMK for SIEM solution',
            removal_policy=core.RemovalPolicy.RETAIN)

        aws_kms.Alias(
            self, 'KmsAesSiemLogAlias', alias_name=kms_cmk_alias,
            target_key=kms_aes_siem,
            removal_policy=core.RemovalPolicy.RETAIN)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow GuardDuty to use the key',
                actions=['kms:GenerateDataKey'],
                principals=[aws_iam.ServicePrincipal(
                    'guardduty.amazonaws.com')],
                resources=['*'],),)

        kms_aes_siem.add_to_resource_policy(
            aws_iam.PolicyStatement(
                sid='Allow VPC Flow Logs to use the key',
                actions=['kms:Encrypt', 'kms:Decrypt', 'kms:ReEncrypt*',
                         'kms:GenerateDataKey*', 'kms:DescribeKey'],
                principals=[aws_iam.ServicePrincipal(
                    'delivery.logs.amazonaws.com')],
                resources=['*'],),)
        # basic policy
        key_policy_basic1 = aws_iam.PolicyStatement(
            sid='Allow principals in the account to decrypt log files',
            actions=['kms:DescribeKey', 'kms:ReEncryptFrom'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_basic1)

        # for Athena
        key_policy_athena = aws_iam.PolicyStatement(
            sid='Allow Athena to query s3 objects with this key',
            actions=['kms:Decrypt', 'kms:DescribeKey', 'kms:Encrypt',
                     'kms:GenerateDataKey*', 'kms:ReEncrypt*'],
            principals=[aws_iam.AccountPrincipal(
                account_id=core.Aws.ACCOUNT_ID)],
            resources=['*'],
            conditions={'ForAnyValue:StringEquals': {
                'aws:CalledVia': 'athena.amazonaws.com'}})
        kms_aes_siem.add_to_resource_policy(key_policy_athena)

        # for CloudTrail
        key_policy_trail1 = aws_iam.PolicyStatement(
            sid='Allow CloudTrail to describe key',
            actions=['kms:DescribeKey'],
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            resources=['*'],)
        kms_aes_siem.add_to_resource_policy(key_policy_trail1)

        key_policy_trail2 = aws_iam.PolicyStatement(
            sid=('Allow CloudTrail to encrypt logs'),
            actions=['kms:GenerateDataKey*'],
            principals=[aws_iam.ServicePrincipal(
                'cloudtrail.amazonaws.com')],
            resources=['*'],
            conditions={'StringLike': {
                'kms:EncryptionContext:aws:cloudtrail:arn': [
                    f'arn:aws:cloudtrail:*:{core.Aws.ACCOUNT_ID}:trail/*']}})
        kms_aes_siem.add_to_resource_policy(key_policy_trail2)

        ######################################################################
        # create s3 bucket
        ######################################################################
        block_pub = aws_s3.BlockPublicAccess(
            block_public_acls=True,
            ignore_public_acls=True,
            block_public_policy=True,
            restrict_public_buckets=True
        )
        s3_geo = aws_s3.Bucket(
            self, 'S3BucketForGeoip', block_public_access=block_pub,
            bucket_name=s3bucket_name_geo,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for log collector
        s3_log = aws_s3.Bucket(
            self, 'S3BucketForLog', block_public_access=block_pub,
            bucket_name=s3bucket_name_log, versioned=True,
            encryption=aws_s3.BucketEncryption.S3_MANAGED,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        # create s3 bucket for aes snapshot
        s3_snapshot = aws_s3.Bucket(
            self, 'S3BucketForSnapshot', block_public_access=block_pub,
            bucket_name=s3bucket_name_snapshot,
            # removal_policy=core.RemovalPolicy.DESTROY,
        )

        ######################################################################
        # IAM Role
        ######################################################################
        # deployment policy for the deploy-aes lambda
        arn_prefix = f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
        loggroup_aes = f'log-group:/aws/aes/domains/{aes_domain_name}/*'
        loggroup_opensearch = (
            f'log-group:/aws/OpenSearchService/domains/{aes_domain_name}/*')
        loggroup_lambda = 'log-group:/aws/lambda/aes-siem-*'
        policydoc_create_loggroup = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=[
                        'logs:PutResourcePolicy',
                        'logs:DescribeLogGroups',
                        'logs:DescribeLogStreams'
                    ],
                    resources=[f'{arn_prefix}:*', ]
                ),
                aws_iam.PolicyStatement(
                    actions=[
                        'logs:CreateLogGroup', 'logs:CreateLogStream',
                        'logs:PutLogEvents', 'logs:PutRetentionPolicy'],
                    resources=[
                        f'{arn_prefix}:{loggroup_aes}',
                        f'{arn_prefix}:{loggroup_opensearch}',
                        f'{arn_prefix}:{loggroup_lambda}',
                    ],
                )
            ]
        )

        policydoc_crhelper = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=[
                        'lambda:AddPermission',
                        'lambda:RemovePermission',
                        'events:ListRules',
                        'events:PutRule',
                        'events:DeleteRule',
                        'events:PutTargets',
                        'events:RemoveTargets'],
                    resources=['*']
                )
            ]
        )

        # snapshot policy for AES
        policydoc_snapshot = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['s3:ListBucket'],
                    resources=[s3_snapshot.bucket_arn]
                ),
                aws_iam.PolicyStatement(
                    actions=['s3:GetObject', 's3:PutObject',
                             's3:DeleteObject'],
                    resources=[s3_snapshot.bucket_arn + '/*']
                )
            ]
        )
        aes_siem_snapshot_role = aws_iam.Role(
            self, 'AesSiemSnapshotRole',
            role_name='aes-siem-snapshot-role',
            inline_policies=[policydoc_snapshot, ],
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        policydoc_assume_snapshrole = aws_iam.PolicyDocument(
            statements=[
                aws_iam.PolicyStatement(
                    actions=['iam:PassRole'],
                    resources=[aes_siem_snapshot_role.role_arn]
                ),
            ]
        )

        aes_siem_deploy_role_for_lambda = aws_iam.Role(
            self, 'AesSiemDeployRoleForLambda',
            role_name='aes-siem-deploy-role-for-lambda',
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonOpenSearchServiceFullAccess'),
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
            ],
            inline_policies=[policydoc_assume_snapshrole, policydoc_snapshot,
                             policydoc_create_loggroup, policydoc_crhelper],
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com')
        )

        if vpc_type:
            aes_siem_deploy_role_for_lambda.add_managed_policy(
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaVPCAccessExecutionRole')
            )

        # for alert from Amazon OpenSearch Service
        aes_siem_sns_role = aws_iam.Role(
            self, 'AesSiemSnsRole',
            role_name='aes-siem-sns-role',
            assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')
        )

        # EC2 role
        aes_siem_es_loader_ec2_role = aws_iam.Role(
            self, 'AesSiemEsLoaderEC2Role',
            role_name='aes-siem-es-loader-for-ec2',
            assumed_by=aws_iam.ServicePrincipal('ec2.amazonaws.com'),
        )

        aws_iam.CfnInstanceProfile(
            self, 'AesSiemEsLoaderEC2InstanceProfile',
            instance_profile_name=aes_siem_es_loader_ec2_role.role_name,
            roles=[aes_siem_es_loader_ec2_role.role_name]
        )

        ######################################################################
        # in VPC
        ######################################################################
        aes_role_exist = check_iam_role('/aws-service-role/es.amazonaws.com/')
        if vpc_type and not aes_role_exist:
            slr_aes = aws_iam.CfnServiceLinkedRole(
                self, 'AWSServiceRoleForAmazonOpenSearchService',
                aws_service_name='es.amazonaws.com',
                description='Created by cloudformation of siem stack'
            )
            slr_aes.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # SQS for es-loader's DLQ
        ######################################################################
        sqs_aes_siem_dlq = aws_sqs.Queue(
            self, 'AesSiemDlq', queue_name='aes-siem-dlq',
            retention_period=core.Duration.days(14))

        sqs_aes_siem_splitted_logs = aws_sqs.Queue(
            self, 'AesSiemSqsSplitLogs',
            queue_name='aes-siem-sqs-splitted-logs',
            dead_letter_queue=aws_sqs.DeadLetterQueue(
                max_receive_count=2, queue=sqs_aes_siem_dlq),
            visibility_timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            retention_period=core.Duration.days(14))
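        # NB: for an SQS event source the queue's visibility timeout should be
        # at least the consuming function's timeout, hence ES_LOADER_TIMEOUT
        # is reused for the queue above.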

        ######################################################################
        # Setup Lambda
        ######################################################################
        # setup lambda of es_loader
        lambda_es_loader_vpc_kwargs = {}
        if vpc_type:
            lambda_es_loader_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': vpc_subnets,
            }

        lambda_es_loader = aws_lambda.Function(
            self, 'LambdaEsLoader', **lambda_es_loader_vpc_kwargs,
            function_name='aes-siem-es-loader',
            description=f'{SOLUTION_NAME} / es-loader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArch'),
            # code=aws_lambda.Code.asset('../lambda/es_loader.zip'),
            code=aws_lambda.Code.asset('../lambda/es_loader'),
            handler='index.lambda_handler',
            memory_size=2048,
            timeout=core.Duration.seconds(ES_LOADER_TIMEOUT),
            reserved_concurrent_executions=(
                reserved_concurrency.value_as_number),
            dead_letter_queue_enabled=True,
            dead_letter_queue=sqs_aes_siem_dlq,
            environment={
                'GEOIP_BUCKET': s3bucket_name_geo, 'LOG_LEVEL': 'info',
                'POWERTOOLS_LOGGER_LOG_EVENT': 'false',
                'POWERTOOLS_SERVICE_NAME': 'es-loader',
                'POWERTOOLS_METRICS_NAMESPACE': 'SIEM'})
        es_loader_newver = lambda_es_loader.add_version(
            name=__version__, description=__version__)
        es_loader_opt = es_loader_newver.node.default_child.cfn_options
        es_loader_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # send only
        # sqs_aes_siem_dlq.grant(lambda_es_loader, 'sqs:SendMessage')
        # send and receive: re-processing the DLQ can loop if a message keeps failing
        sqs_aes_siem_dlq.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        sqs_aes_siem_splitted_logs.grant(
            lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage',
            'sqs:DeleteMessage', 'sqs:GetQueueAttributes')

        lambda_es_loader.add_event_source(
            aws_lambda_event_sources.SqsEventSource(
                sqs_aes_siem_splitted_logs, batch_size=1))

        # es-loader-on-EC2 role
        sqs_aes_siem_dlq.grant(
            aes_siem_es_loader_ec2_role, 'sqs:GetQueue*', 'sqs:ListQueues*',
            'sqs:ReceiveMessage*', 'sqs:DeleteMessage*')

        lambda_geo = aws_lambda.Function(
            self, 'LambdaGeoipDownloader',
            function_name='aes-siem-geoip-downloader',
            description=f'{SOLUTION_NAME} / geoip-downloader',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArch'),
            code=aws_lambda.Code.asset('../lambda/geoip_downloader'),
            handler='index.lambda_handler',
            memory_size=320,
            timeout=core.Duration.seconds(300),
            environment={
                's3bucket_name': s3bucket_name_geo,
                'license_key': geoip_license_key.value_as_string,
            }
        )
        lambda_geo_newver = lambda_geo.add_version(
            name=__version__, description=__version__)
        lambda_geo_opt = lambda_geo_newver.node.default_child.cfn_options
        lambda_geo_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        ######################################################################
        # setup OpenSearch Service
        ######################################################################
        lambda_deploy_es = aws_lambda.Function(
            self, 'LambdaDeployAES',
            function_name='aes-siem-deploy-aes',
            description=f'{SOLUTION_NAME} / opensearch domain deployment',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArch'),
            # code=aws_lambda.Code.asset('../lambda/deploy_es.zip'),
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_domain_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        lambda_deploy_es.add_environment(
            's3_snapshot', s3_snapshot.bucket_name)
        if vpc_type:
            lambda_deploy_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_deploy_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_deploy_es.add_environment('vpc_subnet_id', 'None')
            lambda_deploy_es.add_environment('security_group_id', 'None')
        deploy_es_newver = lambda_deploy_es.add_version(
            name=__version__, description=__version__)
        deploy_es_opt = deploy_es_newver.node.default_child.cfn_options
        deploy_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # execute lambda_deploy_es to deploy the Amazon OpenSearch Service domain
        aes_domain = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainDeployedR2',
            service_token=lambda_deploy_es.function_arn,)
        aes_domain.add_override('Properties.ConfigVersion', __version__)
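        # Bumping ConfigVersion on every release changes a property of the
        # custom resource, so CloudFormation sends an Update event to
        # lambda_deploy_es and the domain is reconfigured on stack updates.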

        es_endpoint = aes_domain.get_att('es_endpoint').to_string()
        lambda_es_loader.add_environment('ES_ENDPOINT', es_endpoint)
        lambda_es_loader.add_environment(
            'SQS_SPLITTED_LOGS_URL', sqs_aes_siem_splitted_logs.queue_url)

        lambda_configure_es_vpc_kwargs = {}
        if vpc_type:
            lambda_configure_es_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': aws_ec2.SubnetSelection(subnets=[subnet1, ]), }
        lambda_configure_es = aws_lambda.Function(
            self, 'LambdaConfigureAES', **lambda_configure_es_vpc_kwargs,
            function_name='aes-siem-configure-aes',
            description=f'{SOLUTION_NAME} / opensearch configuration',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            architecture=aws_lambda.Architecture.X86_64,
            # architecture=region_mapping.find_in_map(
            #    core.Aws.REGION, 'LambdaArch'),
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_config_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
                'es_endpoint': es_endpoint,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        lambda_configure_es.add_environment(
            's3_snapshot', s3_snapshot.bucket_name)
        if vpc_type:
            lambda_configure_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_configure_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_configure_es.add_environment('vpc_subnet_id', 'None')
            lambda_configure_es.add_environment('security_group_id', 'None')
        configure_es_newver = lambda_configure_es.add_version(
            name=__version__, description=__version__)
        configure_es_opt = configure_es_newver.node.default_child.cfn_options
        configure_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        aes_config = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainConfiguredR2',
            service_token=lambda_configure_es.function_arn,)
        aes_config.add_override('Properties.ConfigVersion', __version__)
        aes_config.add_depends_on(aes_domain)
        aes_config.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        es_arn = (f'arn:aws:es:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
                  f':domain/{aes_domain_name}')
        # grant permission to es_loader role
        inline_policy_to_load_entries_into_es = aws_iam.Policy(
            self, 'aes-siem-policy-to-load-entries-to-es',
            policy_name='aes-siem-policy-to-load-entries-to-es',
            statements=[
                aws_iam.PolicyStatement(
                    actions=['es:*'],
                    resources=[es_arn + '/*', ]),
            ]
        )
        lambda_es_loader.role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)
        aes_siem_es_loader_ec2_role.attach_inline_policy(
            inline_policy_to_load_entries_into_es)

        # grant additional permission to es_loader role
        additional_kms_cmks = self.node.try_get_context('additional_kms_cmks')
        if additional_kms_cmks:
            inline_policy_access_to_additional_cmks = aws_iam.Policy(
                self, 'access_to_additional_cmks',
                policy_name='access_to_additional_cmks',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['kms:Decrypt'],
                        resources=sorted(set(additional_kms_cmks))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_cmks)
        additional_buckets = self.node.try_get_context('additional_s3_buckets')

        if additional_buckets:
            buckets_list = []
            for bucket in additional_buckets:
                buckets_list.append(f'arn:aws:s3:::{bucket}')
                buckets_list.append(f'arn:aws:s3:::{bucket}/*')
            inline_policy_access_to_additional_buckets = aws_iam.Policy(
                self, 'access_to_additional_buckets',
                policy_name='access_to_additional_buckets',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['s3:GetObject*', 's3:GetBucket*', 's3:List*'],
                        resources=sorted(set(buckets_list))
                    )
                ]
            )
            lambda_es_loader.role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)
            aes_siem_es_loader_ec2_role.attach_inline_policy(
                inline_policy_access_to_additional_buckets)

        kms_aes_siem.grant_decrypt(lambda_es_loader)
        kms_aes_siem.grant_decrypt(aes_siem_es_loader_ec2_role)

        ######################################################################
        # s3 notification and grant permission
        ######################################################################
        s3_geo.grant_read_write(lambda_geo)
        s3_geo.grant_read(lambda_es_loader)
        s3_geo.grant_read(aes_siem_es_loader_ec2_role)
        s3_log.grant_read(lambda_es_loader)
        s3_log.grant_read(aes_siem_es_loader_ec2_role)

        # create s3 notification for es_loader
        notification = aws_s3_notifications.LambdaDestination(lambda_es_loader)

        # assign notification for the s3 OBJECT_CREATED event types
        # most log systems use PUT, but CLB also uses POST & multipart upload
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='AWSLogs/'))

        # For user logs, not AWS logs
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='UserLogs/'))

        # Download geoip to S3 once by executing lambda_geo
        get_geodb = aws_cloudformation.CfnCustomResource(
            self, 'ExecLambdaGeoipDownloader',
            service_token=lambda_geo.function_arn,)
        get_geodb.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # refresh the GeoIP database every 12 hours
        rule = aws_events.Rule(
            self, 'CwlRuleLambdaGeoipDownloaderDilly',
            schedule=aws_events.Schedule.rate(core.Duration.hours(12)))
        rule.add_target(aws_events_targets.LambdaFunction(lambda_geo))

        ######################################################################
        # bucket policy
        ######################################################################
        s3_awspath = s3_log.bucket_arn + '/AWSLogs/' + core.Aws.ACCOUNT_ID
        bucket_policy_common1 = aws_iam.PolicyStatement(
            sid='ELB Policy',
            principals=[aws_iam.AccountPrincipal(
                account_id=region_mapping.find_in_map(
                    core.Aws.REGION, 'ElbV2AccountId'))],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],)
        # NLB / ALB / R53resolver / VPC Flow Logs
        bucket_policy_elb1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_elb2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For ALB NLB R53Resolver Flowlogs',
            principals=[aws_iam.ServicePrincipal(
                'delivery.logs.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_common1)
        s3_log.add_to_resource_policy(bucket_policy_elb1)
        s3_log.add_to_resource_policy(bucket_policy_elb2)

        # CloudTrail
        bucket_policy_trail1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For Cloudtrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:GetBucketAcl'], resources=[s3_log.bucket_arn],)
        bucket_policy_trail2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For CloudTrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_trail1)
        s3_log.add_to_resource_policy(bucket_policy_trail2)

        # GuardDuty
        bucket_policy_gd1 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to use the getBucketLocation operation',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:GetBucketLocation'], resources=[s3_log.bucket_arn],)
        bucket_policy_gd2 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to upload objects to the bucket',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_log.bucket_arn + '/*'],)
        bucket_policy_gd5 = aws_iam.PolicyStatement(
            sid='Deny non-HTTPS access', effect=aws_iam.Effect.DENY,
            actions=['s3:*'], resources=[s3_log.bucket_arn + '/*'],
            conditions={'Bool': {'aws:SecureTransport': 'false'}})
        bucket_policy_gd5.add_any_principal()
        s3_log.add_to_resource_policy(bucket_policy_gd1)
        s3_log.add_to_resource_policy(bucket_policy_gd2)
        s3_log.add_to_resource_policy(bucket_policy_gd5)

        # Config
        bucket_policy_config1 = aws_iam.PolicyStatement(
            sid='AWSConfig BucketPermissionsCheck and BucketExistenceCheck',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],)
        bucket_policy_config2 = aws_iam.PolicyStatement(
            sid='AWSConfigBucketDelivery',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:PutObject'], resources=[s3_awspath + '/Config/*'],
            conditions={
                'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_config1)
        s3_log.add_to_resource_policy(bucket_policy_config2)

        # geoip
        bucket_policy_geo1 = aws_iam.PolicyStatement(
            sid='Allow geoip downloader and es-loader to read/write',
            principals=[lambda_es_loader.role, lambda_geo.role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_geo.bucket_arn + '/*'],)
        s3_geo.add_to_resource_policy(bucket_policy_geo1)

        # ES Snapshot
        bucket_policy_snapshot = aws_iam.PolicyStatement(
            sid='Allow ES to store snapshot',
            principals=[aes_siem_snapshot_role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_snapshot.bucket_arn + '/*'],)
        s3_snapshot.add_to_resource_policy(bucket_policy_snapshot)

        ######################################################################
        # for multiaccount / organizations
        ######################################################################
        if org_id or no_org_ids:
            ##################################################################
            # KMS key policy for multiaccount / organizations
            ##################################################################
            # for CloudTrail
            cond_tail2 = self.make_resource_list(
                path='arn:aws:cloudtrail:*:', tail=':trail/*',
                keys=self.list_without_none(org_mgmt_id, no_org_ids))
            key_policy_mul_trail2 = aws_iam.PolicyStatement(
                sid=('Allow CloudTrail to encrypt logs for multiaccounts'),
                actions=['kms:GenerateDataKey*'],
                principals=[aws_iam.ServicePrincipal(
                    'cloudtrail.amazonaws.com')],
                resources=['*'],
                conditions={'StringLike': {
                    'kms:EncryptionContext:aws:cloudtrail:arn': cond_tail2}})
            kms_aes_siem.add_to_resource_policy(key_policy_mul_trail2)

            # for replication
            key_policy_rep1 = aws_iam.PolicyStatement(
                sid=('Enable cross account encrypt access for S3 Cross Region '
                     'Replication'),
                actions=['kms:Encrypt'],
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                resources=['*'],)
            kms_aes_siem.add_to_resource_policy(key_policy_rep1)

            ##################################################################
            # Bucket Policy for multiaccount / organizations
            ##################################################################
            s3_log_bucket_arn = 'arn:aws:s3:::' + s3bucket_name_log

            # for CloudTrail
            s3_mulpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_org_trail = aws_iam.PolicyStatement(
                sid='AWSCloudTrailWrite for Multiaccounts / Organizations',
                principals=[
                    aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_mulpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_org_trail)

            # config
            s3_conf_multpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/Config/*',
                keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids))
            bucket_policy_mul_config2 = aws_iam.PolicyStatement(
                sid='AWSConfigBucketDelivery for Multiaccounts / Organizations',
                principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
                actions=['s3:PutObject'], resources=s3_conf_multpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_mul_config2)

            # for replication
            bucket_policy_rep1 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on objects',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:ReplicateDelete', 's3:ReplicateObject',
                         's3:ReplicateTags', 's3:GetObjectVersionTagging',
                         's3:ObjectOwnerOverrideToBucketOwner'],
                resources=[f'{s3_log_bucket_arn}/*'])
            bucket_policy_rep2 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on bucket',
                principals=self.make_account_principals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:List*', 's3:GetBucketVersioning',
                         's3:PutBucketVersioning'],
                resources=[f'{s3_log_bucket_arn}'])
            s3_log.add_to_resource_policy(bucket_policy_rep1)
            s3_log.add_to_resource_policy(bucket_policy_rep2)

        ######################################################################
        # SNS topic for Amazon OpenSearch Service Alert
        ######################################################################
        sns_topic = aws_sns.Topic(
            self, 'SnsTopic', topic_name='aes-siem-alert',
            display_name='AES SIEM')

        sns_topic.add_subscription(aws_sns_subscriptions.EmailSubscription(
            email_address=sns_email.value_as_string))
        sns_topic.grant_publish(aes_siem_sns_role)

        ######################################################################
        # output of CFn
        ######################################################################
        kibanaurl = f'https://{es_endpoint}/_dashboards/'
        kibanaadmin = aes_domain.get_att('kibanaadmin').to_string()
        kibanapass = aes_domain.get_att('kibanapass').to_string()

        core.CfnOutput(self, 'RoleDeploy', export_name='role-deploy',
                       value=aes_siem_deploy_role_for_lambda.role_arn)
        core.CfnOutput(self, 'DashboardsUrl', export_name='dashboards-url',
                       value=kibanaurl)
        core.CfnOutput(self, 'DashboardsPassword',
                       export_name='dashboards-pass', value=kibanapass,
                       description=('Please change the password in OpenSearch '
                                    'Dashboards ASAP'))
        core.CfnOutput(self, 'DashboardsAdminID',
                       export_name='dashboards-admin', value=kibanaadmin)
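The excerpt above relies on two helpers that are not shown, self.list_without_none and self.make_resource_list. Judging only from the call sites, a minimal sketch of what they plausibly look like (the names are real, the bodies are inferred assumptions):

# Hypothetical reconstructions, inferred from the call sites above.
def list_without_none(self, *args):
    # Flatten arguments (scalars or lists) and drop None/empty entries.
    keys = []
    for arg in args:
        if isinstance(arg, (list, tuple)):
            keys.extend(item for item in arg if item)
        elif arg:
            keys.append(arg)
    return keys

def make_resource_list(self, path='', tail='', keys=None):
    # Build one ARN-like string per key, e.g.
    # 'arn:aws:cloudtrail:*:<account-id>:trail/*'.
    return [f'{path}{key}{tail}' for key in (keys or [])]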
Example #25
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Tag all constructs with the project for easy billing drilldown,
        # filtering, and organization.
        core.Tags.of(self).add('project', 'MediaTranscription')

        # Media files bucket
        media_bucket = s3.Bucket(
            self,
            'media-transcription-bucket',
            encryption=s3.BucketEncryption.S3_MANAGED,
        )

        # SQS queue for media files bucket event notifications
        media_bucket_event_queue = sqs.Queue(
            self,
            'media-transcription-event-notification-queue',
            queue_name='media-transcription-event-notification-queue',
            visibility_timeout=core.Duration.seconds(60),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=3,
                queue=sqs.Queue(
                    self,
                    'media-transcription-event-notifications-dlq',
                    queue_name='media-transcription-event-notifications-dlq',
                )),
        )

        # S3 object created notifications sent to SQS queue
        media_bucket.add_event_notification(
            s3.EventType.OBJECT_CREATED,
            s3n.SqsDestination(media_bucket_event_queue),
            *[s3.NotificationKeyFilter(prefix='media-input/')],
        )

        # Lambda function to create/submit Transcribe jobs
        transcribe_job_init_fn = lambda_.Function(
            self,
            'transcribe-job-init-fn',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset(
                '../lambdas/transcribe-job-init-fn',
                # The Lambda runtime doesn't ship the latest boto3 by
                # default. To use the latest boto3, we have to pip install
                # and bundle locally using Docker.
                # Q: Why is the latest boto3 needed?
                # A: https://github.com/boto/boto3/issues/2630
                # The ECR containers created for bundling should be deleted
                # to avoid cost.
                # TODO: Revert to default bundling once the Lambda runtime
                # ships a recent enough boto3.
                bundling={
                    'image':
                    lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                    'command': [
                        'bash', '-c',
                        'pip install -r requirements.txt -t /asset-output'
                        ' && cp -au . /asset-output'
                    ]
                }),
            handler='fn.handler',
            reserved_concurrent_executions=1,  # Effectively single-threaded
        )
        # Triggered by SQS messages created for media file puts
        transcribe_job_init_fn.add_event_source(
            les.SqsEventSource(
                queue=media_bucket_event_queue,
                batch_size=5,
                enabled=True,
            ))
        # Grant access to start transcription jobs
        transcribe_job_init_fn.add_to_role_policy(
            statement=iam.PolicyStatement(
                actions=[
                    'transcribe:StartTranscriptionJob',
                ],
                resources=['*'],
                effect=iam.Effect.ALLOW,
            ))

        # Grant Lambda role to read and write to input and output portions of
        # the S3 bucket.
        # Q: Why grant Lambda the permissions instead of Transcribe service?
        # A: Two-fold:
        #   -  i) https://amzn.to/321Nx5I
        #   - ii) Granting just to this Lambda means other Transcribe jobs
        #         across the account cannot use this bucket (least privilege).
        media_bucket.grant_read(
            identity=transcribe_job_init_fn.grant_principal,
            objects_key_pattern='media-input/*')
        # Originally, Transcribe would not accept a job unless it had write
        # permission on the whole bucket, so writes could not be limited to
        # a prefix. Granting access to the .write_access_check_file.temp
        # file (below) in addition to the output prefix appears to satisfy
        # that check.
        media_bucket.grant_write(
            identity=transcribe_job_init_fn.grant_principal,
            objects_key_pattern='transcribe-output-raw/*')
        # This is just as frustrating to you as it is to me.
        media_bucket.grant_write(
            identity=transcribe_job_init_fn.grant_principal,
            objects_key_pattern='.write_access_check_file.temp')

        # DynamoDB table for Jobs metadata
        jobs_metadata_table = ddb.Table(
            self,
            'MediaTranscription-TranscriptionJobs',
            table_name='MediaTranscription-TranscriptionJobs',
            partition_key=ddb.Attribute(
                name='Bucket-Key-ETag',
                type=ddb.AttributeType.STRING,
            ),
            billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
        )
        jobs_metadata_table.grant(transcribe_job_init_fn.grant_principal, *[
            'dynamodb:GetItem',
            'dynamodb:PutItem',
        ])

        # Create IAM Group with read/write permissions to S3 bucket
        # TODO: Make this more federated and robust
        console_users_group = iam.Group(self, 'MediaTranscriptionConsoleUsers')
        console_users_group.attach_inline_policy(policy=iam.Policy(
            self,
            'MediaTranscriptionConsoleUserS3Access',
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        's3:ListBucket',
                    ],
                    resources=[
                        media_bucket.bucket_arn,
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        's3:GetObject',
                        's3:PutObject',
                    ],
                    resources=[
                        media_bucket.arn_for_objects('media-input/*'),
                    ],
                ),
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        's3:GetObject',
                    ],
                    resources=[
                        media_bucket.arn_for_objects(
                            'transcribe-output-raw/*'),
                    ],
                ),
            ],
        ))
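For reference, a minimal sketch of the fn.handler that the SqsEventSource above would invoke. The handler name and the job-naming scheme are assumptions; the boto3 call itself is real, and OutputKey is the parameter from boto/boto3#2630 that motivated the Docker bundling:

import json
import boto3

transcribe = boto3.client('transcribe')

def handler(event, context):
    # Each SQS record body carries an S3 event notification envelope.
    for record in event['Records']:
        s3_event = json.loads(record['body'])
        for s3_record in s3_event.get('Records', []):
            bucket = s3_record['s3']['bucket']['name']
            key = s3_record['s3']['object']['key']
            job_name = key.replace('/', '-')  # assumed naming scheme
            transcribe.start_transcription_job(
                TranscriptionJobName=job_name,
                Media={'MediaFileUri': f's3://{bucket}/{key}'},
                LanguageCode='en-US',  # assumption
                OutputBucketName=bucket,
                # OutputKey needs a recent boto3 (boto/boto3#2630), hence
                # the Docker bundling in the stack above.
                OutputKey=f'transcribe-output-raw/{job_name}.json')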
Example #26
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        layer = aws.PipLayers(self,
                              "scoreboard_layer",
                              layers={
                                  "htmlgen": "htmlgen/requirements.txt",
                                  "parse_globals":
                                  "parse_globals/requirements.txt",
                                  "bot": "bot/requirements.txt"
                              })

        # Create
        # * the generator function
        # * Namemap-table
        #   * allow generator to read from namemap-table
        #   (This might change - why not pass the mapping structure in the message?)
        # * Datacache-bucket
        #   * Allow generator to read and write to bucket

        htmlgen = aws.Function(self,
                               "htmlgen",
                               layers=layer.layers,
                               timeout=core.Duration.seconds(20),
                               memory_size=1024)

        # id: str (boardid), name: str (username), value: str (replacement value)
        namemap = aws.Table(self,
                            "namemap",
                            sort_key=aws_dynamodb.Attribute(
                                name='name',
                                type=aws_dynamodb.AttributeType.STRING),
                            removal_policy=CONFIGDATA)
        namemap.grant_read_data(htmlgen)

        no_point_days = aws.Table(self, "nopointdays")

        # id: str (boardid), day: int, results_1: dict ({player: score, ...}), results_2: dict ({player: score, ...})
        globalscores = aws.Table(self,
                                 "globalscores",
                                 partition_key=aws_dynamodb.Attribute(
                                     name='year',
                                     type=aws_dynamodb.AttributeType.NUMBER),
                                 sort_key=aws_dynamodb.Attribute(
                                     name='day',
                                     type=aws_dynamodb.AttributeType.NUMBER),
                                 removal_policy=EPHEMERALDATA)
        parse_globals = aws.Function(self,
                                     "parse_globals",
                                     layers=layer.layers,
                                     timeout=core.Duration.seconds(20),
                                     memory_size=1024)
        parse_globals.add_environment("DDB_GLOBALSCORES",
                                      globalscores.table_name)
        globalscores.grant_read_write_data(parse_globals)
        globalscores.grant_read_data(htmlgen)

        timestamps = aws.Table(self,
                               "timestamps",
                               removal_policy=EPHEMERALDATA)
        htmlgen.add_environment("DDB_TIMESTAMPS", timestamps.table_name)
        timestamps.grant_write_data(htmlgen)

        datacache = aws.Bucket(self, "datacache")
        datacache.grant_read_write(htmlgen)

        htmlbucket = aws.Bucket(
            self,
            "html",
            removal_policy=EPHEMERALDATA,
            auto_delete_objects=True,
            block_public_access=None,
            website_error_document="error.html",
            website_index_document="scoreboard.html",
            cors=[
                aws_s3.CorsRule(allowed_methods=[aws_s3.HttpMethods.GET],
                                allowed_headers=["*"],
                                allowed_origins=["*"])
            ])
        htmlbucket.grant_public_access()
        core.CfnOutput(self,
                       f"{id}_bucketurl",
                       value=f"BUCKET_URL={htmlbucket.bucket_website_url}")
        htmlbucket.grant_read_write(htmlgen)
        htmlgen.add_environment("S3_DATACACHE", datacache.bucket_name)
        htmlgen.add_environment("S3_HTML", htmlbucket.bucket_name)
        htmlgen.add_environment("DDB_NAMEMAP", namemap.table_name)

        aws_s3_deployment.BucketDeployment(
            self,
            "StaticHtml",
            sources=[aws_s3_deployment.Source.asset("htmlgen/frontend")],
            destination_bucket=htmlbucket,
            prune=False)

        # Create
        # * spawner function
        # * boardconfig-table
        #   * allow spawner to read from boardconfig-table
        # * generator_queue
        #   * allow spawner to post messages to queue
        spawner = aws.Function(self, "spawner", layers=layer.layers)
        boardconfig = aws.Table(
            self,
            "boardconfig",
            stream=aws_dynamodb.StreamViewType.NEW_AND_OLD_IMAGES,
            removal_policy=CONFIGDATA)
        boardconfig.grant_read_data(spawner)
        spawner.add_environment("DDB_CONFIG", boardconfig.table_name)
        spawner.add_environment("DDB_NOPOINTDAYS", no_point_days.table_name)

        boardconfig_source = aws_lambda_event_sources.DynamoEventSource(
            boardconfig, starting_position=aws_lambda.StartingPosition.LATEST)

        boarddeletions = aws.Function(self, "boarddeletions")
        boarddeletions.add_event_source(boardconfig_source)
        boarddeletions.add_environment("S3_HTML", htmlbucket.bucket_name)
        htmlbucket.grant_read_write(boarddeletions)

        generator_queue = aws.Queue(self, "generator_queue")
        generator_queue.grant_send_messages(spawner)
        spawner.add_environment("SQS_GENERATOR", generator_queue.queue_name)
        spawner.add_environment("DDB_TIMESTAMPS", timestamps.table_name)
        timestamps.grant_read_data(spawner)

        # Connect the generator_queue to the htmlgen-function
        event_source = aws_lambda_event_sources.SqsEventSource(generator_queue,
                                                               batch_size=10)
        htmlgen.add_event_source(event_source)

        # Admin API
        adminhandler = aws.Function(self, "adminhandler")
        adminhandlerApi = aws_apigateway.LambdaRestApi(self,
                                                       "adminapi",
                                                       handler=adminhandler)
        core.CfnOutput(self,
                       "root_url",
                       value=f"Admin URL={adminhandlerApi.url_for_path()}")
        adminhandler.add_environment("DDB_CONFIG", boardconfig.table_name)
        boardconfig.grant_read_write_data(adminhandler)

        # Slack API
        api = aws.RestApi(self, "slack")

        slack = aws.ResourceWithLambda(
            self,
            "bot",
            verb="POST",
            description="Handle incoming Slack-bot interaction",
            parent_resource=api.root,
            lambda_layers=[layer.idlayers["bot"]])
        slack.handler.add_environment(
            "BOT_TOKEN", read_token_from_file('slack_bot_token.txt'))
        slack.handler.add_environment(
            "BOT_VERIFICATION",
            read_token_from_file('slack_verification_token.txt'))
        # "xoxb-1033954193568-1654676166455-Vzom9aQY9NUjAYR5mhKZP70k")
        slack.handler.add_environment("DDB_CONFIG", boardconfig.table_name)
        slack.handler.add_environment("DDB_NAMEMAP", namemap.table_name)
        namemap.grant_read_write_data(slack.handler)
        boardconfig.grant_read_write_data(slack.handler)

        # aws.Rule(
        #     self,
        #     "Test",
        #     description="Remove after functions verified - Fire every minute for some duration in Februaryx",
        #     schedule=aws_events.Schedule.cron(minute="*", hour="*", week_day="2", month="FEB"),
        #     target=spawner)

        aws.Rule(self,
                 "RestOfYear",
                 description="Fire every week jan-novx",
                 schedule=aws_events.Schedule.cron(minute="0",
                                                   hour="4",
                                                   week_day="2",
                                                   month="JAN-NOV"),
                 target=spawner)
        aws.Rule(self,
                 "Mornings_December",
                 description="Every second minute 06-08 (CET) 1-25 decx",
                 schedule=aws_events.Schedule.cron(minute="0/2",
                                                   hour="6-7",
                                                   day="1-25",
                                                   month="DEC"),
                 target=spawner)
        aws.Rule(self,
                 "Daytime_December",
                 description="Every 20 minutes 08-15 (CET) 1-25 decx",
                 schedule=aws_events.Schedule.cron(minute="0/20",
                                                   hour="8-15",
                                                   day="1-25",
                                                   month="DEC"),
                 target=spawner)
        aws.Rule(self,
                 "Nighttime_December",
                 description="Every hour 00-6,14-24 (CET) 1-25 decx",
                 schedule=aws_events.Schedule.cron(minute="0",
                                                   hour="0-6,14-23",
                                                   day="1-25",
                                                   month="DEC"),
                 target=spawner)
        aws.Rule(self,
                 "EndOf_December",
                 description="Every hour 9-23 (CET) 25-31 decx",
                 schedule=aws_events.Schedule.cron(minute="0",
                                                   hour="9-23",
                                                   day="26-31",
                                                   month="DEC"),
                 target=spawner)
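Note that this example builds everything through a project-local aws wrapper module (aws.Function, aws.Table, aws.Queue, ...) whose defaults are not shown. A hedged sketch of the kind of thin wrapper the call sites imply; the conventions here (asset path, handler name, default partition key) are guesses, not the real module:

# Hypothetical wrapper module, illustrating the defaults the example
# appears to rely on.
from aws_cdk import aws_dynamodb, aws_lambda


def Function(scope, name, **kwargs):
    # Guessed convention: code asset and handler derived from the name.
    kwargs.setdefault('runtime', aws_lambda.Runtime.PYTHON_3_8)
    kwargs.setdefault('code', aws_lambda.Code.from_asset(name))
    kwargs.setdefault('handler', f'{name}.handler')
    return aws_lambda.Function(scope, name, **kwargs)


def Table(scope, name, **kwargs):
    # Guessed convention: string partition key 'id' (the boardid) by default.
    kwargs.setdefault(
        'partition_key',
        aws_dynamodb.Attribute(name='id',
                               type=aws_dynamodb.AttributeType.STRING))
    return aws_dynamodb.Table(scope, name, **kwargs)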
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.current_dir = os.path.dirname(__file__)

        self.bucket = s3.Bucket(
            self,
            "qs-migration-bucket",
            bucket_name=f'quicksight-migration-{core.Aws.ACCOUNT_ID}',
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )

        self.quicksight_migration_lambda_role = iam.Role(
            self,
            'quicksight-migration-lambda-role',
            description='Role for the Quicksight dashboard migration Lambdas',
            role_name='quicksight-migration-lambda-role',
            max_session_duration=core.Duration.seconds(3600),
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={
                'AllowAccess':
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            'logs:CreateLogGroup', 'logs:CreateLogStream',
                            'logs:PutLogEvents'
                        ],
                        resources=[
                            f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:*'
                        ]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["sts:AssumeRole", "iam:ListRoles"],
                        resources=[
                            "arn:aws:iam::*:role/quicksight-migration-*-assume-role"
                        ]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["s3:PutObject", "s3:ListBucket"],
                        resources=[
                            self.bucket.bucket_arn,
                            f"{self.bucket.bucket_arn}/*"
                        ]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["secrets:GetSecretValue"],
                        resources=[
                            f"arn:aws:secretsmanager:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:secret:*"
                        ]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            "quicksight:Create*", "quicksight:Delete*",
                            "quicksight:Describe*", "quicksight:List*",
                            "quicksight:Search*", "quicksight:Update*"
                        ],
                        resources=["*"])
                ])
            })

        self.quicksight_migration_target_assume_role = iam.Role(
            self,
            'quicksight-migration-target-assume-role',
            description=
            'Role for the Quicksight dashboard migration Lambdas to assume',
            role_name='quicksight-migration-target-assume-role',
            max_session_duration=core.Duration.seconds(3600),
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={
                'AllowAccess':
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            "quicksight:Create*", "quicksight:Delete*",
                            "quicksight:Describe*", "quicksight:List*",
                            "quicksight:Search*", "quicksight:Update*"
                        ],
                        resources=["*"]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            "ssm:GetParameter",
                        ],
                        resources=["arn:aws:ssm:*:*:parameter/infra/config"])
                ])
            })

        self.quicksight_migration_target_assume_role.assume_role_policy.add_statements(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=['sts:AssumeRole'],
                principals=[iam.AccountPrincipal(core.Aws.ACCOUNT_ID)]))

        # API Gateway to SQS
        self.rest_api_role = iam.Role(
            self,
            "RestAPIRole",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonSQSFullAccess")
            ])

        self.queue = sqs.Queue(self,
                               "quicksight-migration-sqs-queue",
                               queue_name="quicksight-migration-sqs",
                               visibility_timeout=core.Duration.minutes(15))

        self.integration_response = apigw.IntegrationResponse(
            status_code="200",
            response_templates={"application/json": ""},
            response_parameters={
                "method.response.header.Access-Control-Allow-Headers":
                "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
                "method.response.header.Access-Control-Allow-Origin":
                "'*'",
                "method.response.header.Access-Control-Allow-Methods":
                "'POST,OPTIONS'"
            })

        self.api_integration_options = apigw.IntegrationOptions(
            credentials_role=self.rest_api_role,
            integration_responses=[self.integration_response],
            request_templates={
                "application/json":
                'Action=SendMessage&MessageBody=$util.urlEncode("$input.body")'
            },
            passthrough_behavior=apigw.PassthroughBehavior.NEVER,
            request_parameters={
                "integration.request.header.Content-Type":
                "'application/x-www-form-urlencoded'"
            })

        self.api_resource_sqs_integration = apigw.AwsIntegration(
            service="sqs",
            integration_http_method="POST",
            path="{}/{}".format(core.Aws.ACCOUNT_ID, self.queue.queue_name),
            options=self.api_integration_options)

        self.base_api = apigw.RestApi(
            self,
            'quicksight-migration-sqs',
            rest_api_name='quicksight-migration-sqs',
            default_cors_preflight_options=apigw.CorsOptions(
                allow_origins=apigw.Cors.ALL_ORIGINS,
                allow_methods=["POST", "OPTIONS"],
                allow_headers=[
                    'Access-Control-Allow-Origin',
                    'Access-Control-Allow-Headers', 'Content-Type'
                ]))

        self.base_api.root.add_method(
            "POST",
            self.api_resource_sqs_integration,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Headers':
                    True,
                    'method.response.header.Access-Control-Allow-Methods':
                    True,
                    'method.response.header.Access-Control-Allow-Origin': True
                }
            }])

        self.quicksight_migration_lambda = _lambda.Function(
            self,
            'quicksight-migration-lambda',
            handler='quicksight_migration.lambda_function.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                os.path.join(self.current_dir,
                             '../lambda/quicksight_migration/')),
            function_name='quicksight_migration_lambda',
            role=self.quicksight_migration_lambda_role,
            timeout=core.Duration.minutes(15),
            memory_size=1024,
            environment={
                'BUCKET_NAME': self.bucket.bucket_name,
                'S3_KEY': 'None',
                'INFRA_CONFIG_PARAM': '/infra/config',
                'SQS_URL': self.queue.queue_url
            })

        self.sqs_event_source = event_sources.SqsEventSource(self.queue)

        self.quicksight_migration_lambda.add_event_source(
            self.sqs_event_source)

        core.CfnOutput(self,
                       "MigrationAPIGatewayURL",
                       value=self.base_api.url,
                       description="Migration API GW URL")
Example #28
    def __init__(self, app: core.App, id: str) -> None:
        super().__init__(app, id)

        ##################################
        # Resource Property Config
        ##################################

        # see: https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-queueconfig
        domain_scan_timeout = 900
        queue_visibility_timeout = 6 * domain_scan_timeout
        # tldextract needs to cache the TLD list after a request, and /tmp is writable in Lambda
        tld_cache = os.path.join('/tmp', '.tld_set')

        ##################################
        # Domain Gatherer Lambda and Queue
        ##################################

        # create queue
        domain_queue = sqs.Queue(
            self,
            'domain-queue',
            visibility_timeout=core.Duration.seconds(queue_visibility_timeout),
            dead_letter_queue=sqs.DeadLetterQueue(
                max_receive_count=5,
                queue=sqs.Queue(self,
                                'domain-queue-dlq',
                                retention_period=core.Duration.days(5))))

        # create lambda to gather domains
        domain_gatherer_lambda = lambda_.Function(
            self,
            "domain-gatherer",
            code=lambda_.Code.from_asset(
                'lambda-releases/domain-gatherer.zip'),
            handler="handler.main",
            timeout=core.Duration.seconds(600),
            runtime=lambda_.Runtime.PYTHON_3_7,
            memory_size=400)
        # set env vars
        domain_gatherer_lambda.add_environment('SQS_URL',
                                               domain_queue.queue_url)
        domain_gatherer_lambda.add_environment('TLDEXTRACT_CACHE', tld_cache)

        # provide lambda with execution role
        domain_gatherer_lambda_exec_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'lambda:InvokeFunction', 'sqs:SendMessage',
                'sqs:DeleteMessage', 'sqs:SendMessageBatch',
                'sqs:SetQueueAttributes', 'sqs:GetQueueAttributes',
                'sqs:GetQueueUrl'
            ],
            resources=[domain_queue.queue_arn])
        domain_gatherer_lambda.add_to_role_policy(
            domain_gatherer_lambda_exec_policy)

        # allow lambda to send messages to queue
        domain_queue.grant_send_messages(domain_gatherer_lambda)

        # create rule to run the lambda every Friday
        rule = events.Rule(
            self,
            "Rule",
            schedule=events.Schedule.cron(minute='0',
                                          hour='18',
                                          month='*',
                                          week_day='FRI',
                                          year='*'),
        )
        rule.add_target(targets.LambdaFunction(domain_gatherer_lambda))

        ##################################
        # Domain Scan Lambda and Results Bucket
        ##################################

        # create lambda to scan domains
        domain_scanner_lambda = lambda_.Function(
            self,
            "domain-scanner",
            code=lambda_.Code.from_asset('lambda-releases/domain-scanner.zip'),
            handler="handler.main",
            timeout=core.Duration.seconds(domain_scan_timeout),
            runtime=lambda_.Runtime.PYTHON_3_7,
            memory_size=1000)

        # create sqs event source for domain scan lambda
        domain_scanner_lambda.add_event_source(
            sources.SqsEventSource(domain_queue, batch_size=2))

        # create s3 bucket to put results
        bucket = s3.Bucket(self,
                           'results-bucket',
                           versioned=False,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           block_public_access=s3.BlockPublicAccess(
                               block_public_acls=False,
                               ignore_public_acls=False,
                               block_public_policy=True,
                               restrict_public_buckets=True))

        # grant s3:PUT to the pa11y lambda
        bucket.grant_put(domain_scanner_lambda)

        # set an env var for bucket name
        domain_scanner_lambda.add_environment('BUCKET_NAME',
                                              bucket.bucket_name)

        # create execution role for domain scanner lambda
        domain_scanner_lambda_exec_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'lambda:InvokeFunction', 'sqs:SendMessage',
                'sqs:DeleteMessage', 'sqs:SendMessageBatch',
                'sqs:SetQueueAttributes', 'sqs:GetQueueAttributes',
                'sqs:GetQueueUrl', 's3:PutObject', 's3:PutObjectAcl'
            ],
            # s3:PutObject/PutObjectAcl apply to object ARNs, not the
            # bucket ARN, so grant both.
            resources=[bucket.bucket_arn, bucket.arn_for_objects('*')])
        domain_scanner_lambda.add_to_role_policy(
            domain_scanner_lambda_exec_policy)
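A minimal sketch of the handler.main shape this wiring implies for the scanner: up to two SQS records per invocation (batch_size=2), one result object per domain written to the results bucket. The message schema and scan logic are assumptions; only the event shape and the BUCKET_NAME variable come from the stack above:

import json
import os

import boto3

s3 = boto3.client('s3')


def scan_domain(domain):
    # Placeholder for the real scan logic shipped in the release zip.
    return {'domain': domain, 'status': 'scanned'}


def main(event, context):
    for record in event['Records']:
        domain = json.loads(record['body'])['domain']  # assumed schema
        result = scan_domain(domain)
        s3.put_object(Bucket=os.environ['BUCKET_NAME'],
                      Key=f'results/{domain}.json',
                      Body=json.dumps(result).encode('utf-8'))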
Example #29
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Message timeout; used by SQS and Lambda
        message_timeout = core.Duration.seconds(15)

        # SQS queue that the Raspberry Pi will write to
        queue = sqs.Queue(
            self,
            'Queue',
            visibility_timeout=message_timeout,
            receive_message_wait_time=core.Duration.seconds(20),
            retention_period=core.Duration.hours(1),
        )

        # DynamoDB table that the web app will read from
        icao_address = dynamodb.Attribute(
            name='IcaoAddress',
            type=dynamodb.AttributeType.STRING,
        )
        table = dynamodb.Table(
            self,
            'Table',
            partition_key=icao_address,
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        database = timestream.CfnDatabase(
            self,
            'Database',
            database_name='aircraft-database',
        )
        table2 = timestream.CfnTable(self,
                                     'Table2',
                                     database_name=database.ref,
                                     table_name='aircraft-table',
                                     retention_properties={
                                         'MemoryStoreRetentionPeriodInHours':
                                         1,
                                         'MagneticStoreRetentionPeriodInDays':
                                         1,
                                     })

        # IAM user for the Raspberry Pi
        user = iam.User(self, 'RaspberryPi')
        queue.grant_send_messages(user)
        access_key = iam.CfnAccessKey(
            self,
            'AccessKey',
            user_name=user.user_name,
        )

        # IAM role for Lambda function, so it can write to DynamoDB
        lambda_role = iam.Role(
            self,
            'LambdaRole',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
            ],
        )
        lambda_role.add_to_policy(
            iam.PolicyStatement(
                actions=[
                    'timestream:CancelQuery', 'timestream:DescribeEndpoints',
                    'timestream:DescribeTable', 'timestream:ListMeasures',
                    'timestream:Select', 'timestream:WriteRecords'
                ],
                resources=['*'],  # TODO: narrow down permissions
            ))
        table.grant_read_write_data(lambda_role)

        # Integration between SQS and Lambda
        event = lambda_event_sources.SqsEventSource(
            queue=queue,
            batch_size=10,
        )

        # Lambda function that processes messages from SQS queue and updates DynamoDB table
        import_function = lambda_.Function(
            self,
            'ImportFunction',
            description='Reads SQS messages and writes to DynamoDB',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset('lambda_import/'),
            timeout=message_timeout,
            handler='index.handler',
            role=lambda_role,
            events=[event],
            environment={
                'TABLE_NAME': table2.ref,
            },
        )

        # TODO: add custom log group
        # TODO: add metric filters for the number of successful and failed updates

        # Lambda function that reads from DynamoDB and returns data to API Gateway
        api_function = lambda_.Function(
            self,
            'ApiFunction',
            description='Reads from DynamoDB and returns to API GW',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset('lambda_api/'),
            timeout=message_timeout,
            handler='index.handler',
            role=lambda_role,
            environment={
                'TABLE_NAME': table.table_name,
            },
        )

        # API Gateway for requesting aircraft data
        api = apigateway.RestApi(
            self,
            'Api',
            endpoint_types=[apigateway.EndpointType.REGIONAL],
            cloud_watch_role=False,
        )

        aircraft_resource = api.root.add_resource('aircraft')

        aircraft_resource.add_method(
            http_method='GET',
            integration=apigateway.LambdaIntegration(
                api_function,
                proxy=True,
            ),
        )

        # Static website
        bucket = s3.Bucket(self, 'StaticWebsite')

        s3_deployment.BucketDeployment(
            self,
            'Deployment',
            sources=[
                s3_deployment.Source.asset('html/'),
            ],
            destination_bucket=bucket,
        )

        # Permissions between CloudFront and S3
        origin_identity = cloudfront.OriginAccessIdentity(self, 'Identity')
        bucket.grant_read(origin_identity.grant_principal)

        # CloudFront distribution pointing to both S3 and API Gateway
        s3_origin = cloudfront.SourceConfiguration(
            s3_origin_source=cloudfront.S3OriginConfig(
                s3_bucket_source=bucket,
                origin_access_identity=origin_identity,
            ),
            behaviors=[
                cloudfront.Behavior(
                    default_ttl=core.Duration.days(0),
                    min_ttl=core.Duration.days(0),
                    max_ttl=core.Duration.days(31),
                    is_default_behavior=True,
                )
            ])

        api_origin = cloudfront.SourceConfiguration(
            origin_path='/{}'.format(api.deployment_stage.stage_name),
            custom_origin_source=cloudfront.CustomOriginConfig(
                domain_name='{}.execute-api.{}.{}'.format(
                    api.rest_api_id, self.region, self.url_suffix), ),
            behaviors=[
                cloudfront.Behavior(
                    default_ttl=core.Duration.seconds(0),
                    min_ttl=core.Duration.seconds(0),
                    max_ttl=core.Duration.seconds(0),
                    path_pattern='/aircraft/*',
                )
            ])

        domain_name = self.node.try_get_context('domain_name')

        # If domain name is specified, create a certificate and alias configuration for CloudFront
        if domain_name is None:
            alias_configuration = None
        else:
            subdomain = 'aircraft.{}'.format(domain_name)

            zone = route53.HostedZone.from_lookup(
                self,
                'Zone',
                domain_name=domain_name,
            )

            certificate = acm.DnsValidatedCertificate(
                self,
                'Certificate',
                domain_name=subdomain,
                hosted_zone=zone,
                region='us-east-1',
            )

            alias_configuration = cloudfront.AliasConfiguration(
                acm_cert_ref=certificate.certificate_arn,
                names=[subdomain],
            )

        distribution = cloudfront.CloudFrontWebDistribution(
            self,
            'CDN',
            price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
            alias_configuration=alias_configuration,
            origin_configs=[
                s3_origin,
                api_origin,
            ],
        )

        # If domain name is specified, create a DNS record for CloudFront
        if domain_name is not None:
            route53.ARecord(
                self,
                'DnsRecord',
                record_name=subdomain,
                target=route53.AddressRecordTarget.from_alias(
                    alias_target=route53_targets.CloudFrontTarget(
                        distribution)),
                zone=zone,
            )

        # Outputs that are needed on the Raspberry Pi
        core.CfnOutput(
            self,
            'QueueUrl',
            value=queue.queue_url,
        )
        core.CfnOutput(
            self,
            'AccessKeyId',
            value=access_key.ref,
        )
        core.CfnOutput(
            self,
            'SecretAccessKey',
            value=access_key.attr_secret_access_key,
        )
        core.CfnOutput(
            self,
            'Region',
            value=self.region,
        )
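On the Raspberry Pi side, the four outputs above are exactly what a small boto3 producer needs. A minimal sketch; the message schema is an assumption, with the field name suggested by the IcaoAddress partition key:

import json

import boto3

# Placeholders correspond to the QueueUrl, AccessKeyId, SecretAccessKey
# and Region stack outputs.
sqs = boto3.client('sqs',
                   region_name='<Region>',
                   aws_access_key_id='<AccessKeyId>',
                   aws_secret_access_key='<SecretAccessKey>')

sqs.send_message(
    QueueUrl='<QueueUrl>',
    MessageBody=json.dumps({
        'IcaoAddress': '4840D6',  # assumed message schema
        'Altitude': 38000,
    }))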
    def __init__(self, scope: core.Construct, id: str, instance_id: str,
                 contact_flow_id: str, source_phone_number: str, timeout: int,
                 **kwargs):
        super().__init__(scope, id, **kwargs)

        web_bucket = _s3.Bucket(self,
                                "StaticWebBucket",
                                website_index_document="index.html",
                                website_error_document="index.html",
                                removal_policy=core.RemovalPolicy.DESTROY,
                                public_read_access=True)

        core.CfnOutput(self,
                       'WebBucketUrl',
                       value=web_bucket.bucket_domain_name)

        web_distribution = _clf.CloudFrontWebDistribution(
            self,
            'StaticWebDistribution',
            origin_configs=[
                _clf.SourceConfiguration(
                    s3_origin_source=_clf.S3OriginConfig(
                        s3_bucket_source=web_bucket),
                    behaviors=[_clf.Behavior(is_default_behavior=True)])
            ],
            viewer_protocol_policy=_clf.ViewerProtocolPolicy.REDIRECT_TO_HTTPS)

        _s3d.BucketDeployment(
            self,
            "S3StaticWebContentDeploymentWithInvalidation",
            sources=[
                _s3d.Source.asset(
                    f"{pathlib.Path(__file__).parent.absolute()}/site-content/build"
                )
            ],
            destination_bucket=web_bucket,
            distribution=web_distribution,
            distribution_paths=["/*"])

        file_bucket = _s3.Bucket(self,
                                 "FileBucket",
                                 removal_policy=core.RemovalPolicy.DESTROY)

        call_dead_letter_queue = _sqs.Queue(self,
                                            "CallDeadLetterQueue",
                                            fifo=True,
                                            content_based_deduplication=True)

        call_sqs_queue = _sqs.Queue(
            self,
            "CallSqsQueue",
            fifo=True,
            content_based_deduplication=True,
            visibility_timeout=core.Duration.seconds(120),
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=1, queue=call_dead_letter_queue))

        async_call_dead_letter_queue = _sqs.Queue(
            self,
            "AsyncCallDeadLetterQueue",
            fifo=True,
            content_based_deduplication=True)

        async_callout_queue = _sqs.Queue(
            self,
            "AsyncCalloutQueue",
            fifo=True,
            content_based_deduplication=True,
            visibility_timeout=core.Duration.seconds(120),
            dead_letter_queue=_sqs.DeadLetterQueue(
                max_receive_count=1, queue=async_call_dead_letter_queue))

        call_job_complete_sns_topic = _sns.Topic(
            self, "CallJobCompleteSnsTopic", display_name="CallJobCompletion")

        call_result_table = _dynamodb.Table(
            self,
            "CallResultDynamodbTable",
            billing_mode=_dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=_dynamodb.Attribute(
                name="task_id", type=_dynamodb.AttributeType.STRING),
            sort_key=_dynamodb.Attribute(name="receiver_id",
                                         type=_dynamodb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)

        callout_record_table = _dynamodb.Table(
            self,
            "CallTaskDynamodbTable",
            billing_mode=_dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=_dynamodb.Attribute(
                name="task_id", type=_dynamodb.AttributeType.STRING),
            sort_key=_dynamodb.Attribute(name="created_at",
                                         type=_dynamodb.AttributeType.NUMBER),
            removal_policy=core.RemovalPolicy.DESTROY)
        callout_record_table.add_global_secondary_index(
            partition_key=_dynamodb.Attribute(
                name='call_type', type=_dynamodb.AttributeType.STRING),
            sort_key=_dynamodb.Attribute(name='created_at',
                                         type=_dynamodb.AttributeType.NUMBER),
            index_name='CallTypeCreatedAtGlobalIndex',
            projection_type=_dynamodb.ProjectionType.ALL)

        python_function_layer = _lambda.LayerVersion(
            self,
            "LambdaPythonFunctionLayer",
            code=_lambda.Code.asset("aws_callouts_cdk/layer/_python"),
            compatible_runtimes=[
                _lambda.Runtime.PYTHON_3_7, _lambda.Runtime.PYTHON_3_8
            ],
            license="Available under the MIT-0 license")

        nodejs_function_layer = _lambda.LayerVersion(
            self,
            "LambdaNodeJsFunctionLayer",
            code=_lambda.Code.asset("aws_callouts_cdk/layer/_nodejs"),
            compatible_runtimes=[
                _lambda.Runtime.NODEJS_10_X, _lambda.Runtime.NODEJS_12_X
            ],
            license="Available under the MIT-0 license")

        global_python_function_arguments = {
            "code": _lambda.Code.asset("aws_callouts_cdk/src/python"),
            "layers": [python_function_layer],
            "runtime": _lambda.Runtime.PYTHON_3_7
        }

        global_nodeje_function_arguments = {
            "code": _lambda.Code.asset("aws_callouts_cdk/src/nodejs"),
            "layers": [nodejs_function_layer],
            "runtime": _lambda.Runtime.NODEJS_12_X
        }

        get_callout_job_function = _lambda.Function(
            self,
            "GetCalloutJobFunction",
            handler="get_call_job.lambda_handler",
            **global_python_function_arguments)
        get_callout_job_function.add_environment(key="S3Bucket",
                                                 value=file_bucket.bucket_name)
        file_bucket.grant_read(get_callout_job_function)

        callout_function = _lambda.Function(self,
                                            "CalloutFunction",
                                            handler="send_call.lambda_handler",
                                            **global_python_function_arguments)
        callout_function.add_environment(
            key="ContactFlowArn",
            value=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}/contact-flow/{contact_flow_id}"
        )
        callout_function.add_environment(key="SourcePhoneNumber",
                                         value=source_phone_number)
        callout_function.add_environment(key="ExcelFileBucket",
                                         value=file_bucket.bucket_name)
        callout_function.add_environment(key="AsynCalloutQueueUrl",
                                         value=async_callout_queue.queue_url)
        callout_function.add_to_role_policy(statement=_iam.PolicyStatement(
            resources=[
                f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}/*"
            ],
            actions=["connect:StartOutboundVoiceContact"]))
        callout_function.add_event_source(source=_les.SqsEventSource(
            queue=async_callout_queue, batch_size=1))
        file_bucket.grant_read_write(callout_function)

        response_handler_function = _lambda.Function(
            self,
            "ResponseHandlerFunction",
            handler="response_handler.lambda_handler",
            **global_python_function_arguments)
        response_handler_function.add_permission(
            id="ResponseHandlerFunctionLambdaInvokePermission",
            principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_account=self.account,
            source_arn=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
        )

        send_task_success_function = _lambda.Function(
            self,
            "SendTaskSuccessFunction",
            handler="send_task_success.lambda_handler",
            **global_python_function_arguments)
        send_task_success_function.add_permission(
            id="SendTaskSuccessFunctionLambdaInvokePermission",
            principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_account=self.account,
            source_arn=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
        )
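        # The 'Callout with AWS Connect' state in the definition further
        # below pauses with .waitForTaskToken; this function presumably
        # completes it. A minimal sketch of send_task_success.lambda_handler,
        # assuming the token arrives in the Connect contact-flow parameters:
        #
        #     import json
        #     import boto3
        #
        #     sfn = boto3.client('stepfunctions')
        #
        #     def lambda_handler(event, context):
        #         params = event['Details']['Parameters']  # assumed shape
        #         sfn.send_task_success(
        #             taskToken=params['TaskToken'],
        #             output=json.dumps({'status': 'Completed'}))
        #         return {'status': 'OK'}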

        get_call_result_function = _lambda.Function(
            self,
            "GetCallResultFunction",
            handler="get_call_result.lambda_handler",
            memory_size=512,
            **global_python_function_arguments)
        get_call_result_function.add_environment(
            key="CallResultDynamoDBTable", value=call_result_table.table_name)
        get_call_result_function.add_environment(key="S3Bucket",
                                                 value=file_bucket.bucket_name)
        call_result_table.grant_read_data(grantee=get_call_result_function)
        file_bucket.grant_read_write(get_call_result_function)

        iterator_function = _lambda.Function(
            self,
            "IteratorFunction",
            handler="iterator.lambda_handler",
            **global_python_function_arguments)
        iterator_function.add_permission(
            id="IteratorFunctionLambdaInvokePermission",
            principal=_iam.ServicePrincipal(service="connect.amazonaws.com"),
            action="lambda:InvokeFunction",
            source_account=self.account,
            source_arn=
            f"arn:aws:connect:{self.region}:{self.account}:instance/{instance_id}"
        )

        create_appsync_call_task_function = _lambda.Function(
            self,
            "CreateAppSyncCallTaskFunction",
            handler="create_appsync_call_task.lambda_handler",
            **global_nodeje_function_arguments)
        create_appsync_call_task_function.add_environment(
            key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
        create_appsync_call_task_function.add_environment(
            key="CallRecordTableName", value=callout_record_table.table_name)
        call_sqs_queue.grant_send_messages(create_appsync_call_task_function)
        callout_record_table.grant_write_data(
            create_appsync_call_task_function)

        create_call_report_record_function = _lambda.Function(
            self,
            "CreateCallReportRecordFunction",
            handler="create_call_report_record.lambda_handler",
            **global_nodeje_function_arguments)

        create_excel_call_task_function = _lambda.Function(
            self,
            "CreateExcelCallTaskFunction",
            handler="create_excel_call_task.lambda_handler",
            **global_python_function_arguments)
        create_excel_call_task_function.add_environment(
            key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
        call_sqs_queue.grant_send_messages(create_excel_call_task_function)

        create_excel_call_task_function.add_event_source(
            source=_les.S3EventSource(bucket=file_bucket,
                                      events=[_s3.EventType.OBJECT_CREATED],
                                      filters=[
                                          _s3.NotificationKeyFilter(
                                              prefix="call_task",
                                              suffix=".xlsx")
                                      ]))

        start_callout_flow_function = _lambda.Function(
            self,
            "StartCalloutFlowFunction",
            handler="start_call_out_flow.lambda_handler",
            reserved_concurrent_executions=1,
            **global_python_function_arguments)
        start_callout_flow_function.add_environment(
            key="CallSqsQueueUrl", value=call_sqs_queue.queue_url)
        start_callout_flow_function.add_environment(
            key="ResponseHandlerFunctionArn",
            value=response_handler_function.function_arn)
        start_callout_flow_function.add_environment(
            key="IteratorFunctionArn", value=iterator_function.function_arn)
        start_callout_flow_function.add_environment(
            key="SendTaskSuccessFunctionArn",
            value=send_task_success_function.function_arn)
        start_callout_flow_function.add_environment(
            key="S3Bucket", value=file_bucket.bucket_name)
        start_callout_flow_function.add_event_source(
            source=_les.SqsEventSource(queue=call_sqs_queue, batch_size=1))
        file_bucket.grant_read_write(start_callout_flow_function)

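        # The dict below is plain, JSON-ready Python. Presumably the stack
        # later serializes it into a low-level state machine, roughly like
        # this (construct id, role, and the _sfn alias are assumptions):
        #
        #     call_state_machine = _sfn.CfnStateMachine(
        #         self, 'CallStateMachine',
        #         role_arn=state_machine_role.role_arn,
        #         definition_string=json.dumps(call_state_machine_definition))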
        call_state_machine_definition = {
            "Comment":
            "Reading messages from an SQS queue and iteratively processing each message.",
            "StartAt": "Start",
            "States": {
                "Start": {
                    "Type": "Pass",
                    "Next": "Process Call Messages"
                },
                "Process Call Messages": {
                    "Type": "Map",
                    "Next": "Get Call Result",
                    "InputPath": "$",
                    "ItemsPath": "$",
                    "OutputPath": "$.[0]",
                    "Iterator": {
                        "StartAt": "Get Call out job",
                        "States": {
                            "Get Call out job": {
                                "Type": "Task",
                                "Resource":
                                get_callout_job_function.function_arn,
                                "Next": "Callout with AWS Connect"
                            },
                            "Callout with AWS Connect": {
                                "Type":
                                "Task",
                                "Resource":
                                "arn:aws:states:::sqs:sendMessage.waitForTaskToken",
                                "TimeoutSeconds":
                                timeout,
                                "Parameters": {
                                    "QueueUrl": async_callout_queue.queue_url,
                                    "MessageGroupId": "1",
                                    "MessageBody": {
                                        "Message.$": "$",
                                        "TaskToken.$": "$$.Task.Token"
                                    }
                                },
                                "Catch": [{
                                    "ErrorEquals": ["States.Timeout"],
                                    "ResultPath": None,
                                    "Next": "Call Timeout"
                                }],
                                "Next":
                                "Save call result"
                            },
                            "Call Timeout": {
                                "Type": "Pass",
                                "ResultPath": None,
                                "Next": "Save call result"
                            },
                            "Save call result": {
                                "Type": "Task",
                                "Resource":
                                "arn:aws:states:::dynamodb:putItem",
                                "Parameters": {
                                    "TableName": call_result_table.table_name,
                                    "Item": {
                                        "receiver_id": {
                                            "S.$": "$.receiver_id"
                                        },
                                        "task_id": {
                                            "S.$": "$.task_id"
                                        },
                                        "username": {
                                            "S.$": "$.username"
                                        },
                                        "phone_number": {
                                            "S.$": "$.phone_number"
                                        },
                                        "status": {
                                            "S.$": "$.status"
                                        },
                                        "answers": {
                                            "S.$": "$.answers"
                                        },
                                        "error": {
                                            "S.$": "$.error"
                                        },
                                        "call_at": {
                                            "S.$": "$.call_at"
                                        }
                                    }
                                },
                                "ResultPath": "$.Result",
                                "OutputPath": "$.task_id",
                                "End": True
                            }
                        }
                    }
                },
                "Get Call Result": {
                    "Type": "Task",
                    "Resource": get_call_result_function.function_arn,
                    "Next": "Create Call Report Record"
                },
                "Create Call Report Record": {
                    "Type": "Task",
                    "Resource":
                    create_call_report_record_function.function_arn,
                    "Next": "Send Completion message to SNS"
                },
                "Send Completion message to SNS": {
                    "Type": "Task",
                    "Resource": "arn:aws:states:::sns:publish",
                    "Parameters": {
                        "TopicArn": call_job_complete_sns_topic.topic_arn,
                        "Message.$": "$"
                    },
                    "Next": "Finish"
                },
                "Finish": {
                    "Type": "Succeed"
                }
            }
        }
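        # The low-level CfnStateMachine construct does not wire up IAM for us,
        # so the execution role below must explicitly allow every action the
        # definition uses: SQS send, DynamoDB put, Lambda invoke, SNS publish.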
        callout_state_machine_role = _iam.Role(
            self,
            "CalloutStatesExecutionRole",
            assumed_by=_iam.ServicePrincipal(
                f"states.{self.region}.amazonaws.com"))
        callout_state_machine_role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    "sqs:SendMessage", "dynamodb:PutItem",
                    "lambda:InvokeFunction", "sns:Publish"
                ],
                resources=[
                    async_callout_queue.queue_arn, call_result_table.table_arn,
                    get_callout_job_function.function_arn,
                    get_call_result_function.function_arn,
                    call_job_complete_sns_topic.topic_arn,
                    create_appsync_call_task_function.function_arn,
                    create_call_report_record_function.function_arn
                ]))
        callout_state_machine = _sfn.CfnStateMachine(
            self,
            "CalloutStateMachine",
            role_arn=callout_state_machine_role.role_arn,
            definition_string=json.dumps(call_state_machine_definition))
        send_task_success_function.add_to_role_policy(
            _iam.PolicyStatement(actions=["states:SendTaskSuccess"],
                                 resources=[callout_state_machine.ref]))

        start_callout_flow_function.add_environment(
            key="CalloutStateMachineArn", value=callout_state_machine.ref)
        start_callout_flow_function.add_to_role_policy(
            _iam.PolicyStatement(effect=_iam.Effect.ALLOW,
                                 resources=[callout_state_machine.ref],
                                 actions=['states:StartExecution']))

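        # Cognito user pool backing the AppSync API: users sign in with a
        # username, and the app client below is what the web front end uses
        # to authenticate against the pool.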
        user_pool = _cognito.UserPool(
            self, "UserPool", sign_in_type=_cognito.SignInType.USERNAME)

        user_pool_client = _cognito.UserPoolClient(self,
                                                   "UserPoolClient",
                                                   user_pool=user_pool)

        appsync_api = _appsync.GraphQLApi(
            self,
            "AppSyncApi",
            name="AWSCalloutApi",
            user_pool_config=_appsync.UserPoolConfig(
                user_pool=user_pool,
                default_action=_appsync.UserPoolDefaultAction.ALLOW),
            log_config=_appsync.LogConfig(
                field_log_level=_appsync.FieldLogLevel.ALL),
            schema_definition_file=
            f"{pathlib.Path(__file__).parent.absolute()}/schema.graphql")

        callout_record_ddb_ds = appsync_api.add_dynamo_db_data_source(
            name="CalloutRecordDdb",
            description="Callout Record DynamoDB Data Source",
            table=callout_record_table)
        callout_record_ddb_ds.create_resolver(
            type_name="Query",
            field_name="getLatestCallTaskRecords",
            request_mapping_template=_appsync.MappingTemplate.from_string(
                '{"version":"2017-02-28","operation":"Query","index":"CallTypeCreatedAtGlobalIndex","query":{"expression":"call_type = :call_type","expressionValues":{":call_type":{"S":"TASK"}}},"scanIndexForward":false,"limit":${ctx.args.limit}}'
            ),
            response_mapping_template=_appsync.MappingTemplate.
            dynamo_db_result_list())
        callout_record_ddb_ds.create_resolver(
            type_name="Query",
            field_name="getLatestCallReportRecords",
            request_mapping_template=_appsync.MappingTemplate.from_string(
                '{"version":"2017-02-28","operation":"Query","index":"CallTypeCreatedAtGlobalIndex","query":{"expression":"call_type = :call_type","expressionValues":{":call_type":{"S":"REPORT"}}},"scanIndexForward":false,"limit":${ctx.args.limit}}'
            ),
            response_mapping_template=_appsync.MappingTemplate.
            dynamo_db_result_list())
        callout_record_ddb_ds.create_resolver(
            type_name="Mutation",
            field_name="createCallReport",
            request_mapping_template=_appsync.MappingTemplate.from_string(
                '{"version":"2017-02-28","operation":"PutItem","key":{"task_id":{"S":"${ctx.args.report.task_id}"},"created_at":{"N":"${ctx.args.report.created_at}"}},"attributeValues":$util.dynamodb.toMapValuesJson($ctx.args.report)}'
            ),
            response_mapping_template=_appsync.MappingTemplate.
            dynamo_db_result_item())

        call_task_lambda_ds = appsync_api.add_lambda_data_source(
            name="CallTaskLambda",
            description="Call Task Lambda Data Source",
            lambda_function=create_appsync_call_task_function)
        call_task_lambda_ds.create_resolver(
            type_name="Mutation",
            field_name="createCallTask",
            request_mapping_template=_appsync.MappingTemplate.lambda_request(
                "$utils.toJson($ctx.args)"),
            response_mapping_template=_appsync.MappingTemplate.lambda_result())
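        # With the resolvers above in place, a client could issue queries such
        # as the following (illustrative only; the exact fields come from
        # schema.graphql):
        #
        #   query { getLatestCallTaskRecords(limit: 10) { task_id call_type } }
        #
        # createCallTask mutations are proxied to the Lambda data source,
        # while the two "getLatest*" queries read the DynamoDB
        # CallTypeCreatedAtGlobalIndex directly through VTL templates.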

        create_call_report_record_function.add_environment(
            key="AppSyncGraphQlApiUrl", value=appsync_api.graph_ql_url)

        create_call_report_record_function.add_to_role_policy(
            statement=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=['appsync:GraphQL'],
                resources=[
                    f"{appsync_api.arn}/types/Mutation/fields/createCallReport"
                ]))

        core.CfnOutput(self,
                       id="OutputCallSqsQueue",
                       value=call_sqs_queue.queue_arn)
        core.CfnOutput(self,
                       id="OutputCallJobCompletionSNSTopic",
                       value=call_job_complete_sns_topic.topic_arn)
        core.CfnOutput(self,
                       id="OutputExcelFileS3Bucket",
                       value=file_bucket.bucket_name)
        core.CfnOutput(self,
                       id="OutputStaticWebS3Bucket",
                       value=web_bucket.bucket_name)
        core.CfnOutput(self,
                       id="OutputStaticWebUrl",
                       value=web_bucket.bucket_website_url)

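        # Guest (unauthenticated) access: the identity pool hands out
        # credentials whose role is scoped below to AppSync GraphQL calls on
        # this API only, which lets the static web front end call the API
        # without signing in.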
        identity_pool = _cognito.CfnIdentityPool(
            self,
            "IdentityPool",
            allow_unauthenticated_identities=True,
            cognito_identity_providers=[
                _cognito.CfnIdentityPool.CognitoIdentityProviderProperty(
                    provider_name=user_pool.user_pool_provider_name,
                    client_id=user_pool_client.user_pool_client_id)
            ])
        identity_pool_unauthorized_role = _iam.Role(
            self,
            'IdentityPoolUnAuthorizedRole',
            assumed_by=_iam.FederatedPrincipal(
                federated="cognito-identity.amazonaws.com",
                assume_role_action="sts:AssumeRoleWithWebIdentity",
                conditions={
                    "StringEquals": {
                        "cognito-identity.amazonaws.com:aud": identity_pool.ref
                    },
                    "ForAnyValue:StringLike": {
                        "cognito-identity.amazonaws.com:amr": "unauthenticated"
                    }
                }))
        identity_pool_unauthorized_role.add_to_policy(
            _iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["appsync:GraphQL"],
                resources=[
                    f"{appsync_api.arn}/types/*",
                    # f"{appsync_api.arn}/types/Query/fields/getLatestCallTaskRecords",
                    # f"{appsync_api.arn}/types/Query/fields/getLatestCallReportRecords",
                    # f"{appsync_api.arn}/types/Mutation/fields/createCallRecord",
                    # f"{appsync_api.arn}/types/Subscription/fields/createCallTask",
                    # f"{appsync_api.arn}/types/Subscription/fields/createCallReport"
                ]))

        _cognito.CfnIdentityPoolRoleAttachment(
            self,
            "CognitoIdentityPoolRoleAttachment",
            identity_pool_id=identity_pool.ref,
            roles={
                "unauthenticated": identity_pool_unauthorized_role.role_arn
            })

        core.CfnOutput(self, id="UserPoolId", value=user_pool.user_pool_id)
        core.CfnOutput(self,
                       id="UserPoolClientId",
                       value=user_pool_client.user_pool_client_id)
        core.CfnOutput(self, id="IdentityPoolId", value=identity_pool.ref)