Example #1
 def create_url_requester(self):
     url_requester = self.create_lambda_with_error_alarm("url_requester")
     url_requester.add_event_source(
         aws_lambda_event_sources.DynamoEventSource(
             self.yelp_table,
             starting_position=aws_lambda.StartingPosition.TRIM_HORIZON,
             batch_size=5,
             bisect_batch_on_error=True,
             retry_attempts=0,
         ))
     url_requester.add_event_source(
         aws_lambda_event_sources.DynamoEventSource(
             self.config_table,
             starting_position=aws_lambda.StartingPosition.TRIM_HORIZON,
             batch_size=5,
             bisect_batch_on_error=True,
             retry_attempts=0,
         ))
     rule = aws_events.Rule(
         self,
         "UrlRequesterRule",
         schedule=aws_events.Schedule.cron(minute="*/5",
                                           hour="*",
                                           month="*",
                                           week_day="*",
                                           year="*"),
     )
     rule.add_target(aws_events_targets.LambdaFunction(url_requester))
     self.url_requester = url_requester
     return self.url_requester
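The function above is wired to two DynamoDB streams and a five-minute EventBridge schedule, so its handler sees two different event shapes. A minimal sketch of what such a handler could look like (the routing logic is an assumption, not part of the original example):

import json


def lambda_handler(event, context):
    # DynamoDB stream invocations carry a "Records" list; the */5 EventBridge
    # schedule above delivers a plain scheduled event without one.
    if "Records" in event:
        for record in event["Records"]:
            keys = record["dynamodb"]["Keys"]
            print(f"stream event {record['eventName']}: {json.dumps(keys)}")
    else:
        print("scheduled invocation: requesting URLs")
    return {"status": "ok"}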
Example #2
    def __init__(self, scope: core.Construct, id: str,
                 dynamo_table: aws_dynamodb.Table) -> None:
        super().__init__(scope, id)

        if not dynamo_table:
            raise TypeError('missing required DynamoDB table parameter')

        self._parents_dir: str = get_git_root(Path(__file__))
        self._log_utils_dir: Path = Path(f'{self._parents_dir}/log_utils')
        self._cloudwatch_logs_policy = iam.ManagedPolicy.from_aws_managed_policy_name(
            "service-role/AWSLambdaBasicExecutionRole")

        #### EventBridge -> event bus -> rule
        ## A lambda puts an event onto the bus; a matching rule is triggered and
        ## invokes the target lambda with the event. Other targets are possible.

        # create event bus
        event_bus = aws_events.EventBus(scope=self,
                                        id=f'{id}TenantMgmtBus',
                                        event_bus_name=f'{id}TenantMgmtBus')

        # create lambdas
        self.dispatcher_lambda = self._create_dispatcher_lambda(id, event_bus)

        # trigger a lambda on a DynamoDB change, create an event source
        stream_event_source = aws_lambda_event_sources.DynamoEventSource(
            table=dynamo_table,
            starting_position=aws_lambda.StartingPosition.LATEST)
        # bind the event source to the lambda (equivalent to
        # self.dispatcher_lambda.add_event_source(stream_event_source))
        stream_event_source.bind(self.dispatcher_lambda)
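The comments above describe the full event-bus flow, but the snippet stops after binding the stream source. A hedged sketch of the remaining rule-to-lambda wiring on the custom bus, meant to live in the same constructor (the detail-type filter, the aws_events_targets import, and consumer_lambda are assumptions):

tenant_rule = aws_events.Rule(
    self,
    f'{id}TenantMgmtRule',
    event_bus=event_bus,  # attach the rule to the custom bus, not "default"
    event_pattern=aws_events.EventPattern(detail_type=['TenantCreated']),
)
# consumer_lambda is a hypothetical function meant to receive the events
tenant_rule.add_target(aws_events_targets.LambdaFunction(consumer_lambda))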
Example #3
 def create_dynamodb(self) -> None:
     '''DynamoDB Tables and Streams Event Sources
     '''
     # Single table to store Reality items; billing mode, TTL and a global
     # index could also be configured here
     self.ddb_table_reality = aws_dynamodb.Table(
         self,
         'Reality-dev',
         partition_key=aws_dynamodb.Attribute(
             name="hash_id", type=aws_dynamodb.AttributeType.NUMBER),
         sort_key=aws_dynamodb.Attribute(
             name='actualized', type=aws_dynamodb.AttributeType.STRING),
         stream=aws_dynamodb.StreamViewType.NEW_AND_OLD_IMAGES,  # enable DynamoDB Streams for the trigger
         read_capacity=3,
         write_capacity=8,
         removal_policy=core.RemovalPolicy.DESTROY  # RemovalPolicy lives in the core module
     )
     ## Stream event source and trigger settings
     self.ddb_streams_reality = aws_lambda_event_sources.DynamoEventSource(
         table=self.ddb_table_reality,
         starting_position=aws_lambda.StartingPosition.LATEST,  # only new changes; TRIM_HORIZON would replay the oldest records still in the stream
         batch_size=100,  # records per invocation; valid range is 1-1000
         max_batching_window=core.Duration.seconds(60),
         # parallelization_factor (1-10) could raise concurrent batches per shard
         retry_attempts=2,
         on_failure=aws_lambda_destinations.SqsDestination(
             self.queue_ddb_streams),
     )
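The on_failure destination references self.queue_ddb_streams, created elsewhere in the stack. A minimal sketch of such a queue, assuming an aws_sqs import; note the SQS destination receives invocation metadata (shard id, sequence-number range), not the failed records themselves:

# Hypothetical definition of the failure queue referenced above
self.queue_ddb_streams = aws_sqs.Queue(
    self,
    'ddb-streams-failure-queue',
    retention_period=core.Duration.days(14),  # assumption: keep failure records two weeks
)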
Example #4
 def stream_lambda_source(self, table: dynamo.ITable,
                          function: _lambda.IFunction):
     dynamodb_stream_source = event_source.DynamoEventSource(
         table=table,
         starting_position=_lambda.StartingPosition.LATEST,
         batch_size=1,
         retry_attempts=1)
     function.add_event_source(dynamodb_stream_source)
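A possible call site for this helper, assuming an orders table (created with a stream enabled) and a processing function defined elsewhere in the stack; centralizing the batch_size and retry defaults in one helper keeps stream wiring consistent across the stack:

# Hypothetical usage: the table must have been created with a stream
# (e.g. stream=dynamo.StreamViewType.NEW_AND_OLD_IMAGES) or binding fails
self.stream_lambda_source(table=self.orders_table, function=self.order_processor)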
Example #5
    def __init__(self,
                 scope: cdk.Stack,
                 id: str,
                 sla_monitor_dynamo_table,
                 git_hash,
                 stream_zip='stream-monitor.zip',
                 **kwargs):
        super().__init__(scope, id, **kwargs)
        self.sla_monitor_dynamo_table = sla_monitor_dynamo_table
        self.stream_zip = git_hash + "-" + stream_zip

        self.sla_stream_monitor_lambda_function = aws_lambda.Function(
            self,
            "StreamMonitorLambdaFunction",
            function_name=self.stack_name,
            code=aws_lambda.AssetCode(self.stream_zip),
            handler="stream_processor.lambda_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            #layers=[self.dynamodb_lambda_layer],
            description="Monitors the DynamoDB stream from SLA Monitor and updates a DynamoDB table with any changes",
            environment={
                "STACK_NAME": self.stack_name,
                "DYNAMO_TABLE_NAME": self.stack_name
            },
            memory_size=128,
            timeout=cdk.Duration.seconds(90),
        )

        self.sla_stream_monitor_dynamo_table = aws_dynamodb.Table(
            self,
            "DynamoTable{}".format("SLAStreamMonitor"),
            table_name=self.stack_name,
            billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=aws_dynamodb.Attribute(
                name="service_name", type=aws_dynamodb.AttributeType.STRING),
            sort_key=aws_dynamodb.Attribute(
                name="last_updated_date", type=aws_dynamodb.AttributeType.STRING),
        )

        # Permissions to access stream_monitor dynamo table
        self.sla_stream_monitor_dynamo_table.grant_read_write_data(
            self.sla_stream_monitor_lambda_function.role)

        # Event source for stream monitor lambda function from sla monitor dynamodb stream
        self.sla_stream_monitor_lambda_function.add_event_source(
            aws_lambda_event_sources.DynamoEventSource(
                table=self.sla_monitor_dynamo_table,
                starting_position=aws_lambda.StartingPosition.LATEST))
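The asset zip is expected to expose stream_processor.lambda_handler. A hedged sketch of what that handler might do given the table schema above (the field extraction is an assumption, since the source table's schema is not shown):

# Hypothetical stream_processor.py: copy changes from the SLA monitor stream
# into the table keyed by (service_name, last_updated_date)
import os
import boto3

dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(os.environ["DYNAMO_TABLE_NAME"])


def lambda_handler(event, context):
    for record in event.get("Records", []):
        if record["eventName"] not in ("INSERT", "MODIFY"):
            continue
        new_image = record["dynamodb"]["NewImage"]
        table.put_item(Item={
            "service_name": new_image["service_name"]["S"],
            "last_updated_date": new_image["last_updated_date"]["S"],
        })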
Example #6
    def create_dynamodb(self) -> None:
        '''DynamoDB Tables and Event Sources
        '''
        # DynamoDB Table Attributes
        self.ddb_attr_time_to_live = 'time-to-live'

        # DynamoDB Parameters
        self.ddb_param_max_parallel_streams = 5

        # Single-table to store blog content
        self.ddb_table_blog = aws_dynamodb.Table(
            self,
            'sls-blog-dynamo-table',
            partition_key=aws_dynamodb.Attribute(
                name='id',
                type=aws_dynamodb.AttributeType.STRING,
            ),
            billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST,
            point_in_time_recovery=True,
            removal_policy=core.RemovalPolicy.DESTROY,
            time_to_live_attribute=self.ddb_attr_time_to_live,
            stream=aws_dynamodb.StreamViewType.NEW_AND_OLD_IMAGES,
        )

        # GSI to query blog content by item (type) and ordered by time
        self.ddb_gsi_latest = 'latest-blogs'

        self.ddb_table_blog.add_global_secondary_index(
            index_name=self.ddb_gsi_latest,
            partition_key=aws_dynamodb.Attribute(
                name='item-type',
                type=aws_dynamodb.AttributeType.STRING,
            ),
            sort_key=aws_dynamodb.Attribute(
                name='publish-timestamp',
                type=aws_dynamodb.AttributeType.NUMBER,
            ),
            projection_type=aws_dynamodb.ProjectionType.ALL,
        )

        # Generate streams from modifications to the "blog" DDB Table
        self.ddb_source_blog = aws_lambda_event_sources.DynamoEventSource(
            table=self.ddb_table_blog,
            starting_position=aws_lambda.StartingPosition.LATEST,
            batch_size=500,
            max_batching_window=core.Duration.seconds(60),
            parallelization_factor=self.ddb_param_max_parallel_streams,
            retry_attempts=2,
            on_failure=aws_lambda_destinations.SqsDestination(
                self.queue_ddb_streams_dlq),
        )
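The latest-blogs GSI is keyed by item-type and sorted by publish-timestamp, so "latest N items" becomes a single query. A boto3 sketch, assuming the table's physical name (CDK generates one unless table_name is set, so this name is a placeholder):

import boto3
from boto3.dynamodb.conditions import Key

dynamodb = boto3.resource("dynamodb")
blog_table = dynamodb.Table("sls-blog-dynamo-table")  # assumption: physical table name

# Newest blog items first: query one GSI partition, descending sort key
response = blog_table.query(
    IndexName="latest-blogs",
    KeyConditionExpression=Key("item-type").eq("blog"),
    ScanIndexForward=False,  # descending publish-timestamp
    Limit=10,
)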
Example #7
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        #dynamodb
        stream_dynamodb = aws_dynamodb.Table(
            self,
            "StreamDynamoDb",
            partition_key=aws_dynamodb.Attribute(
                name="stream_id", type=aws_dynamodb.AttributeType.STRING),
            stream=aws_dynamodb.StreamViewType.NEW_AND_OLD_IMAGES,
            removal_policy=core.RemovalPolicy.DESTROY)

        # import the lambda code from file
        try:
            with open("deployments/functions/ddb_stream.py", mode="r") as file:
                function_body = file.read()
        except OSError as e:
            raise RuntimeError("Unable to read lambda function code") from e

        #lambda
        ddb_stream_function = aws_lambda.Function(
            self,
            "DddbStreamFunction",
            function_name="DynamoDbStream",
            description="Dynamodb stream processor",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="index.lambda_handler",
            code=aws_lambda.InlineCode(function_body),
            timeout=core.Duration.seconds(5),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
            })

        #dynamodb stream event sources
        dynamodb_event_source = aws_lambda_es.DynamoEventSource(
            table=stream_dynamodb,
            starting_position=aws_lambda.StartingPosition.TRIM_HORIZON,
            bisect_batch_on_error=True)

        #lambda trigger
        ddb_stream_function.add_event_source(dynamodb_event_source)
Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Add your stack resources below:
        # DynamoDB Table
        api_db = _dynamodb.Table(
            self,
            "apiDDBTable",
            partition_key=_dynamodb.Attribute(
                name="_id", type=_dynamodb.AttributeType.STRING),
            stream=_dynamodb.StreamViewType.NEW_AND_OLD_IMAGES,
            removal_policy=core.RemovalPolicy.DESTROY)

        # Read Lambda Code
        try:
            with open(
                    "advanced_use_cases/lambda_src/dynamodb_stream_processor.py",
                    mode="r") as f:
                ddb_stream_processor_fn_code = f.read()
        except OSError as e:
            raise RuntimeError("Unable to read lambda function code") from e

        # Deploy the lambda function
        ddb_stream_processor_fn = _lambda.Function(
            self,
            "ddbStreamProcessorFn",
            function_name="ddb_stream_processor_fn",
            description="Process DDB Streaming data events",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(ddb_stream_processor_fn_code),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={"LOG_LEVEL": "INFO"})

        # Create New DDB Stream Event Source
        ddb_stream_event_source = _lambda_event_sources.DynamoEventSource(
            table=api_db,
            starting_position=_lambda.StartingPosition.TRIM_HORIZON,
            bisect_batch_on_error=True)

        # Attach DDB Event Source As Lambda Trigger
        ddb_stream_processor_fn.add_event_source(ddb_stream_event_source)
Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        stack_role = iam.Role(
            self,
            "SimulationServiceRole",
            assumed_by=iam.ServicePrincipal("batch.amazonaws.com"),
        )

        stack_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess"))

        job_role = iam.Role(
            self,
            "SimulationJobServiceRole",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
        )

        job_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess"))

        lambda_role = iam.Role(
            self,
            "SimulationLambdaServiceRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
        )

        lambda_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess"))

        # Create Input S3
        input_bucket = s3.Bucket(self, "InputS3Bucket")

        # Create Output S3
        output_bucket = s3.Bucket(self, "OutputS3Bucket")

        # admin_policy = iam.from_policy_name("AdministratorAccess", "AdministratorAccess")

        job_table = aws_dynamodb.Table(
            self,
            id="JobTable",
            partition_key=aws_dynamodb.Attribute(
                name="PK", type=aws_dynamodb.AttributeType.STRING),
            stream=aws_dynamodb.StreamViewType.NEW_AND_OLD_IMAGES,
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        orchestration_handler_lambda = aws_lambda.Function(
            self,
            id="JobOrchestrationHandler",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            handler="orchestration_handler_lambda.handler",
            code=aws_lambda.Code.asset("./simulations_service/functions/"),
        )

        # Give only write access to the post handler
        job_table.grant_write_data(orchestration_handler_lambda)

        # Pass table_name as env variable
        orchestration_handler_lambda.add_environment("TABLE_NAME",
                                                     job_table.table_name)

        # Create lambda function for processing dynamodb streams
        dynamodb_streams_processor_lambda = aws_lambda.Function(
            self,
            id="JobsDynamoDBStreamsProcessor",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            handler="dynamodb_streams_processor_lambda.handler",
            code=aws_lambda.Code.asset("./simulations_service/functions/"),
            role=lambda_role,
        )

        # Add dynamo db as lambda event source
        dynamodb_streams_processor_lambda.add_event_source(
            aws_lambda_event_sources.DynamoEventSource(
                job_table,
                starting_position=aws_lambda.StartingPosition.LATEST,
                batch_size=1,
            ))

        dynamodb_streams_processor_lambda.add_environment(
            "S3_OUTPUT_BUCKET", output_bucket.bucket_name)

        dynamodb_streams_processor_lambda.add_environment(
            "TABLE_NAME", job_table.table_name)

        vpc = ec2.Vpc(self, "VPC")

        spot_environment = batch.ComputeEnvironment(
            self,
            "MyComputedEnvironment",
            compute_resources={
                "vpc": vpc,
            },
            service_role=stack_role.without_policy_updates(),
        )

        job_queue = batch.JobQueue(
            self,
            "JobQueue",
            compute_environments=[
                batch.JobQueueComputeEnvironment(
                    compute_environment=spot_environment, order=1)
            ],
        )

        dynamodb_streams_processor_lambda.add_environment(
            "JOB_QUEUE", job_queue.job_queue_name)

        job_definition = batch.JobDefinition(
            self,
            "batch-job-def-from-local",
            container={
                "image":
                ecs.ContainerImage.from_asset("./simulations_service/job/"),
                "memory_limit_mib":
                500,
                "privileged":
                True,
                "job_role":
                job_role,
            },
        )

        dynamodb_streams_processor_lambda.add_environment(
            "JOB_DEFINITION", job_definition.job_definition_name)

        orchestration_handler_lambda.add_event_source(
            aws_lambda_event_sources.S3EventSource(
                bucket=input_bucket,
                events=[s3.EventType.OBJECT_CREATED],
            ))
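The streams processor receives JOB_QUEUE and JOB_DEFINITION through its environment, which suggests it submits one Batch job per new table entry. A hedged sketch of such a handler (the job naming and the parameter mapping are assumptions):

# Hypothetical dynamodb_streams_processor_lambda.py
import os
import boto3

batch = boto3.client("batch")


def handler(event, context):
    for record in event.get("Records", []):
        if record["eventName"] != "INSERT":
            continue
        job_id = record["dynamodb"]["Keys"]["PK"]["S"]
        batch.submit_job(
            jobName=f"simulation-{job_id}",
            jobQueue=os.environ["JOB_QUEUE"],
            jobDefinition=os.environ["JOB_DEFINITION"],
            containerOverrides={
                "environment": [
                    {"name": "JOB_ID", "value": job_id},
                    {"name": "S3_OUTPUT_BUCKET",
                     "value": os.environ["S3_OUTPUT_BUCKET"]},
                ]
            },
        )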
Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self._table = ddb.Table(self,
                                'AirTicketOrder',
                                partition_key={
                                    'name': 'customer_id',
                                    'type': ddb.AttributeType.STRING
                                },
                                stream=ddb.StreamViewType.NEW_AND_OLD_IMAGES,
                                removal_policy=core.RemovalPolicy.DESTROY)

        self.lambda_cmd = _lambda.Function(
            self,
            'CommandDDBSaver',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset("./lambda/cmd/"),
            handler="cmd.lambda_handler",
            environment={
                "ORDER_TABLE_NAME": self._table.table_name,
            })

        self._table.grant_read_write_data(self.lambda_cmd)

        # Allow Command lambda to invoke other lambda
        self.lambda_cmd.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["*"],
                                actions=["lambda:InvokeFunction"]))

        api = apigw.LambdaRestApi(
            self,
            "CommandEndPoint",
            handler=self.lambda_cmd,
        )

        # TODO: with 2 AZs we could declare just one public and one private
        # subnet configuration; across 2 AZs that still yields 2 public + 2 private subnets
        # Lambda access to RDS Aurora MySQL requires a VPC for security and performance
        vpc = ec2.Vpc(
            self,
            'air-ticket',
            cidr="10.125.0.0/16",
            max_azs=2,
            nat_gateways=1,
            subnet_configuration=[
                ec2.SubnetConfiguration(name="public1",
                                        cidr_mask=24,
                                        subnet_type=ec2.SubnetType.PUBLIC),
                ec2.SubnetConfiguration(name="public2",
                                        cidr_mask=24,
                                        subnet_type=ec2.SubnetType.PUBLIC),
                ec2.SubnetConfiguration(name="private1",
                                        cidr_mask=24,
                                        subnet_type=ec2.SubnetType.PRIVATE),
                ec2.SubnetConfiguration(name="private2",
                                        cidr_mask=24,
                                        subnet_type=ec2.SubnetType.PRIVATE)
            ])

        query_lambda_sg = ec2.SecurityGroup(
            self,
            'Query-Lambda-SG',
            vpc=vpc,
            description="Allows DB connections from Query Lambda SG",
        )

        sink_lambda_sg = ec2.SecurityGroup(
            self,
            'RDS-Sink-Lambda-SG',
            vpc=vpc,
            description="Allows DB connections from Sink Lambda SG",
        )

        db_name = "Demo"
        db_user_name = 'admin'
        db_user_passowrd = 'password'

        parameter_group = rds.ParameterGroup(self,
                                             "ParameterGroup",
                                             family="mysql5.7",
                                             parameters={})
        aurora_db = rds.DatabaseInstance(
            self,
            "air-ticket-db",
            master_user_password=core.SecretValue.ssm_secure(
                'AirTicket.AdminPass', version='1'),
            master_username=db_user_name,
            engine=rds.DatabaseInstanceEngine.MYSQL,
            engine_version="5.7",
            parameter_group=parameter_group,
            vpc=vpc,
            # Disable deletion protection for auto deletion
            deletion_protection=False,
            instance_class=ec2.InstanceType.of(ec2.InstanceClass.MEMORY5,
                                               ec2.InstanceSize.XLARGE),
            removal_policy=core.RemovalPolicy.DESTROY)

        self._query_handler = _lambda.Function(
            self,
            'QueryHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset("./lambda/query/"),
            handler="query.lambda_handler",
            timeout=core.Duration.seconds(60),
            vpc=vpc,
            security_group=query_lambda_sg,
            environment={
                "AuroraEndpoint": aurora_db.db_instance_endpoint_address,
                "dbName": db_name,
                "dbPassword": db_user_passowrd,
                "dbUser": db_user_name
            })

        query_api = apigw.LambdaRestApi(
            self,
            "Query",
            handler=self._query_handler,
        )

        # Init DB Lambda
        self.lambda_init = _lambda.Function(
            self,
            'InitDBHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset("./lambda/initdb/"),
            handler="init.lambda_handler",
            timeout=core.Duration.seconds(60),
            vpc=vpc,
            security_group=query_lambda_sg,
            environment={
                "AuroraEndpoint": aurora_db.db_instance_endpoint_address,
                "dbName": db_name,
                "dbPassword": db_user_passowrd,
                "dbUser": db_user_name
            })

        self.lambda_cmd.add_environment('INITDB_LAMBDA_NAME',
                                        self.lambda_init.function_name)

        # Create stream for fan-out
        stream_name = 'kinesis-stream-for-fanout'

        # Sync DDB stream delta to RDS Lambda
        self.lambda_sync = _lambda.Function(
            self,
            'SyncHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset("./lambda/sync/"),
            handler="sync.lambda_handler",
            timeout=core.Duration.seconds(60),
            vpc=vpc,
            security_group=query_lambda_sg,
            environment={"streamName": stream_name})

        # Add DDB stream trigger to sync lambda
        self.lambda_sync.add_event_source(
            event_sources.DynamoEventSource(
                self._table,
                starting_position=_lambda.StartingPosition.TRIM_HORIZON))

        self._table.grant_stream_read(self.lambda_sync)

        # Allow init/sync lambda access MySQL
        aurora_db.connections.allow_from(
            query_lambda_sg,
            ec2.Port.tcp(3306),
            "Allow MySQL access (default port 3306) from Query Lambda SG",
        )

        aurora_db.connections.allow_from(
            sink_lambda_sg,
            ec2.Port.tcp(3306),
            "Allow MySQL access (default port 3306) from Sink Lambda SG",
        )

        strm = kinesis.Stream(self,
                              'kinesis-stream-for-fanout',
                              stream_name=stream_name)

        # Create RDS Sink Lambda
        self.lambda_rds_sink = _lambda.Function(
            self,
            'RDS_SINK_1',
            handler='rds_sinker.lambda_handler',
            code=_lambda.Code.asset("./lambda/sink/"),
            runtime=_lambda.Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(300),
            vpc=vpc,
            security_group=sink_lambda_sg,
            environment={
                "AuroraEndpoint": aurora_db.db_instance_endpoint_address,
                "dbName": db_name,
                "dbPassword": db_user_passowrd,
                "dbUser": db_user_name
            })

        # Update Lambda Permissions To Use Stream
        strm.grant_read_write(self.lambda_sync)
        strm.grant_read(self.lambda_rds_sink)

        stream_consumer = kinesis.CfnStreamConsumer(
            self,
            'lambda-efo-consumer-id',
            consumer_name='lambda-efo-consumer',
            stream_arn=strm.stream_arn)

        e_s_mapping = _lambda.EventSourceMapping(
            self,
            'lambda-efo-consumer-event-source-mapping',
            target=self.lambda_rds_sink,
            # an EFO mapping must use the consumer ARN, not the stream ARN
            event_source_arn=stream_consumer.attr_consumer_arn,
            batch_size=1,
            starting_position=_lambda.StartingPosition.TRIM_HORIZON,
        )

        # self.lambda_rds_sink.add_event_source_mapping(e_s_mapping)

        # The commented-out CDK below would create the lambda as a standard Kinesis consumer instead of an EFO consumer
        #
        # # Create New Kinesis Event Source
        # kinesis_stream_event_source = event_sources.KinesisEventSource(
        #     stream=strm,
        #     starting_position=_lambda.StartingPosition.TRIM_HORIZON,
        #     batch_size=1
        # )

        # # Attach New Event Source To Lambda
        # self.lambda_rds_sink.add_event_source(kinesis_stream_event_source)

        # Create dead letter queue and grant send permission to sync/sink lambda
        self._queue = sqs.Queue(
            self,
            "DeadLetterQueue",

            #Amazon SQS sets a visibility timeout, a period of time during which Amazon
            # SQS prevents other consumers from receiving and processing the message.
            # The default visibility timeout for a message is 30 seconds.
            # The minimum is 0 seconds. The maximum is 12 hours.
            visibility_timeout=core.Duration.seconds(300),
        )

        self._queue.grant_send_messages(self.lambda_sync)
        self._queue.grant_send_messages(self.lambda_rds_sink)

        self.lambda_sync.add_environment("DLQ_NAME", self._queue.queue_name)
Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        layer = aws.PipLayers(self,
                              "scoreboard_layer",
                              layers={
                                  "htmlgen": "htmlgen/requirements.txt",
                                  "parse_globals":
                                  "parse_globals/requirements.txt",
                                  "bot": "bot/requirements.txt"
                              })

        # Create
        # * the generator function
        # * Namemap-table
        #   * allow generator to read from namemap-table
        #   (This might change - why not pass the mapping structure in the message?)
        # * Datacache-bucket
        #   * Allow generator to read and write to bucket

        htmlgen = aws.Function(self,
                               "htmlgen",
                               layers=layer.layers,
                               timeout=core.Duration.seconds(20),
                               memory_size=1024)

        # id: str (boardid), name: str (username), value: str (replacement value)
        namemap = aws.Table(self,
                            "namemap",
                            sort_key=aws_dynamodb.Attribute(
                                name='name',
                                type=aws_dynamodb.AttributeType.STRING),
                            removal_policy=CONFIGDATA)
        namemap.grant_read_data(htmlgen)

        no_point_days = aws.Table(self, "nopointdays")

        # id: str (boardid), day: int, results_1: dict ({player: score, ...}), results_2: dict ({player: score, ...})
        globalscores = aws.Table(self,
                                 "globalscores",
                                 partition_key=aws_dynamodb.Attribute(
                                     name='year',
                                     type=aws_dynamodb.AttributeType.NUMBER),
                                 sort_key=aws_dynamodb.Attribute(
                                     name='day',
                                     type=aws_dynamodb.AttributeType.NUMBER),
                                 removal_policy=EPHEMERALDATA)
        parse_globals = aws.Function(self,
                                     "parse_globals",
                                     layers=layer.layers,
                                     timeout=core.Duration.seconds(20),
                                     memory_size=1024)
        parse_globals.add_environment("DDB_GLOBALSCORES",
                                      globalscores.table_name)
        globalscores.grant_read_write_data(parse_globals)
        globalscores.grant_read_data(htmlgen)

        timestamps = aws.Table(self,
                               "timestamps",
                               removal_policy=EPHEMERALDATA)
        htmlgen.add_environment("DDB_TIMESTAMPS", timestamps.table_name)
        timestamps.grant_write_data(htmlgen)

        datacache = aws.Bucket(self, "datacache")
        datacache.grant_read_write(htmlgen)

        htmlbucket = aws.Bucket(
            self,
            "html",
            removal_policy=EPHEMERALDATA,
            auto_delete_objects=True,
            block_public_access=None,
            website_error_document="error.html",
            website_index_document="scoreboard.html",
            cors=[
                aws_s3.CorsRule(allowed_methods=[aws_s3.HttpMethods.GET],
                                allowed_headers=["*"],
                                allowed_origins=["*"])
            ])
        htmlbucket.grant_public_access()
        core.CfnOutput(self,
                       f"{id}_bucketurl",
                       value=f"BUCKET_URL={htmlbucket.bucket_website_url}")
        htmlbucket.grant_read_write(htmlgen)
        htmlgen.add_environment("S3_DATACACHE", datacache.bucket_name)
        htmlgen.add_environment("S3_HTML", htmlbucket.bucket_name)
        htmlgen.add_environment("DDB_NAMEMAP", namemap.table_name)

        aws_s3_deployment.BucketDeployment(
            self,
            "StaticHtml",
            sources=[aws_s3_deployment.Source.asset("htmlgen/frontend")],
            destination_bucket=htmlbucket,
            prune=False)

        # Create
        # * spawner function
        # * boardconfig-table
        #   * allow spawner to read from boardconfig-table
        # * generator_queue
        #   allow spawner to post messages to queue
        spawner = aws.Function(self, "spawner", layers=layer.layers)
        boardconfig = aws.Table(
            self,
            "boardconfig",
            stream=aws_dynamodb.StreamViewType.NEW_AND_OLD_IMAGES,
            removal_policy=CONFIGDATA)
        boardconfig.grant_read_data(spawner)
        spawner.add_environment("DDB_CONFIG", boardconfig.table_name)
        spawner.add_environment("DDB_NOPOINTDAYS", no_point_days.table_name)

        boardconfig_source = aws_lambda_event_sources.DynamoEventSource(
            boardconfig, starting_position=aws_lambda.StartingPosition.LATEST)

        boarddeletions = aws.Function(self, "boarddeletions")
        boarddeletions.add_event_source(boardconfig_source)
        boarddeletions.add_environment("S3_HTML", htmlbucket.bucket_name)
        htmlbucket.grant_read_write(boarddeletions)

        generator_queue = aws.Queue(self, "generator_queue")
        generator_queue.grant_send_messages(spawner)
        spawner.add_environment("SQS_GENERATOR", generator_queue.queue_name)
        spawner.add_environment("DDB_TIMESTAMPS", timestamps.table_name)
        timestamps.grant_read_data(spawner)

        # Connect the generator_queue to the htmlgen-function
        event_source = aws_lambda_event_sources.SqsEventSource(generator_queue,
                                                               batch_size=10)
        htmlgen.add_event_source(event_source)

        # Admin API
        adminhandler = aws.Function(self, "adminhandler")
        adminhandlerApi = aws_apigateway.LambdaRestApi(self,
                                                       "adminapi",
                                                       handler=adminhandler)
        core.CfnOutput(self,
                       "root_url",
                       value=f"Admin URL={adminhandlerApi.url_for_path()}")
        adminhandler.add_environment("DDB_CONFIG", boardconfig.table_name)
        boardconfig.grant_read_write_data(adminhandler)

        # Slack API
        api = aws.RestApi(self, "slack")

        slack = aws.ResourceWithLambda(
            self,
            "bot",
            verb="POST",
            description="Handle incoming Slack-bot interaction",
            parent_resource=api.root,
            lambda_layers=[layer.idlayers["bot"]])
        slack.handler.add_environment(
            "BOT_TOKEN", read_token_from_file('slack_bot_token.txt'))
        slack.handler.add_environment(
            "BOT_VERIFICATION",
            read_token_from_file('slack_verification_token.txt'))
        # "xoxb-1033954193568-1654676166455-Vzom9aQY9NUjAYR5mhKZP70k")
        slack.handler.add_environment("DDB_CONFIG", boardconfig.table_name)
        slack.handler.add_environment("DDB_NAMEMAP", namemap.table_name)
        namemap.grant_read_write_data(slack.handler)
        boardconfig.grant_read_write_data(slack.handler)

        # aws.Rule(
        #     self,
        #     "Test",
        #     description="Remove after functions verified - Fire every minute for some duration in Februaryx",
        #     schedule=aws_events.Schedule.cron(minute="*", hour="*", week_day="2", month="FEB"),
        #     target=spawner)

        aws.Rule(self,
                 "RestOfYear",
                 description="Fire every week jan-novx",
                 schedule=aws_events.Schedule.cron(minute="0",
                                                   hour="4",
                                                   week_day="2",
                                                   month="JAN-NOV"),
                 target=spawner)
        aws.Rule(self,
                 "Mornings_December",
                 description="Every second minute 06-08 (CET) 1-25 decx",
                 schedule=aws_events.Schedule.cron(minute="0/2",
                                                   hour="6-7",
                                                   day="1-25",
                                                   month="DEC"),
                 target=spawner)
        aws.Rule(self,
                 "Daytime_December",
                 description="Every 20 minutes 08-15 (CET) 1-25 decx",
                 schedule=aws_events.Schedule.cron(minute="0/20",
                                                   hour="8-15",
                                                   day="1-25",
                                                   month="DEC"),
                 target=spawner)
        aws.Rule(self,
                 "Nighttime_December",
                 description="Every hour 00-6,14-24 (CET) 1-25 decx",
                 schedule=aws_events.Schedule.cron(minute="0",
                                                   hour="0-6,14-23",
                                                   day="1-25",
                                                   month="DEC"),
                 target=spawner)
        aws.Rule(self,
                 "EndOf_December",
                 description="Every hour 9-23 (CET) 25-31 decx",
                 schedule=aws_events.Schedule.cron(minute="0",
                                                   hour="9-23",
                                                   day="26-31",
                                                   month="DEC"),
                 target=spawner)
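The spawner reads board configs and posts work items to generator_queue, which feeds htmlgen in batches of 10. A hedged sketch of the sending side (the message shape is an assumption); note SQS_GENERATOR holds the queue name, not its URL:

# Hypothetical fragment of the spawner handler: one SQS message per board id
import json
import os
import boto3

sqs = boto3.client("sqs")


def send_board(board_id: str) -> None:
    # resolve the queue name from the environment to a queue URL first
    queue_url = sqs.get_queue_url(QueueName=os.environ["SQS_GENERATOR"])["QueueUrl"]
    sqs.send_message(QueueUrl=queue_url,
                     MessageBody=json.dumps({"board_id": board_id}))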
Example #12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # DynamoDB Table
        # Streaming is enabled to send the whole new object down the pipe
        table = dynamo_db.Table(self,
                                "TheDynamoStreamer",
                                partition_key=dynamo_db.Attribute(
                                    name="message",
                                    type=dynamo_db.AttributeType.STRING),
                                stream=dynamo_db.StreamViewType.NEW_IMAGE)

        # defines an AWS Lambda resource
        subscriber_lambda = _lambda.Function(
            self,
            "DynamoLambdaHandler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="lambda.handler",
            code=_lambda.Code.from_asset("lambda_fns/subscribe"))

        subscriber_lambda.add_event_source(
            _event.DynamoEventSource(
                table=table,
                starting_position=_lambda.StartingPosition.LATEST))

        # API Gateway Creation
        gateway = api_gw.RestApi(
            self,
            'DynamoStreamerAPI',
            deploy_options=api_gw.StageOptions(
                metrics_enabled=True,
                logging_level=api_gw.MethodLoggingLevel.INFO,
                data_trace_enabled=True,
                stage_name='prod'))

        # Give our gateway permissions to interact with dynamodb
        api_gw_dynamo_role = iam.Role(
            self,
            'DefaultLambdaHandlerRole',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
        table.grant_read_write_data(api_gw_dynamo_role)

        # shortening the lines of later code
        schema = api_gw.JsonSchema
        schema_type = api_gw.JsonSchemaType

        # Because this isn't a proxy integration, we need to define our response model
        response_model = gateway.add_model(
            'ResponseModel',
            content_type='application/json',
            model_name='ResponseModel',
            schema=schema(
                schema=api_gw.JsonSchemaVersion.DRAFT4,
                title='pollResponse',
                type=schema_type.OBJECT,
                properties={'message': schema(type=schema_type.STRING)}))

        error_response_model = gateway.add_model(
            'ErrorResponseModel',
            content_type='application/json',
            model_name='ErrorResponseModel',
            schema=schema(schema=api_gw.JsonSchemaVersion.DRAFT4,
                          title='errorResponse',
                          type=schema_type.OBJECT,
                          properties={
                              'state': schema(type=schema_type.STRING),
                              'message': schema(type=schema_type.STRING)
                          }))

        # This is the VTL to transform our incoming JSON to a Dynamo Insert Query
        request_template = {
            "TableName": table.table_name,
            "Item": {
                "message": {
                    "S": "$input.path('$.message')"
                }
            }
        }
        request_template_string = json.dumps(request_template,
                                             separators=(',', ':'))

        # This is the VTL to transform the error response
        error_template = {
            "state": 'error',
            "message": "$util.escapeJavaScript($input.path('$.errorMessage'))"
        }
        error_template_string = json.dumps(error_template,
                                           separators=(',', ':'))

        # This is how our gateway chooses what response to send based on selection_pattern
        integration_options = api_gw.IntegrationOptions(
            credentials_role=api_gw_dynamo_role,
            request_templates={"application/json": request_template_string},
            passthrough_behavior=api_gw.PassthroughBehavior.NEVER,
            integration_responses=[
                api_gw.IntegrationResponse(
                    status_code='200',
                    response_templates={
                        "application/json":
                        json.dumps({"message": 'item added to db'})
                    }),
                api_gw.IntegrationResponse(
                    selection_pattern="^\[BadRequest\].*",
                    status_code='400',
                    response_templates={
                        "application/json": error_template_string
                    },
                    response_parameters={
                        'method.response.header.Content-Type':
                        "'application/json'",
                        'method.response.header.Access-Control-Allow-Origin':
                        "'*'",
                        'method.response.header.Access-Control-Allow-Credentials':
                        "'true'"
                    })
            ])

        # Add an InsertItem endpoint onto the gateway
        gateway.root.add_resource('InsertItem') \
            .add_method('POST', api_gw.Integration(type=api_gw.IntegrationType.AWS,
                                                   integration_http_method='POST',
                                                   uri='arn:aws:apigateway:us-east-1:dynamodb:action/PutItem',
                                                   options=integration_options
                                                   ),
                        method_responses=[
                            api_gw.MethodResponse(status_code='200',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': response_model
                                                  }),
                            api_gw.MethodResponse(status_code='400',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': error_response_model
                                                  }),
                        ]
                        )
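The InsertItem method accepts a {"message": ...} body and maps it through the VTL template onto a DynamoDB PutItem. A quick client-side check (the API id and region in the URL are placeholders):

# Exercise the InsertItem endpoint; the URL below is a placeholder
import json
import urllib.request

url = "https://abc123.execute-api.us-east-1.amazonaws.com/prod/InsertItem"
body = json.dumps({"message": "hello dynamo streamer"}).encode("utf-8")
req = urllib.request.Request(
    url, data=body, headers={"Content-Type": "application/json"}, method="POST")
with urllib.request.urlopen(req) as resp:
    # expect 200 and {"message": "item added to db"} per the 200 response template
    print(resp.status, resp.read().decode())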
Example #13
    def __init__(self, scope: core.Construct, id: str, *, prefix: str, environment: str, configuration, **kwargs):
        """
        :param scope: Stack class, used by CDK.
        :param id: ID of the construct, used by CDK.
        :param prefix: Prefix of the construct, used for naming purposes.
        :param environment: Environment of the construct, used for naming purposes.
        :param configuration: Configuration of the construct. In this case SNS_CONFIG_SCHEMA.
        :param kwargs: Other parameters that could be used by the construct.
        """
        super().__init__(scope, id, **kwargs)
        self.prefix = prefix
        self.environment_ = environment
        self._configuration = configuration

        # Validating that the payload passed is correct
        validate_configuration(configuration_schema=USER_SERVERLESS_BACKEND_SCHEMA, configuration_received=self._configuration)

        # Define Lambda Authorizer Function
        authorizer_functions = self._configuration.get("authorizer_function")
        self._authorizer_function = None
        if authorizer_functions is not None:
            if authorizer_functions.get("imported") is not None:
                self._authorizer_function = lambda_.Function.from_function_arn(
                    self,
                    id=authorizer_functions.get("imported").get("identifier"),
                    function_arn=authorizer_functions.get("imported").get("arn"),
                )
            elif authorizer_functions.get("origin") is not None:
                self._authorizer_function = base_lambda_function(self, **authorizer_functions.get("origin"))

        # Define DynamoDB Tables
        self._dynamodb_tables_lambda_functions = list()
        for table in self._configuration.get("dynamo_tables", []):
            table, stream = base_dynamodb_table(self, **table)
            stream_lambda = None
            if stream is True and table["stream"].get("function") is not None:
                stream_lambda = base_lambda_function(self, **table["stream"]["function"])

                # Add DynamoDB Stream Trigger to Lambda Function
                stream_lambda.add_event_source(
                    source=event_sources.DynamoEventSource(
                        table=table, starting_position=lambda_.StartingPosition.TRIM_HORIZON, batch_size=1
                    )
                )

            self._dynamodb_tables_lambda_functions.append({"table": table, "stream_lambda": stream_lambda})

        # Define S3 Buckets Cluster
        if isinstance(self._configuration.get("buckets"), list):
            self._s3_buckets = [base_bucket(self, **bucket) for bucket in self._configuration["buckets"]]

        # Define Cognito User Pool
        user_pool_config = self._configuration["user_pool"]
        self._user_pool, self._user_pool_client = base_cognito_user_pool(self, **user_pool_config)

        if user_pool_config.get("identity_pool") is not None and self._user_pool_client is not None:
            self._identity_pool = base_cognito_user_identity_pool(
                self,
                user_pool_client_id=self._user_pool_client.user_pool_client_id,
                user_pool_provider_name=self._user_pool.user_pool_provider_name,
                **user_pool_config["identity_pool"],
            )
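The table loop above expects each dynamo_tables entry to optionally carry a stream block with a nested function definition. A hedged sketch of one such entry; every key here is an assumption inferred from the loop, since the real schema lives in USER_SERVERLESS_BACKEND_SCHEMA:

# Hypothetical shape of one "dynamo_tables" entry consumed by the loop above
example_table_config = {
    "table_name": "orders",
    "partition_key": "order_id",
    "stream": {
        "enabled": True,
        "function": {
            "lambda_name": "orders_stream_processor",
            "handler": "orders_stream.lambda_handler",
        },
    },
}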
Example #14
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 id_checker: str,
                 event_bus: str,
                 stage: Optional[str] = 'prod',
                 **kwargs) -> None:
        super().__init__(scope, id + '-' + stage, **kwargs)

        app_table_name = id + '-applications-table-' + stage
        app_table = ddb.Table(self,
                              id=app_table_name,
                              table_name=app_table_name,
                              partition_key=ddb.Attribute(
                                  name='id', type=ddb.AttributeType.STRING),
                              billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

        events_table_name = id + '-events-table-' + stage
        events_table = ddb.Table(self,
                                 id=events_table_name,
                                 table_name=events_table_name,
                                 partition_key=ddb.Attribute(
                                     name='id', type=ddb.AttributeType.STRING),
                                 billing_mode=ddb.BillingMode.PAY_PER_REQUEST,
                                 stream=ddb.StreamViewType.NEW_IMAGE)

        self._table_stream_arn = events_table.table_stream_arn

        # create our Lambda function for the bank account service
        func_name = id + '-' + stage + '-' + 'account-application'
        lambda_assets = lambda_.Code.from_asset('account_application_service')
        handler = lambda_.Function(self,
                                   func_name,
                                   code=lambda_assets,
                                   runtime=lambda_.Runtime.NODEJS_10_X,
                                   handler='main.handler',
                                   environment={
                                       'ACCOUNTS_TABLE_NAME':
                                       app_table.table_name,
                                       'EVENTS_TABLE_NAME':
                                       events_table.table_name,
                                       'REGION': core.Aws.REGION
                                   })

        gw.LambdaRestApi(self, id=stage + '-' + id, handler=handler)

        # grant main Lambda function access to DynamoDB tables
        app_table.grant_read_write_data(handler.role)
        events_table.grant_read_write_data(handler.role)

        p_statement = iam.PolicyStatement(actions=[
            'ssm:Describe*', 'ssm:Get*', 'ssm:List*', 'events:*', 'states:*'
        ],
                                          effect=iam.Effect.ALLOW,
                                          resources=['*'])
        handler.add_to_role_policy(statement=p_statement)

        # create the Lambda function for the event publisher
        evt_publisher = id + '-' + stage + '-' + 'event-publisher'
        evt_handler = lambda_.Function(
            self,
            evt_publisher,
            code=lambda_assets,
            runtime=lambda_.Runtime.NODEJS_10_X,
            handler='event-publisher.handler',
            events=[
                lambda_es.DynamoEventSource(
                    table=events_table,
                    starting_position=lambda_.StartingPosition.LATEST)
            ],
            environment={
                'EVENT_BRIDGE_ARN': event_bus,
                'REGION': core.Aws.REGION
            })

        evt_handler.add_to_role_policy(statement=p_statement)

        # set up StepFunctions
        approve_application = sf.Task(
            self,
            'Approve Application',
            task=sft.InvokeFunction(handler,
                                    payload={
                                        'body': {
                                            'command':
                                            'APPROVE_ACCOUNT_APPLICATION',
                                            'data': {
                                                'id.$': '$.application.id'
                                            }
                                        }
                                    }),
            result_path='$.approveApplication')

        reject_application = sf.Task(self,
                                     'Reject Application',
                                     task=sft.InvokeFunction(
                                         handler,
                                         payload={
                                             'body': {
                                                 'command':
                                                 'REJECT_ACCOUNT_APPLICATION',
                                                 'data': {
                                                     'id.$': '$.application.id'
                                                 }
                                             }
                                         }),
                                     result_path='$.rejectApplication')

        id_checker_handler = lambda_.Function.from_function_arn(
            self, 'IdentityChecker', function_arn=id_checker)
        check_identity = sf.Task(self,
                                 'Check Identity',
                                 task=sft.InvokeFunction(
                                     id_checker_handler,
                                     payload={
                                         'body': {
                                             'command': 'CHECK_IDENTITY',
                                             'data': {
                                                 'application.$':
                                                 '$.application'
                                             }
                                         }
                                     }))

        wait_for_human_review = sf.Task(self, 'Wait for Human Review',
                                        task=sft.RunLambdaTask(handler,
                                                               integration_pattern=sf.ServiceIntegrationPattern.WAIT_FOR_TASK_TOKEN,
                                                               payload={
                                                                   'body': {
                                                                       'command': 'FLAG_ACCOUNT_APPLICATION_FOR_HUMAN_REVIEW',
                                                                       'data': {
                                                                           'id.$': '$.application.id',
                                                                           'taskToken': sf.Context.task_token
                                                                       }
                                                                   }
                                                               }), result_path='$.humanReview') \
            .next(
            sf.Choice(self, 'Human Approval Choice')
            .when(sf.Condition.string_equals('$.humanReview.decision', 'APPROVE'), next=approve_application)
            .when(sf.Condition.string_equals('$.humanReview.decision', 'REJECT'), next=reject_application))

        sm_definition = sf.Parallel(self, 'Perform Automated Checks', result_path='$.checks') \
            .branch(check_identity) \
            .branch(sf.Pass(self, 'Check Fraud Model', result=sf.Result({'flagged': False}))) \
            .next(
            sf.Choice(self, 'Automated Checks Choice')
                .when(sf.Condition.boolean_equals('$.checks[0].flagged', True), next=wait_for_human_review)
                .when(sf.Condition.boolean_equals('$.checks[1].flagged', True), next=wait_for_human_review)
                .otherwise(approve_application))

        state_machine = sf.StateMachine(self,
                                        'OpenAccountStateMachine' + stage,
                                        definition=sm_definition)
        ssm.CfnParameter(self,
                         id='StateMachineArnSSM',
                         type='String',
                         value=state_machine.state_machine_arn,
                         name='StateMachineArnSSM')
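The 'Wait for Human Review' task uses WAIT_FOR_TASK_TOKEN, so something outside the state machine must complete it with the issued token. A hedged sketch of that callback (how the token is stored and retrieved is an assumption):

# Hypothetical review callback: report the human decision back to the
# state machine using the task token issued by RunLambdaTask above
import json
import boto3

sfn = boto3.client("stepfunctions")


def complete_review(task_token: str, decision: str) -> None:
    # decision feeds the 'Human Approval Choice' state:
    # $.humanReview.decision must equal APPROVE or REJECT
    sfn.send_task_success(taskToken=task_token,
                          output=json.dumps({"decision": decision}))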
Example #15
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        table = dynamo_db.Table(self,
                                "MysfitsQuestionsTable",
                                table_name="MysfitsQuestionsTable",
                                partition_key=dynamo_db.Attribute(
                                    name="QuestionId",
                                    type=dynamo_db.AttributeType.STRING),
                                stream=dynamo_db.StreamViewType.NEW_IMAGE)

        post_question_lambda_function_policy_stm_ddb = _iam.PolicyStatement()
        post_question_lambda_function_policy_stm_ddb.add_actions(
            "dynamodb:PutItem")
        post_question_lambda_function_policy_stm_ddb.add_resources(
            table.table_arn)

        lambda_function_policy_stm_xray = _iam.PolicyStatement()
        lambda_function_policy_stm_xray.add_actions(
            "xray:PutTraceSegments", "xray:PutTelemetryRecords",
            "xray:GetSamplingRules", "xray:GetSamplingTargets",
            "xray:GetSamplingStatisticSummaries")
        lambda_function_policy_stm_xray.add_all_resources()

        # Lambda processor function
        mysfits_post_question = _lambda.Function(
            self,
            'PostQuestionFunction',
            handler="mysfitsPostQuestion.postQuestion",
            runtime=_lambda.Runtime.PYTHON_3_6,
            description=
            'A microservice Lambda function that receives a new question submitted to the MythicalMysfits'
            ' website from a user and inserts it into a DynamoDB database table.',
            memory_size=128,
            code=_lambda.Code.asset('./lambda_questions/PostQuestionsService'),
            timeout=cdk.Duration.seconds(30),
            initial_policy=[
                post_question_lambda_function_policy_stm_ddb,
                lambda_function_policy_stm_xray
            ],
            tracing=_lambda.Tracing.ACTIVE)

        topic = sns.Topic(self,
                          'Topic',
                          display_name='MythicalMysfitsQuestionsTopic',
                          topic_name='MythicalMysfitsQuestionsTopic')
        topic.add_subscription(subs.EmailSubscription(receiver_email))

        post_question_lambda_function_policy_stm_sns = _iam.PolicyStatement()
        post_question_lambda_function_policy_stm_sns.add_actions("sns:Publish")
        post_question_lambda_function_policy_stm_sns.add_resources(
            topic.topic_arn)

        mysfits_process_questions_stream = _lambda.Function(
            self,
            'ProcessQuestionStreamFunction',
            handler="mysfitsProcessStream.processStream",
            runtime=_lambda.Runtime.PYTHON_3_6,
            description=
            'An AWS Lambda function that will process all new questions posted to mythical mysfits'
            ' and notify the site administrator of the question that was asked.',
            memory_size=128,
            code=_lambda.Code.from_asset(
                './lambda_questions/ProcessQuestionsStream'),
            timeout=cdk.Duration.seconds(30),
            initial_policy=[
                post_question_lambda_function_policy_stm_sns,
                lambda_function_policy_stm_xray
            ],
            environment={'SNS_TOPIC_ARN': topic.topic_arn},
            tracing=_lambda.Tracing.ACTIVE,
            events=[
                event.DynamoEventSource(
                    table,
                    starting_position=_lambda.StartingPosition.TRIM_HORIZON,
                    batch_size=1)
            ])

        questions_api_role = _iam.Role(
            self,
            'QuestionsApiRole',
            assumed_by=_iam.ServicePrincipal('apigateway.amazonaws.com'))
        api_policy = _iam.PolicyStatement()
        api_policy.add_actions("lambda:InvokeFunction")
        api_policy.add_resources(mysfits_post_question.function_arn)
        api_policy.effect = _iam.Effect.ALLOW
        # Associate policy to role
        _iam.Policy(self,
                    "QuestionsApiPolicy",
                    policy_name="questions_api_policy",
                    statements=[api_policy],
                    roles=[questions_api_role])

        # Create API gateway
        questions_integration = apigw.LambdaIntegration(
            mysfits_post_question,
            credentials_role=questions_api_role,
            integration_responses=[
                apigw.IntegrationResponse(
                    status_code='200',
                    response_templates={"application/json": '{"status":"OK"}'})
            ],
        )

        # rest_api_name and deploy_options are passed directly; nesting a
        # second props object with a duplicate handler inside the deprecated
        # `options` parameter is redundant.
        api = apigw.LambdaRestApi(
            self,
            'APIEndpoint',
            handler=mysfits_post_question,
            rest_api_name='QuestionsAPI',
            deploy_options=apigw.StageOptions(tracing_enabled=True),
            proxy=False)

        questions_method = api.root.add_resource('questions')
        questions_method.add_method(
            'POST',
            questions_integration,
            method_responses=[apigw.MethodResponse(status_code='200')],
            authorization_type=apigw.AuthorizationType.NONE)

        questions_method.add_method(
            'OPTIONS',
            integration=apigw.MockIntegration(
                integration_responses=[
                    apigw.IntegrationResponse(
                        status_code='200',
                        response_parameters={
                            'method.response.header.Access-Control-Allow-Headers':
                            "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,X-Amz-User-Agent'",
                            'method.response.header.Access-Control-Allow-Origin':
                            "'*'",
                            'method.response.header.Access-Control-Allow-Credentials':
                            "'false'",
                            'method.response.header.Access-Control-Allow-Methods':
                            "'OPTIONS,GET,PUT,POST,DELETE'"
                        })
                ],
                passthrough_behavior=apigw.PassthroughBehavior.NEVER,
                request_templates={"application/json": '{"statusCode": 200}'}),
            method_responses=[
                apigw.MethodResponse(
                    status_code='200',
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Headers":
                        True,
                        "method.response.header.Access-Control-Allow-Methods":
                        True,
                        "method.response.header.Access-Control-Allow-Credentials":
                        True,
                        "method.response.header.Access-Control-Allow-Origin":
                        True
                    })
            ])
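
The handler code under ./lambda_questions/PostQuestionsService is not included in this snippet. Below is a minimal sketch of what mysfitsPostQuestion.postQuestion could look like, assuming a proxy-style API Gateway event and the field names shown (all assumptions, not taken from the original service):

import json
import uuid

import boto3

table = boto3.resource('dynamodb').Table('MysfitsQuestionsTable')


def postQuestion(event, context):
    # Parse the incoming question and write it to the table; the PutItem
    # call is what the dynamodb:PutItem policy statement above permits.
    body = json.loads(event['body'])
    table.put_item(Item={
        'QuestionId': str(uuid.uuid4()),
        'UserEmail': body.get('UserEmail'),
        'Question': body.get('Question'),
    })
    return {'statusCode': 200, 'body': json.dumps({'status': 'OK'})}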
Example #16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        lambda_repository = aws_codecommit.Repository(
            self,
            "QuestionsLambdaRepository",
            repository_name="MythicalMysfits-QuestionsLambdaRepository",
        )

        core.CfnOutput(
            self,
            "questionsRepositoryCloneUrlHTTP",
            value=lambda_repository.repository_clone_url_http,
            description="Questions Lambda Repository Clone URL HTTP",
        )
        core.CfnOutput(
            self,
            "questionsRepositoryCloneUrlSSH",
            value=lambda_repository.repository_clone_url_ssh,
            description="Questions Lambda Repository Clone URL SSH",
        )

        table = aws_dynamodb.Table(
            self,
            "Table",
            table_name="MysfitsQuestionsTable",
            partition_key=aws_dynamodb.Attribute(
                name="QuestionId", type=aws_dynamodb.AttributeType.STRING),
            stream=aws_dynamodb.StreamViewType.NEW_IMAGE,
        )

        lambda_function_policy_statement_ddb = aws_iam.PolicyStatement()
        lambda_function_policy_statement_ddb.add_actions("dynamodb:PutItem")
        lambda_function_policy_statement_ddb.add_resources(table.table_arn)

        lambda_function_policy_statement_xray = aws_iam.PolicyStatement()
        lambda_function_policy_statement_xray.add_actions(
            "xray:PutTraceSegments",
            "xray:PutTelemetryRecords",
            "xray:GetSamplingRules",
            "xray:GetSamplingTargets",
            "xray:GetSamplingStatisticSummaries",
        )
        lambda_function_policy_statement_xray.add_all_resources()

        mysfits_post_question = aws_lambda.Function(
            self,
            "PostQuestionFunction",
            handler="mysfitsPostQuestion.postQuestion",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            description=
            "A microservice Lambda function that receives a new question submitted to the MythicalMysfits website from a user and inserts it into a DynamoDB database table.",
            memory_size=128,
            code=aws_lambda.Code.from_asset(
                os.path.join("..", "..", "lambda-questions",
                             "PostQuestionsService")),
            timeout=core.Duration.seconds(30),
            initial_policy=[
                lambda_function_policy_statement_ddb,
                lambda_function_policy_statement_xray,
            ],
            tracing=aws_lambda.Tracing.ACTIVE,
        )

        topic = aws_sns.Topic(
            self,
            "Topic",
            display_name="MythicalMysfitsQuestionsTopic",
            topic_name="MythicalMysfitsQuestionsTopic",
        )
        topic.add_subscription(subs.EmailSubscription(os.environ["SNS_EMAIL"]))

        post_question_lambda_function_policy_statement_sns = aws_iam.PolicyStatement()
        post_question_lambda_function_policy_statement_sns.add_actions(
            "sns:Publish")
        post_question_lambda_function_policy_statement_sns.add_resources(
            topic.topic_arn)

        mysfits_process_question_stream = aws_lambda.Function(
            self,
            "ProcessQuestionStreamFunction",
            handler="mysfitsProcessStream.processStream",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            description=
            "An AWS Lambda function that will process all new questions posted to mythical mysfits and notify the site administrator of the question that was asked.",
            memory_size=128,
            code=aws_lambda.Code.from_asset(
                os.path.join("..", "..", "lambda-questions",
                             "ProcessQuestionsStream")),
            timeout=core.Duration.seconds(30),
            initial_policy=[
                post_question_lambda_function_policy_statement_sns,
                lambda_function_policy_statement_xray,
            ],
            tracing=aws_lambda.Tracing.ACTIVE,
            environment={"SNS_TOPIC_ARN": topic.topic_arn},
            events=[
                event.DynamoEventSource(
                    table,
                    starting_position=aws_lambda.StartingPosition.TRIM_HORIZON,
                    batch_size=1,
                )
            ],
        )

        questions_api_role = aws_iam.Role(
            self,
            "QuestionsApiRole",
            assumed_by=aws_iam.ServicePrincipal("apigateway.amazonaws.com"),
        )

        api_policy = aws_iam.PolicyStatement()
        api_policy.add_actions("lambda:InvokeFunction")
        api_policy.add_resources(mysfits_post_question.function_arn)
        aws_iam.Policy(
            self,
            "QuestionsApiPolicy",
            policy_name="questions_api_policy",
            statements=[api_policy],
            roles=[questions_api_role],
        )

        questions_integration = aws_apigateway.LambdaIntegration(
            mysfits_post_question,
            credentials_role=questions_api_role,
            integration_responses=[
                aws_apigateway.IntegrationResponse(
                    status_code="200",
                    response_templates={
                        "application/json": '{"status": "OK"}'
                    },
                )
            ],
        )

        api = aws_apigateway.LambdaRestApi(
            self,
            "APIEndpoint",
            handler=mysfits_post_question,
            # rest_api_name is a direct prop; the deprecated `options`
            # wrapper is unnecessary here.
            rest_api_name="Questions API Server",
            proxy=False,
        )

        questions_method = api.root.add_resource("questions")
        questions_method.add_method(
            "POST",
            questions_integration,
            method_responses=[
                aws_apigateway.MethodResponse(status_code="200")
            ],
            authorization_type=aws_apigateway.AuthorizationType.NONE,
        )

        questions_method.add_method(
            "OPTIONS",
            aws_apigateway.MockIntegration(
                integration_responses=[
                    aws_apigateway.IntegrationResponse(
                        status_code="200",
                        response_parameters={
                            "method.response.header.Access-Control-Allow-Headers":
                            "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,X-Amz-User-Agent'",
                            "method.response.header.Access-Control-Allow-Origin":
                            "'*'",
                            "method.response.header.Access-Control-Allow-Credentials":
                            "'false'",
                            "method.response.header.Access-Control-Allow-Methods":
                            "'OPTIONS,GET,PUT,POST,DELETE'",
                        },
                    )
                ],
                passthrough_behavior=aws_apigateway.PassthroughBehavior.NEVER,
                request_templates={"application/json": '{"statusCode": 200}'},
            ),
            method_responses=[
                aws_apigateway.MethodResponse(
                    status_code="200",
                    response_parameters={
                        "method.response.header.Access-Control-Allow-Headers":
                        True,
                        "method.response.header.Access-Control-Allow-Methods":
                        True,
                        "method.response.header.Access-Control-Allow-Credentials":
                        True,
                        "method.response.header.Access-Control-Allow-Origin":
                        True,
                    },
                )
            ],
        )
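
Likewise, the stream processor's source is not part of this snippet. A minimal sketch of mysfitsProcessStream.processStream, under the assumption that it forwards each newly inserted question to the topic named in SNS_TOPIC_ARN:

import json
import os

import boto3

sns_client = boto3.client('sns')


def processStream(event, context):
    for record in event['Records']:
        if record['eventName'] != 'INSERT':
            continue
        # NEW_IMAGE is available because the table stream uses
        # StreamViewType.NEW_IMAGE.
        new_image = record['dynamodb']['NewImage']
        sns_client.publish(
            TopicArn=os.environ['SNS_TOPIC_ARN'],
            Subject='A new question has been posted',
            Message=json.dumps(new_image))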
Example #17
    def __init__(self, scope: core.Construct, id_: str,
                 imported_assets_config: ImportedAssetsConfiguration,
                 mongodb_config: MongoDBConfiguration,
                 access_keys_config: AccessKeysConfiguration, **kwargs):

        super().__init__(scope, id_, **kwargs)

        # LAMBDAS DEFINITIONS

        lambda_dispatch_stream = lambda_.Function(
            self,
            'DispatchStream',
            code=lambda_.AssetCode(
                'stack/lambda/dispatch_stream/1.0.0/python/dispatch_stream'),
            timeout=core.Duration.seconds(10),
            description='',
            function_name='DispatchStream',
            reserved_concurrent_executions=10,
            handler=DEFAULT_LAMBDA_HANDLER,
            runtime=DEFAULT_LAMBDA_RUNTIME,
            log_retention=DEFAULT_LAMBDA_LOG_RETENTION,
            memory_size=128,
            retry_attempts=0,
            dead_letter_queue_enabled=False)

        lambda_geocode_property = lambda_.Function(
            self,
            'GeocodeProperty',
            code=lambda_.AssetCode(
                'stack/lambda/geocode_property/1.1.3/python/geocode_property'),
            timeout=core.Duration.seconds(15),
            description='',
            function_name='GeocodeProperty',
            reserved_concurrent_executions=10,
            handler=DEFAULT_LAMBDA_HANDLER,
            runtime=DEFAULT_LAMBDA_RUNTIME,
            log_retention=DEFAULT_LAMBDA_LOG_RETENTION,
            memory_size=128,
            retry_attempts=0,
            dead_letter_queue_enabled=True)

        lambda_fetch_properties = lambda_.Function(
            self,
            'FetchProperties',
            code=lambda_.AssetCode(
                'stack/lambda/fetch_properties/1.4.0/python/fetch_properties'),
            timeout=core.Duration.seconds(10),
            description='',
            function_name='FetchProperties',
            reserved_concurrent_executions=10,
            handler=DEFAULT_LAMBDA_HANDLER,
            runtime=DEFAULT_LAMBDA_RUNTIME,
            log_retention=DEFAULT_LAMBDA_LOG_RETENTION,
            memory_size=128,
            retry_attempts=0,
            dead_letter_queue_enabled=True)

        # LAYERS DEFINITIONS

        layer_dispatch_stream = lambda_.LayerVersion(
            self,
            'DispatchStreamLibs',
            code=lambda_.Code.from_asset(
                'stack/lambda/dispatch_stream/1.0.0/'),
            description='',
            layer_version_name='DispatchStreamLibs',
            compatible_runtimes=[DEFAULT_LAMBDA_RUNTIME])

        layer_geocode_property = lambda_.LayerVersion(
            self,
            'GeocodePropertyLibs',
            code=lambda_.Code.from_asset(
                'stack/lambda/geocode_property/1.1.3/'),
            description='',
            layer_version_name='GeocodePropertyLibs',
            compatible_runtimes=[DEFAULT_LAMBDA_RUNTIME])

        layer_fetch_properties = lambda_.LayerVersion(
            self,
            'FetchPropertiesLibs',
            code=lambda_.Code.from_asset(
                'stack/lambda/fetch_properties/1.4.0/'),
            description='',
            layer_version_name='FetchPropertiesLibs',
            compatible_runtimes=[DEFAULT_LAMBDA_RUNTIME])

        # CLOUDWATCH RULES DEFINITIONS
        # -

        # SQS QUEUES DEFINITIONS
        # -

        # SNS TOPICS DEFINITIONS

        topic_new_properties = sns.Topic(self,
                                         'NewProperties',
                                         display_name='',
                                         topic_name='NewProperties')

        # API GATEWAYS
        api_gateway_graphql = api_gateway.LambdaRestApi(
            self,
            'GraphQLApi',
            handler=lambda_fetch_properties,
            rest_api_name='GraphQLApi',
            description='GraphQL API',
            cloud_watch_role=True)
        api_gateway_graphql_resource = api_gateway_graphql.root.add_resource(
            'graphql')
        api_gateway_graphql_resource.add_method('GET', api_key_required=False)
        api_gateway_graphql.add_usage_plan(
            'GraphQLUsagePlan',
            name='GraphQLUsagePlan',
            throttle=api_gateway.ThrottleSettings(rate_limit=1, burst_limit=1))

        # DYNAMODB STREAM EVENT SOURCES
        # (the event source mapping also grants stream read permissions)
        lambda_dispatch_stream.add_event_source(
            event_sources.DynamoEventSource(
                table=imported_assets_config.table_property,
                starting_position=lambda_.StartingPosition.LATEST,
                batch_size=10,
                max_batching_window=core.Duration.seconds(30),
                parallelization_factor=10,
                retry_attempts=0))

        # CLOUDWATCH SCHEDULING RULES
        # -

        # SQS PERMISSIONS
        # -

        # SNS PERMISSIONS

        topic_new_properties.grant_publish(lambda_dispatch_stream)
        topic_new_properties.add_subscription(
            sns_subscriptions.LambdaSubscription(lambda_geocode_property))

        # LAYERS ASSIGNMENTS

        lambda_dispatch_stream.add_layers(layer_dispatch_stream)
        lambda_geocode_property.add_layers(layer_geocode_property)
        lambda_fetch_properties.add_layers(layer_fetch_properties)

        # ENVIRONMENT VARIABLES

        lambda_geocode_property.add_environment(key='MONGODB_URI',
                                                value=mongodb_config.uri)
        lambda_geocode_property.add_environment(key='MONGODB_DATABASE',
                                                value=mongodb_config.database)
        lambda_geocode_property.add_environment(
            key='MONGODB_COLLECTION', value=mongodb_config.collection)
        lambda_geocode_property.add_environment(
            key='API_ACCESS_TOKEN_GEOCODING',
            value=access_keys_config.geocoding)
        lambda_fetch_properties.add_environment(key='MONGODB_URI',
                                                value=mongodb_config.uri)
        lambda_fetch_properties.add_environment(key='MONGODB_DATABASE',
                                                value=mongodb_config.database)
        lambda_fetch_properties.add_environment(
            key='MONGODB_COLLECTION', value=mongodb_config.collection)
        lambda_fetch_properties.add_environment(
            key='MONGODB_MAX_PAGE_SIZE', value=mongodb_config.max_page_size)
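
The DispatchStream handler code (stack/lambda/dispatch_stream/1.0.0) is likewise not shown. Below is a minimal sketch consistent with the wiring above (DynamoDB stream records in, NewProperties SNS topic out). The handler name, and the environment variable carrying the topic ARN, are hypothetical; the stack above does not show how either is configured (the real entry point is the DEFAULT_LAMBDA_HANDLER constant defined elsewhere):

import json
import os

import boto3

sns_client = boto3.client('sns')


def handler(event, context):
    # Forward each newly inserted property record to the NewProperties topic;
    # grant_publish() above gives this function the required sns:Publish
    # permission. NEW_PROPERTIES_TOPIC_ARN is a hypothetical variable name.
    for record in event['Records']:
        if record['eventName'] == 'INSERT':
            sns_client.publish(
                TopicArn=os.environ['NEW_PROPERTIES_TOPIC_ARN'],
                Message=json.dumps(record['dynamodb']))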