def __init__(self, scope: cdk.Construct, construct_id: str, stage: str,
                 explain_bot_lambda: _lambda.Function,
                 add_meaning_lambda: _lambda.Function, **kwargs) -> None:
        """Wire the Slack-bot HTTP API.

        POST / is handled by explain_bot_lambda; any method on /add_meaning
        is handled by add_meaning_lambda. The invoke URL is exported as the
        "Url" stack output.
        """
        super().__init__(scope, construct_id, **kwargs)

        # One HTTP API fronts both handlers; stage suffix keeps ids unique.
        http_api = _apigw2.HttpApi(self, "ExplainSlackBotApi" + stage)

        # Surface the invoke URL so it can be read off the stack.
        self.url_output = cdk.CfnOutput(self, "Url", value=http_api.url)

        # POST / -> explain handler.
        http_api.add_routes(
            path="/",
            methods=[_apigw2.HttpMethod.POST],
            integration=_a2int.LambdaProxyIntegration(
                handler=explain_bot_lambda),
        )

        # Any method on /add_meaning -> add-meaning handler.
        http_api.add_routes(
            path="/add_meaning",
            methods=[_apigw2.HttpMethod.ANY],
            integration=_a2int.LambdaProxyIntegration(
                handler=add_meaning_lambda),
        )
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Hit-counter webservice: HTTP API -> Node.js lambda -> DynamoDB."""
        super().__init__(scope, id, **kwargs)

        # Table keyed by the request path being counted.
        hits_table = dynamo_db.Table(
            self,
            "Hits",
            partition_key=dynamo_db.Attribute(
                name="path", type=dynamo_db.AttributeType.STRING))

        # Node.js handler; entry point is handler() in lambda_fns/lambda.js.
        handler_fn = _lambda.Function(
            self,
            "DynamoLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="lambda.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
            environment={'HITS_TABLE_NAME': hits_table.table_name})

        # The handler both reads and updates the counter rows.
        hits_table.grant_read_write_data(handler_fn)

        # Every request to the HTTP API is proxied straight to the lambda.
        api = api_gw.HttpApi(
            self,
            'Endpoint',
            default_integration=integrations.LambdaProxyIntegration(
                handler=handler_fn))

        core.CfnOutput(self, 'HTTP API Url', value=api.url)
# Example 3
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Expose a lambda that translates text and synthesizes speech via Polly."""
        super().__init__(scope, id, **kwargs)

        # Python handler: polly.handler inside lambda_fns/.
        synth_fn = _lambda.Function(
            self,
            'pollyHandler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('lambda_fns'),
            handler='polly.handler')

        # Polly/Translate actions only accept resource "*":
        # https://docs.aws.amazon.com/polly/latest/dg/api-permissions-reference.html
        # https://docs.aws.amazon.com/translate/latest/dg/translate-api-permissions-ref.html
        synth_fn.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=['*'],
                actions=['translate:TranslateText',
                         'polly:SynthesizeSpeech']))

        # Default-route HTTP API proxies everything to the lambda.
        api = api_gw.HttpApi(
            self,
            'Polly',
            default_integration=integrations.LambdaProxyIntegration(
                handler=synth_fn))

        core.CfnOutput(self, 'HTTP API Url', value=api.url)
# Example 4
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        memory: int = 1024,
        timeout: int = 30,
        runtime: aws_lambda.Runtime = aws_lambda.Runtime.PYTHON_3_8,
        concurrent: Optional[int] = None,
        permissions: Optional[List[iam.PolicyStatement]] = None,
        env: Optional[dict] = None,
        code_dir: str = "./",
        **kwargs: Any,
    ) -> None:
        """Define stack.

        Builds a Docker-bundled titiler lambda and fronts it with an HTTP
        API whose URL is emitted as the "Endpoint" output.

        Args:
            memory: lambda memory size (MB).
            timeout: lambda timeout (seconds).
            runtime: lambda runtime.
            concurrent: reserved concurrent executions (None = unreserved).
            permissions: extra IAM statements attached to the lambda role.
            env: extra environment variables merged over DEFAULT_ENV.
            code_dir: directory containing the code and its Dockerfile.
        """
        # BUG FIX: was `super().__init__(scope, id, *kwargs)`, which unpacks
        # the kwargs dict's KEYS positionally instead of forwarding the
        # keyword arguments.
        super().__init__(scope, id, **kwargs)

        permissions = permissions or []

        # `env or {}`: default is now None to avoid the shared
        # mutable-default-argument pitfall.
        lambda_env = {**DEFAULT_ENV, **(env or {})}
        lambda_env.update(
            dict(
                MOSAIC_BACKEND=settings.mosaic_backend,
                MOSAIC_HOST=settings.mosaic_host,
            ))

        lambda_function = aws_lambda.Function(
            self,
            f"{id}-lambda",
            runtime=runtime,
            # Bundle inside the image built from code_dir's Dockerfile and
            # copy the staged /var/task contents out as the code asset.
            code=aws_lambda.Code.from_asset(
                path=os.path.abspath(code_dir),
                bundling=core.BundlingOptions(
                    image=core.BundlingDockerImage.from_asset(
                        os.path.abspath(code_dir),
                        file="Dockerfile",
                    ),
                    command=[
                        "bash", "-c", "cp -R /var/task/. /asset-output/."
                    ],
                ),
            ),
            handler="titiler_pds.handler.handler",
            memory_size=memory,
            reserved_concurrent_executions=concurrent,
            timeout=core.Duration.seconds(timeout),
            environment=lambda_env,
        )

        for perm in permissions:
            lambda_function.add_to_role_policy(perm)

        # HTTP API proxying every request to the lambda.
        api = apigw.HttpApi(
            self,
            f"{id}-endpoint",
            default_integration=apigw_integrations.LambdaProxyIntegration(
                handler=lambda_function),
        )
        core.CfnOutput(self, "Endpoint", value=api.url)
# Example 5
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        memory: int = 1024,
        timeout: int = 30,
        runtime: aws_lambda.Runtime = aws_lambda.Runtime.PYTHON_3_8,
        concurrent: Optional[int] = None,
        permissions: Optional[List[iam.PolicyStatement]] = None,
        layer_arn: Optional[str] = None,
        env: Optional[dict] = None,
        code_dir: str = "./",
        **kwargs: Any,
    ) -> None:
        """Define stack.

        Creates a lambda from a locally-built package, optionally attaches
        a layer, and fronts it with an HTTP API ("Endpoint" output).

        Args:
            memory: lambda memory size (MB).
            timeout: lambda timeout (seconds).
            runtime: lambda runtime.
            concurrent: reserved concurrent executions (None = unreserved).
            permissions: extra IAM statements attached to the lambda role.
            layer_arn: ARN of a layer to attach (skipped when None).
            env: extra environment variables merged over DEFAULT_ENV.
            code_dir: directory packaged by self.create_package().
        """
        # BUG FIX: was `super().__init__(scope, id, *kwargs)`, which unpacks
        # the kwargs dict's KEYS positionally instead of forwarding the
        # keyword arguments.
        super().__init__(scope, id, **kwargs)

        permissions = permissions or []

        # Copy so DEFAULT_ENV itself is never mutated; `env or {}`: default
        # is now None to avoid the shared mutable-default-argument pitfall.
        lambda_env = DEFAULT_ENV.copy()
        lambda_env.update(env or {})

        lambda_function = aws_lambda.Function(
            self,
            f"{id}-lambda",
            runtime=runtime,
            code=self.create_package(code_dir),
            handler="handler.handler",
            memory_size=memory,
            reserved_concurrent_executions=concurrent,
            timeout=core.Duration.seconds(timeout),
            environment=lambda_env,
        )

        for perm in permissions:
            lambda_function.add_to_role_policy(perm)

        if layer_arn:
            # Construct id is the layer-name segment of the ARN
            # (arn:aws:lambda:region:account:layer:NAME:VERSION).
            lambda_function.add_layers(
                aws_lambda.LayerVersion.from_layer_version_arn(
                    self,
                    layer_arn.split(":")[-2], layer_arn))

        # HTTP API proxying every request to the lambda.
        api = apigw.HttpApi(
            self,
            f"{id}-endpoint",
            default_integration=apigw_integrations.LambdaProxyIntegration(
                handler=lambda_function),
        )
        core.CfnOutput(self, "Endpoint", value=api.url)
# Example 6
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        """Container-image lambda serving an ML model behind an HTTP API."""
        super().__init__(scope, construct_id, **kwargs)

        # The model and its Dockerfile live next to this package in ../model.
        model_folder = os.path.dirname(os.path.realpath(__file__)) + "/../model"

        # Docker-image lambda; generous memory and timeout for inference.
        predictive_lambda = _lambda.DockerImageFunction(
            self,
            'PredictiveLambda',
            code=_lambda.DockerImageCode.from_image_asset(model_folder),
            memory_size=4096,
            timeout=core.Duration.seconds(15))

        # HTTP API proxying every request to the "PredictiveLambda" function.
        api = api_gw.HttpApi(
            self,
            'PredictiveLambdaEndpoint',
            default_integration=integrations.LambdaProxyIntegration(
                handler=predictive_lambda))

        core.CfnOutput(self, 'HTTP API Url', value=api.url)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Circuit-breaker demo: HTTP API -> unreliable lambda -> DynamoDB."""
        super().__init__(scope, id, **kwargs)

        # Breaker-state table; destroyed together with the stack.
        breaker_table = dynamo_db.Table(
            self,
            "CircuitBreakerTable",
            partition_key=dynamo_db.Attribute(
                name="id", type=dynamo_db.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)

        # Install dependencies and build the lambda sources before packaging.
        lambda_folder = os.path.dirname(
            os.path.realpath(__file__)) + "/../lambda_fns"
        for cmd in ("npm i", "npm run build"):
            subprocess.check_call(cmd.split(),
                                  cwd=lambda_folder,
                                  stdout=subprocess.DEVNULL)

        # Deliberately flaky lambda; entry point is unreliable.handler.
        unreliable_lambda = _lambda.Function(
            self,
            "UnreliableLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="unreliable.handler",
            code=_lambda.Code.from_asset("lambda_fns"),
            environment={'CIRCUITBREAKER_TABLE': breaker_table.table_name})

        # The handler records and reads breaker state in the table.
        breaker_table.grant_read_write_data(unreliable_lambda)

        # HTTP API proxying every request to the unreliable lambda.
        api = api_gw.HttpApi(
            self,
            'CircuitBreakerGateway',
            default_integration=integrations.LambdaProxyIntegration(
                handler=unreliable_lambda))

        core.CfnOutput(self, 'HTTP API Url', value=api.url)
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """Docker-image lambda API; outputs its endpoint URL and log group."""
        super().__init__(scope, construct_id, **kwargs)

        # Image built from ../src; 30 s timeout, 2 GB memory.
        api_fn = _lambda.DockerImageFunction(
            self,
            'MyApiFunction',
            code=_lambda.DockerImageCode.from_image_asset('../src'),
            timeout=core.Duration.seconds(30),
            memory_size=2048)

        # All routes proxy to the function.
        http_api = apigv2.HttpApi(
            self,
            'MyApi',
            default_integration=apigv2int.LambdaProxyIntegration(
                handler=api_fn))

        core.CfnOutput(self, 'MyApiUrl', value=http_api.api_endpoint)

        # Log group name makes `aws logs tail` easy after deploy.
        core.CfnOutput(self,
                       'MyApiFnLogGroup',
                       value=api_fn.log_group.log_group_name)
# Example 9
    def __init__(self, scope: core.Construct, id: str, *, docker_root: str,
                 **kwargs) -> None:
        """FastAPI container lambda behind an HTTP API proxy gateway."""
        super().__init__(scope, id, **kwargs)

        # Lambda image built from the Docker context at docker_root.
        fastapi_fn = _lambda.DockerImageFunction(
            self,
            "FastAPIImageLambda",
            code=_lambda.DockerImageCode.from_image_asset(docker_root),
        )

        # Gateway proxies everything to the FastAPI handler.
        gateway = _apigw.HttpApi(
            self,
            "FastAPIProxyGateway",
            api_name="FastAPIProxyGateway",
            default_integration=_apigw_integration.LambdaProxyIntegration(
                handler=fastapi_fn),
        )

        # Exported so other stacks can import the URL by name.
        core.CfnOutput(self,
                       "EndpointUrl",
                       value=gateway.api_endpoint,
                       export_name="fastApiUrl")
# Example 10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Message-wall demo: lambda with an EFS mount behind an HTTP API.

        Creates a VPC, an EFS file system with a lambda-friendly access
        point, mounts it into a Python lambda at /mnt/msg, and proxies all
        HTTP API traffic to that lambda.
        """
        super().__init__(scope, id, **kwargs)

        # EFS needs to be setup in a VPC
        vpc = ec2.Vpc(self, 'Vpc', max_azs=2)

        # Create a file system in EFS to store information; removed with the stack.
        fs = efs.FileSystem(self,
                            'FileSystem',
                            vpc=vpc,
                            removal_policy=core.RemovalPolicy.DESTROY)

        # Access point rooted at /export/lambda, owned by uid/gid 1001 so the
        # lambda can read and write without root.
        access_point = fs.add_access_point(
            'AccessPoint',
            create_acl=efs.Acl(owner_gid='1001',
                               owner_uid='1001',
                               permissions='750'),
            path="/export/lambda",
            posix_user=efs.PosixUser(gid="1001", uid="1001"))

        efs_lambda = _lambda.Function(
            self,
            'rdsProxyHandler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            # FIX: Code.asset() is deprecated in CDK v1; from_asset() is the
            # supported spelling and matches every other block in this file.
            code=_lambda.Code.from_asset('lambda_fns'),
            handler='message_wall.lambda_handler',
            vpc=vpc,
            filesystem=_lambda.FileSystem.from_efs_access_point(
                access_point, '/mnt/msg'))

        # defines an API Gateway Http API resource backed by our "efs_lambda" function.
        api = api_gw.HttpApi(
            self,
            'EFS Lambda',
            default_integration=integrations.LambdaProxyIntegration(
                handler=efs_lambda))

        core.CfnOutput(self, 'HTTP API Url', value=api.url)
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Simple webservice (HTTP API -> lambda -> DynamoDB) plus monitoring.

        The first half deploys the hit-counter service; the second half wires
        custom CloudWatch math metrics, alarms that publish to an SNS topic,
        and a dashboard covering API Gateway, Lambda, and DynamoDB health.
        Relies on self.metric_for_api_gw (defined on the enclosing class).
        """
        super().__init__(scope, id, **kwargs)

        # -----------------------------------------------------------------------------------------------------------
        # The Simple Webservice Logic - This is what we will be monitoring
        #
        # API GW HTTP API, Lambda Fn and DynamoDB
        # https://github.com/cdk-patterns/serverless/tree/master/the-simple-webservice
        # -----------------------------------------------------------------------------------------------------------

        # DynamoDB Table
        table = dynamo_db.Table(
            self,
            "Hits",
            partition_key=dynamo_db.Attribute(
                name="path", type=dynamo_db.AttributeType.STRING),
            billing_mode=dynamo_db.BillingMode.PAY_PER_REQUEST)

        # defines an AWS  Lambda resource
        dynamo_lambda = _lambda.Function(
            self,
            "DynamoLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,  # execution environment
            handler="lambda.handler",  # file is "lambda", function is "handler"
            code=_lambda.Code.from_asset(
                "lambda_fns"),  # Code loaded from the lambda dir
            environment={'HITS_TABLE_NAME': table.table_name})

        # grant the lambda role read/write permissions to our table'
        table.grant_read_write_data(dynamo_lambda)

        # defines an API Gateway Http API resource backed by our "dynamoLambda" function.
        api = api_gw.HttpApi(
            self,
            'HttpAPI',
            default_integration=integrations.LambdaProxyIntegration(
                handler=dynamo_lambda))

        core.CfnOutput(self, 'HTTP API Url', value=api.url)

        # -----------------------------------------------------------------------------------------------------------
        # Monitoring Logic Starts Here
        #
        # This is everything we need to understand the state of our system:
        # - custom metrics
        # - cloudwatch alarms
        # - custom cloudwatch dashboard
        # -----------------------------------------------------------------------------------------------------------

        # SNS Topic so we can hook things into our alerts e.g. email
        error_topic = sns.Topic(self, 'theBigFanTopic')

        ###
        # Custom Metrics
        ###

        # Ratio of 4xx responses to total requests over each 5-minute window.
        api_gw_4xx_error_percentage = cloud_watch.MathExpression(
            expression="m1/m2*100",
            label="% API Gateway 4xx Errors",
            using_metrics={
                "m1":
                self.metric_for_api_gw(api.http_api_id, '4XXError',
                                       '4XX Errors', 'sum'),
                "m2":
                self.metric_for_api_gw(api.http_api_id, 'Count', '# Requests',
                                       'sum'),
            },
            period=core.Duration.minutes(5))

        # Gather the % of lambda invocations that error in past 5 mins
        lambda_error_perc = cloud_watch.MathExpression(
            expression="e / i * 100",
            label="% of invocations that errored, last 5 mins",
            using_metrics={
                "i":
                dynamo_lambda.metric(metric_name="Invocations",
                                     statistic="sum"),
                "e":
                dynamo_lambda.metric(metric_name="Errors", statistic="sum"),
            },
            period=core.Duration.minutes(5))

        # note: throttled requests are not counted in total num of invocations
        lambda_throttled_perc = cloud_watch.MathExpression(
            expression="t / (i + t) * 100",
            label="% of throttled requests, last 30 mins",
            using_metrics={
                "i":
                dynamo_lambda.metric(metric_name="Invocations",
                                     statistic="sum"),
                "t":
                dynamo_lambda.metric(metric_name="Throttles", statistic="sum"),
            },
            period=core.Duration.minutes(5))

        # I think usererrors are at an account level rather than a table level so merging
        # these two metrics until I can get a definitive answer. I think usererrors
        # will always show as 0 when scoped to a table so this is still effectively
        # a system errors count
        dynamo_db_total_errors = cloud_watch.MathExpression(
            expression="m1 + m2",
            label="DynamoDB Errors",
            using_metrics={
                "m1": table.metric_user_errors(),
                "m2": table.metric_system_errors_for_operations(),
            },
            period=core.Duration.minutes(5))

        # Rather than have 2 alerts, let's create one aggregate metric
        dynamo_db_throttles = cloud_watch.MathExpression(
            expression="m1 + m2",
            label="DynamoDB Throttles",
            using_metrics={
                "m1":
                table.metric(metric_name="ReadThrottleEvents",
                             statistic="sum"),
                "m2":
                table.metric(metric_name="WriteThrottleEvents",
                             statistic="sum"),
            },
            period=core.Duration.minutes(5))
        ###
        # Alarms
        ###

        # Api Gateway

        # 4xx are user errors so a large volume indicates a problem
        cloud_watch.Alarm(self,
                          id="API Gateway 4XX Errors > 1%",
                          metric=api_gw_4xx_error_percentage,
                          threshold=1,
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # 5xx are internal server errors so we want 0 of these
        cloud_watch.Alarm(self,
                          id="API Gateway 5XX Errors > 0",
                          metric=self.metric_for_api_gw(api_id=api.http_api_id,
                                                        metric_name="5XXError",
                                                        label="5XX Errors",
                                                        stat="p99"),
                          threshold=0,
                          period=core.Duration.minutes(5),
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # Latency threshold is in milliseconds (1000 ms = 1 s, per the label).
        cloud_watch.Alarm(self,
                          id="API p99 latency alarm >= 1s",
                          metric=self.metric_for_api_gw(api_id=api.http_api_id,
                                                        metric_name="Latency",
                                                        label="API GW Latency",
                                                        stat="p99"),
                          threshold=1000,
                          period=core.Duration.minutes(5),
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # Lambda

        # 2% of Dynamo Lambda invocations erroring
        cloud_watch.Alarm(self,
                          id="Dynamo Lambda 2% Error",
                          metric=lambda_error_perc,
                          threshold=2,
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # 1% of Lambda invocations taking longer than 1 second
        cloud_watch.Alarm(self,
                          id="Dynamo Lambda p99 Long Duration (>1s)",
                          metric=dynamo_lambda.metric_duration(),
                          period=core.Duration.minutes(5),
                          threshold=1000,
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          statistic="p99",
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # 2% of our lambda invocations are throttled
        cloud_watch.Alarm(self,
                          id="Dynamo Lambda 2% Throttled",
                          metric=lambda_throttled_perc,
                          threshold=2,
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # DynamoDB

        # DynamoDB Interactions are throttled - indicated poorly provisioned
        cloud_watch.Alarm(self,
                          id="DynamoDB Table Reads/Writes Throttled",
                          metric=dynamo_db_throttles,
                          threshold=1,
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # There should be 0 DynamoDB errors
        cloud_watch.Alarm(self,
                          id="DynamoDB Errors > 0",
                          metric=dynamo_db_total_errors,
                          threshold=0,
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # Aggregate all of the metrics above into one operational dashboard.
        dashboard = cloud_watch.Dashboard(self, id="CloudWatchDashBoard")
        dashboard.add_widgets(
            cloud_watch.GraphWidget(title="Requests",
                                    width=8,
                                    left=[
                                        self.metric_for_api_gw(
                                            api_id=api.http_api_id,
                                            metric_name="Count",
                                            label="# Requests",
                                            stat="sum")
                                    ]),
            cloud_watch.GraphWidget(
                title="API GW Latency",
                width=8,
                stacked=True,
                left=[
                    self.metric_for_api_gw(api_id=api.http_api_id,
                                           metric_name="Latency",
                                           label="API Latency p50",
                                           stat="p50"),
                    self.metric_for_api_gw(api_id=api.http_api_id,
                                           metric_name="Latency",
                                           label="API Latency p90",
                                           stat="p90"),
                    self.metric_for_api_gw(api_id=api.http_api_id,
                                           metric_name="Latency",
                                           label="API Latency p99",
                                           stat="p99")
                ]),
            cloud_watch.GraphWidget(
                title="API GW Errors",
                width=8,
                stacked=True,
                left=[
                    self.metric_for_api_gw(api_id=api.http_api_id,
                                           metric_name="4XXError",
                                           label="4XX Errors",
                                           stat="sum"),
                    self.metric_for_api_gw(api_id=api.http_api_id,
                                           metric_name="5XXError",
                                           label="5XX Errors",
                                           stat="sum")
                ]),
            cloud_watch.GraphWidget(title="Dynamo Lambda Error %",
                                    width=8,
                                    left=[lambda_error_perc]),
            cloud_watch.GraphWidget(
                title="Dynamo Lambda Duration",
                width=8,
                stacked=True,
                left=[
                    dynamo_lambda.metric_duration(statistic="p50"),
                    dynamo_lambda.metric_duration(statistic="p90"),
                    dynamo_lambda.metric_duration(statistic="p99")
                ]),
            cloud_watch.GraphWidget(title="Dynamo Lambda Throttle %",
                                    width=8,
                                    left=[lambda_throttled_perc]),
            # One latency series per DynamoDB operation the service performs.
            cloud_watch.GraphWidget(
                title="DynamoDB Latency",
                width=8,
                stacked=True,
                left=[
                    table.metric_successful_request_latency(
                        dimensions={
                            "TableName": table.table_name,
                            "Operation": "GetItem"
                        }),
                    table.metric_successful_request_latency(
                        dimensions={
                            "TableName": table.table_name,
                            "Operation": "UpdateItem"
                        }),
                    table.metric_successful_request_latency(
                        dimensions={
                            "TableName": table.table_name,
                            "Operation": "PutItem"
                        }),
                    table.metric_successful_request_latency(
                        dimensions={
                            "TableName": table.table_name,
                            "Operation": "DeleteItem"
                        }),
                    table.metric_successful_request_latency(
                        dimensions={
                            "TableName": table.table_name,
                            "Operation": "Query"
                        }),
                ]),
            cloud_watch.GraphWidget(
                title="DynamoDB Consumed Read/Write Units",
                width=8,
                stacked=False,
                left=[
                    table.metric(metric_name="ConsumedReadCapacityUnits"),
                    table.metric(metric_name="ConsumedWriteCapacityUnits")
                ]),
            cloud_watch.GraphWidget(
                title="DynamoDB Throttles",
                width=8,
                stacked=True,
                left=[
                    table.metric(metric_name="ReadThrottleEvents",
                                 statistic="sum"),
                    table.metric(metric_name="WriteThrottleEvents",
                                 statistic="sum")
                ]),
        )
# Example 12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        account = self.account

        print("")
        print(f"   Service: {service_name}")
        print(f"   Region:  {region}")
        print(f"   Stage:   {stage}")
        print(f"   Account: {account}")
        print(f"   Stack:   {stack_name}")
        print("")

        ssm = boto3.client('ssm')

        # Environment variable mapping
        environment: dict = {
            'dev': {
                'logLevel': 'DEBUG'
            },
            'prod': {
                'logLevel': 'INFO'
            }
        }

        # How to: Retrieve an existing VPC instance.
        vpc_id: str = ssm.get_parameter(Name="VpcId")['Parameter']['Value']
        vpc = ec2.Vpc.from_lookup(self, 'VPC', vpc_id=vpc_id)

        private_subnet_1_id: str = ssm.get_parameter(
            Name="private-subnet-1")['Parameter']['Value']
        private_subnet_2_id: str = ssm.get_parameter(
            Name="private-subnet-2")['Parameter']['Value']
        private_subnet_3_id: str = ssm.get_parameter(
            Name="private-subnet-3")['Parameter']['Value']

        # How to: Import a value exported from another stack
        # These values are imported from the simple-database stack https://github.com/SimpleServerless/simple-database/blob/main/template.yaml#L95
        # Change these lines to the appropriate values for your project
        db_host = core.Fn.import_value(
            f"simple-serverless-database-{stage}-Host")
        db_name = core.Fn.import_value(
            f"simple-serverless-database-{stage}-Name")
        app_security_group_id = core.Fn.import_value(
            f"simple-serverless-database-{stage}-AppSGId")

        env_variables = {
            'STAGE': stage,
            "PGHOST": db_host,
            "PGPORT": "5432",
            "PGDATABASE": db_name,
            "LOG_LEVEL": environment[stage]['logLevel']
        }

        # How to: Import a security group
        app_security_group = ec2.SecurityGroup.from_security_group_id(
            self, "AppSecurityGroup", app_security_group_id)

        # Create the main lambda function
        service_lambda = aws_lambda.Function(
            self,
            'LambdaFunction',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            description=service_name,
            code=aws_lambda.AssetCode("./dist"),
            function_name=service_name + "-" + stage,
            timeout=core.Duration.seconds(35),
            tracing=aws_lambda.Tracing.ACTIVE,
            memory_size=128,
            handler='lambda_function.handler',
            vpc=vpc,
            security_groups=[app_security_group],
            environment=env_variables)

        # Add SecretsManager permissions to lambda
        service_lambda.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "secretsmanager:DescribeSecret",
                    "secretsmanager:GetSecretValue", "secretsmanager:List*"
                ],
                resources=[
                    f"arn:aws:secretsmanager:{region}:{account}:secret:simple-serverless/*"
                ]))

        # Make a wide open security group for the secrets vpc endpoint
        vpc_endpoint_sg = ec2.SecurityGroup(
            self,
            'VpcEndpointSG',
            vpc=vpc,
            allow_all_outbound=True,
            description="Secret Manager VPC Endpoint SG")

        vpc_endpoint_sg.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp_range(0, 65535),
            description="all inbound",
        )

        # How to create at VPC Endpoint to access secrets manager.
        # You can delete this if you're not too cheap to pay for a NAT instance.
        # This still costs $0.24 per day per AZ, so $0.72 for the three AZs we're using here
        # This block is the only reoccurring cost in this stack and is the only reason I delete this stack
        # when I'm not actively working on it.
        ec2.CfnVPCEndpoint(
            self,
            'SecretsManagerVPCEndpoint',
            service_name='com.amazonaws.us-east-2.secretsmanager',
            vpc_endpoint_type='Interface',
            vpc_id=vpc_id,
            subnet_ids=[
                private_subnet_1_id, private_subnet_2_id, private_subnet_3_id
            ],
            security_group_ids=[vpc_endpoint_sg.security_group_id],
            private_dns_enabled=True)

        #
        # REST (API Gateway HTTP) stuff starts here
        #

        # How to: Import an existing HTTP API Gateway instance
        # http_api = apigatewayv2.HttpApi.from_api_id(self, id='APIGateway', http_api_id='0fdl9wlxw4')

        # How to: Create a new HTTP API Gateway instance
        http_api = apigatewayv2.HttpApi(self,
                                        'APIGateway',
                                        api_name=f'{service_name}-api-{stage}')

        integration = apigatewayv2_integrations.LambdaProxyIntegration(
            handler=service_lambda,
            payload_format_version=apigatewayv2.PayloadFormatVersion.
            VERSION_2_0)

        # How to: auto generate REST endpoints from decorators ex: @router.rest("GET", "/students").
        for route_key, endpoint in lambda_function.router.get_rest_endpoints(
        ).items():
            print(f"Creating REST endpoint for {route_key}")
            http_api.add_routes(
                path=endpoint['path'],
                methods=[apigatewayv2.HttpMethod(endpoint['method'])],
                integration=integration)

        core.CfnOutput(self,
                       "RestAPIOutput",
                       value=http_api.url,
                       export_name=f"{stack_name}-RestApiUrl-{stage}")
Ejemplo n.º 13
0
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        dataset_metadata_filename: str,
        dataset_metadata_generator_function_name: str,
        memory: int = 1024,
        timeout: int = 30,
        concurrent: int = 100,
        code_dir: str = "./",
        **kwargs: Any,
    ) -> None:
        """Define stack.

        Provisions the covid-api service: a VPC (imported when
        ``config.VPC_ID`` is set, otherwise created), an ElastiCache
        cluster reachable only from the Lambda's security group, the
        service Lambda itself, and an HTTP API that proxies every
        request to that Lambda.

        :param scope: parent construct.
        :param id: logical id, also used as a prefix for child construct ids.
        :param dataset_metadata_filename: forwarded to the Lambda via env var.
        :param dataset_metadata_generator_function_name: forwarded via env var.
        :param memory: Lambda memory size in MB.
        :param timeout: Lambda timeout in seconds.
        :param concurrent: reserved concurrent executions; falsy disables the cap.
        :param code_dir: directory handed to ``self.create_package``.
        """
        super().__init__(scope, id, **kwargs)

        # add cache
        # Reuse an existing VPC when one is configured; otherwise create one.
        if config.VPC_ID:
            vpc = ec2.Vpc.from_lookup(
                self,
                f"{id}-vpc",
                vpc_id=config.VPC_ID,
            )
        else:
            vpc = ec2.Vpc(self, f"{id}-vpc")

        # Cache subnet group spanning the VPC's private subnets.
        sb_group = escache.CfnSubnetGroup(
            self,
            f"{id}-subnet-group",
            description=f"{id} subnet group",
            subnet_ids=[sb.subnet_id for sb in vpc.private_subnets],
        )

        # Lambda security group: all outbound allowed, no inbound rules.
        lambda_function_security_group = ec2.SecurityGroup(self,
                                                           f"{id}-lambda-sg",
                                                           vpc=vpc)
        lambda_function_security_group.add_egress_rule(
            ec2.Peer.any_ipv4(),
            connection=ec2.Port(protocol=ec2.Protocol("ALL"),
                                string_representation=""),
            description="Allow lambda security group all outbound access",
        )

        cache_security_group = ec2.SecurityGroup(self,
                                                 f"{id}-cache-sg",
                                                 vpc=vpc)

        # Only the Lambda's security group may reach the cache (any protocol).
        cache_security_group.add_ingress_rule(
            lambda_function_security_group,
            connection=ec2.Port(protocol=ec2.Protocol("ALL"),
                                string_representation=""),
            description=
            "Allow Lambda security group access to Cache security group",
        )

        # Cache engine/size come from config; env vars below suggest
        # memcached — TODO confirm against config.CACHE_ENGINE.
        cache = escache.CfnCacheCluster(
            self,
            f"{id}-cache",
            cache_node_type=config.CACHE_NODE_TYPE,
            engine=config.CACHE_ENGINE,
            num_cache_nodes=config.CACHE_NODE_NUM,
            vpc_security_group_ids=[cache_security_group.security_group_id],
            cache_subnet_group_name=sb_group.ref,
        )

        # CloudWatch Logs permissions for the Lambda role.
        logs_access = iam.PolicyStatement(
            actions=[
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:PutLogEvents",
            ],
            resources=["*"],
        )
        # ENI management permissions, required for a Lambda running inside a VPC.
        ec2_network_access = iam.PolicyStatement(
            actions=[
                "ec2:CreateNetworkInterface",
                "ec2:DescribeNetworkInterfaces",
                "ec2:DeleteNetworkInterface",
            ],
            resources=["*"],
        )

        lambda_env = DEFAULT_ENV.copy()
        lambda_env.update(
            dict(
                MODULE_NAME="covid_api.main",
                VARIABLE_NAME="app",
                WORKERS_PER_CORE="1",
                LOG_LEVEL="error",
                MEMCACHE_HOST=cache.attr_configuration_endpoint_address,
                MEMCACHE_PORT=cache.attr_configuration_endpoint_port,
                DATASET_METADATA_FILENAME=dataset_metadata_filename,
                DATASET_METADATA_GENERATOR_FUNCTION_NAME=
                dataset_metadata_generator_function_name,
                # Raises KeyError at synth time when PLANET_API_KEY is unset.
                PLANET_API_KEY=os.environ["PLANET_API_KEY"],
            ))

        lambda_function_props = dict(
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=self.create_package(code_dir),
            handler="handler.handler",
            memory_size=memory,
            timeout=core.Duration.seconds(timeout),
            environment=lambda_env,
            security_groups=[lambda_function_security_group],
            vpc=vpc,
        )

        # Only cap concurrency when a non-zero limit was requested.
        if concurrent:
            lambda_function_props[
                "reserved_concurrent_executions"] = concurrent

        lambda_function = aws_lambda.Function(self, f"{id}-lambda",
                                              **lambda_function_props)

        lambda_function.add_to_role_policy(s3_full_access_to_data_bucket)
        lambda_function.add_to_role_policy(logs_access)
        lambda_function.add_to_role_policy(ec2_network_access)

        # HTTP API that forwards every request to the service Lambda.
        apigw.HttpApi(
            self,
            f"{id}-endpoint",
            default_integration=apigw_integrations.LambdaProxyIntegration(
                handler=lambda_function),
        )
Ejemplo n.º 14
0
    def __init__(self, scope: core.Construct, construct_id: str, name: str,
                 **kwargs) -> None:
        """Deploy a container-image Lambda with a shared EFS filesystem.

        Creates a VPC, an EFS filesystem (destroyed with the stack), a
        Docker-image Lambda mounting that filesystem, a log group with
        one-week retention, and an HTTP API fronting the Lambda. The
        Streamlit/Fargate wiring further down is commented out.
        """
        super().__init__(scope, construct_id, **kwargs)
        """VPC - used in project"""
        vpc = ec2.Vpc(self, f'{name}-VPC', max_azs=2)
        """Filesystem - shared between Lambda and Streamlit - Deletes when stack gets shut down"""
        fs = efs.FileSystem(self,
                            f'{name}-FileSystem',
                            vpc=vpc,
                            removal_policy=core.RemovalPolicy.DESTROY)

        # Access point rooted at /export/lambda, owned by uid/gid 1001.
        access_point = fs.add_access_point(
            'AccessPoint',
            create_acl=efs.Acl(owner_gid='1001',
                               owner_uid='1001',
                               permissions='750'),
            path="/export/lambda",
            posix_user=efs.PosixUser(gid="1001", uid="1001"))
        """Model folder that contains Lambda code"""
        model_folder = os.path.dirname(
            os.path.realpath(__file__)) + "/../model"
        lambda_handler = _lambda.DockerImageFunction(
            self,
            f'{name}-Lambda',
            code=_lambda.DockerImageCode.from_image_asset(
                model_folder),  #Uses local code to build the container
            memory_size=1024,  #Adjust to your need - 128MB to 10GB
            timeout=core.Duration.minutes(
                5),  #Adjust to your need - up to 15 mins
            vpc=vpc,
            filesystem=_lambda.FileSystem.from_efs_access_point(
                access_point, MOUNT_POINT))
        """Custom Log groups for Lambda"""
        # Named after the function so Lambda writes here instead of an
        # auto-created group; retention is capped at one week.
        lambda_lgs = logs.LogGroup(
            self,
            f'{name}-Lambda-LogGroup',
            log_group_name=f"/aws/lambda/{lambda_handler.function_name}",
            retention=logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY)
        """API Gateway - integrates all methods and ressources - used for Lambda invocation"""
        api = api_gw.HttpApi(
            self,
            f'{name}-ApiGw',
            default_integration=integrations.LambdaProxyIntegration(
                handler=lambda_handler))
        """""" """""" """""" """""" """""" """""" """""" """""" """"""
        #STREAMLIT RELATED START
        """""" """""" """""" """""" """""" """""" """""" """""" """"""
        '''
        cluster = ecs.Cluster(self, f"{name}-Streamlit-Cluster", vpc=vpc)
        
        ecs_task = ecs.FargateTaskDefinition(
            self,
            f'{name}-Streamlit-Task-Def',            
        )

        streamlit_container = ecs_task.add_container(
            f'{name}-Streamlit-Container',
            image=ecs.ContainerImage.from_asset('streamlit-docker'),
            essential=True,
            environment={
                'API_URL': api.url,
            },
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix=f'{name}-Streamlit-Log'
            )            
        )
        
        streamlit_container.add_port_mappings(
            ecs.PortMapping(
                container_port=8501,
                host_port=8501,
                protocol=ecs.Protocol.TCP
            )
        )
        
        """Efs Volume - shared between Lambda / Streamlit"""
        ecs_task.add_volume(name=f'{name}-Efs-Volume',  
                efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=fs.file_system_id,                
        ))
        
        """Efs Mountpoint"""
        streamlit_container.add_mount_points(
            ecs.MountPoint(
                container_path="/mnt/data",
                read_only=False,
                source_volume=f'{name}-Efs-Volume'
        ))
        
       
        ecs_task.add_to_task_role_policy(
            statement=iam.PolicyStatement(
                actions=["efs:*"],
                resources=['*'],
                effect=iam.Effect.ALLOW
            )
        )
       
        """Fargate Service that hosts the Streamlit Application"""
        ecs_service = ecs_patterns.ApplicationLoadBalancedFargateService(self, f'{name}-Fargate-Service',
            cluster=cluster,            
            cpu=256,                    
            desired_count=1,            
            task_definition = ecs_task,
            memory_limit_mib=512,     
            public_load_balancer=True, 
            platform_version=ecs.FargatePlatformVersion.VERSION1_4, #https://forums.aws.amazon.com/thread.jspa?messageID=960420
            
        )  
        
        fs.connections.allow_default_port_from(
            ecs_service.service.connections)
        '''
        """""" """""" """""" """""" """""" """""" """""" """""" """"""
        #STREAMLIT RELATED END
        """""" """""" """""" """""" """""" """""" """""" """""" """"""

        # Expose the HTTP API endpoint of the deployed stack.
        core.CfnOutput(self, 'URL', value=api.url)
Ejemplo n.º 15
0
    def __init__(self,
                 scope: core.Construct,
                 construct_id: str,
                 cdk_env_='',
                 **kwargs) -> None:
        """Deploy the babynames service.

        Creates the DynamoDB table, Lambda layers and API-handler Lambda,
        then wires an HTTP API behind a custom domain: domain values come
        from SSM Parameter Store (keyed by ``cdk_env_``), a DNS-validated
        ACM certificate is issued against the imported Route53 zone, and
        an A-record alias points at the API Gateway domain.
        """
        super().__init__(scope, construct_id, **kwargs)

        this_dir = path.dirname(__file__)

        # Dynamo DB Tables
        # Primary key: name + gender; GSI allows querying by gender, sorted
        # by uuid.
        dynamo_names_table = dynamodb.Table(
            self,
            'Names',
            partition_key=dynamodb.Attribute(
                name='name', type=dynamodb.AttributeType.STRING),
            sort_key=dynamodb.Attribute(name='gender',
                                        type=dynamodb.AttributeType.STRING),
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST)
        dynamo_names_table.add_global_secondary_index(
            partition_key=dynamodb.Attribute(
                name='gender', type=dynamodb.AttributeType.STRING),
            sort_key=dynamodb.Attribute(name='uuid',
                                        type=dynamodb.AttributeType.STRING),
            index_name='bn_uuid_sort')

        # Lambda Layers (pre-built zips shipped with the repo)
        lambda_layer_requests = lmb.LayerVersion(
            self,
            'Layer-Requests',
            code=lmb.Code.from_asset(
                path.join(this_dir, 'lambda/layers/requests.zip')),
            compatible_runtimes=[lmb.Runtime.PYTHON_3_8],
        )
        lambda_layer_simplejson = lmb.LayerVersion(
            self,
            'Layer-SimpleJSON',
            code=lmb.Code.from_asset(
                path.join(this_dir, 'lambda/layers/simplejson.zip')),
            compatible_runtimes=[lmb.Runtime.PYTHON_3_8],
        )
        lambda_layer_jinja2 = lmb.LayerVersion(
            self,
            'Layer-Jinja2',
            code=lmb.Code.from_asset(
                path.join(this_dir, 'lambda/layers/jinja2.zip')),
            compatible_runtimes=[lmb.Runtime.PYTHON_3_8],
        )

        ## Lambda - API Handler
        lambda_api_handler = lmb.Function(
            self,
            'API-Handler',
            timeout=core.Duration.seconds(360),
            memory_size=512,
            runtime=lmb.Runtime.PYTHON_3_8,
            handler='api_handler.handler',
            layers=[lambda_layer_simplejson, lambda_layer_jinja2],
            code=lmb.Code.from_asset(path.join(this_dir,
                                               'lambda/api_handler')),
            environment={'DYNAMO_DB_NAMES': dynamo_names_table.table_name})
        ### Grants
        dynamo_names_table.grant_read_write_data(lambda_api_handler)

        # APIGW
        ## Pull domain values from parameter store
        parameter_store_record_name = ssm.StringParameter.value_for_string_parameter(
            self, f'/babynames/{cdk_env_}/record_name')
        parameter_store_domain_name = ssm.StringParameter.value_for_string_parameter(
            self, f'/babynames/{cdk_env_}/domain_name')
        parameter_store_zone_id = ssm.StringParameter.value_for_string_parameter(
            self, f'/babynames/{cdk_env_}/zone_id')

        ## Import R53 Zone
        r53_zone = route53.HostedZone.from_hosted_zone_attributes(
            self,
            "R53Zone",
            zone_name=parameter_store_domain_name,
            hosted_zone_id=parameter_store_zone_id)

        ## ACM Certificate (DNS-validated against the imported zone)
        acm_certificate = acm.Certificate(
            self,
            "BabyNamesCertificate",
            domain_name=parameter_store_record_name,
            validation=acm.CertificateValidation.from_dns(r53_zone))

        ## APIGW Custom Domain
        # Pass the certificate construct directly; re-importing its ARN via
        # from_certificate_arn was redundant — the construct created above
        # already implements ICertificate.
        apigw_baby_names_domain_name = apigw2.DomainName(
            self,
            "BabyNamesDomain",
            domain_name=parameter_store_record_name,
            certificate=acm_certificate)

        ## Set R53 Records
        # NOTE(review): record_name is hard-coded to 'babynames'; presumably
        # this matches the record_name SSM parameter — confirm per environment.
        r53_alias_target_baby_names_apigw = r53targets.ApiGatewayv2Domain(
            apigw_baby_names_domain_name)
        route53.ARecord(self,
                        "BabyNamesARecord",
                        record_name='babynames',
                        zone=r53_zone,
                        target=route53.RecordTarget.from_alias(
                            r53_alias_target_baby_names_apigw))

        ## Instantiate APIGW
        apigw_baby_names = apigw2.HttpApi(
            self,
            'BabyNames-APIGW-Http',
            default_domain_mapping=(apigw2.DefaultDomainMappingOptions(
                domain_name=apigw_baby_names_domain_name)))

        ## APIGW Integrations
        ## Lambda Integrations
        lambda_int_lambda_api_handler = apigw2int.LambdaProxyIntegration(
            handler=lambda_api_handler)

        # Direct lookup route plus a catch-all proxy, both GET and both
        # served by the same handler.
        apigw_baby_names.add_routes(path='/{name}/{gender}',
                                    methods=[apigw2.HttpMethod.GET],
                                    integration=lambda_int_lambda_api_handler)

        apigw_baby_names.add_routes(path='/{proxy+}',
                                    methods=[apigw2.HttpMethod.GET],
                                    integration=lambda_int_lambda_api_handler)
Ejemplo n.º 16
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Deploy the service Lambda with both REST and GraphQL frontends.

        Creates the service Lambda, an HTTP API whose routes are generated
        from the handler's ``@router.rest`` decorators, and an AppSync
        GraphQL API (API-key auth) whose resolvers are generated from the
        handler's ``@router.graphql`` decorators. Exports the REST URL and
        GraphQL id/url/key as CloudFormation outputs.
        """
        super().__init__(scope, id, **kwargs)

        account = self.account

        # Log the deployment context at synth time.
        print("")
        print(f"   Service: {service_name}")
        print(f"   Region:  {region}")
        print(f"   Stage:   {stage}")
        print(f"   Account: {account}")
        print(f"   Stack:   {stack_name}")
        print("")

        # Removed: `ssm = boto3.client('ssm')` — the client was never used,
        # created a live AWS SDK client at synth time, and shadowed the
        # module-level `ssm` (aws_cdk) name.

        # Environment variable mapping
        environment: dict = {'dev': {
                                     'logLevel': 'DEBUG',
                                     'dbHost': 'simple-serverless-aurora-serverless-development.cluster-cw3bjgnjhzxa.us-east-2.rds.amazonaws.com',
                                     'dbName': 'simple_serverless_dev',
                                     'vpcId': 'vpc-319daa58'
                                     },
                             'prod': {
                                      'logLevel': 'INFO',
                                      'dbHost': 'simple-serverless-aurora-serverless-production.cluster-cw3bjgnjhzxa.us-east-2.rds.amazonaws.com',
                                      'dbName': 'simple_serverless_prod',
                                      'vpcId': 'vpc-XXXXXX'
                                      }
                             }

        env_variables = {
            'STAGE': stage,
            "LOG_LEVEL": environment[stage]['logLevel']
        }

        # Create the main lambda function
        service_lambda = aws_lambda.Function(self,
                                             'LambdaFunction',
                                             runtime=aws_lambda.Runtime.PYTHON_3_8,
                                             description=service_name,
                                             code=aws_lambda.AssetCode("./dist"),
                                             function_name=service_name + "-" + stage,
                                             timeout=core.Duration.seconds(35),
                                             tracing=aws_lambda.Tracing.ACTIVE,
                                             memory_size=128,
                                             handler='lambda_function.handler',
                                             environment=env_variables)

        #
        # REST (API Gateway HTTP) stuff starts here
        #

        # How to: Import an existing HTTP API Gateway instance
        # http_api = apigatewayv2.HttpApi.from_api_id(self, id='APIGateway', http_api_id='0fdl9wlxw4')

        # How to: Create a new HTTP API Gateway instance
        http_api = apigatewayv2.HttpApi(
            self, 'APIGateway',
            api_name=f'{service_name}-api-{stage}'
        )

        integration = apigatewayv2_integrations.LambdaProxyIntegration(
            handler=service_lambda,
            payload_format_version=apigatewayv2.PayloadFormatVersion.VERSION_2_0
        )

        # How to: auto generate REST endpoints from decorators ex: @router.rest("GET", "/students").
        for route_key, endpoint in lambda_function.router.get_rest_endpoints().items():
            print(f"Creating REST endpoint for {route_key}")
            http_api.add_routes(
                path=endpoint['path'],
                methods=[apigatewayv2.HttpMethod(endpoint['method'])],
                integration=integration
            )

        #
        # Graphql (AppSync) stuff starts here
        #
        # AppSync needs a role it can assume to invoke the service Lambda.
        policy = iam.PolicyStatement(actions=['lambda:InvokeFunction'],
                                     resources=[service_lambda.function_arn])
        principal = iam.ServicePrincipal('appsync.amazonaws.com')
        service_role = iam.Role(self, 'service-role', assumed_by=principal)
        service_role.add_to_policy(policy)

        # How to: import an existing AppSync instance
        # graphql_api = appsync.GraphqlApi.from_graphql_api_attributes(self, 'GraphQLApi', graphql_api_id='phw4kdabqnbjzi4czy3dtbmynu')

        graphql_schema = appsync.Schema(file_path='./src/schema.graphql')
        graphql_auth_mode = appsync.AuthorizationMode(authorization_type=appsync.AuthorizationType.API_KEY)
        graphql_auth_config = appsync.AuthorizationConfig(default_authorization=graphql_auth_mode)

        graphql_api = appsync.GraphqlApi(
            self, 'GraphQLApi',
            name=f'{service_name}-api-' + stage,
            authorization_config=graphql_auth_config,
            schema=graphql_schema
        )

        datasource_name = to_camel(service_name) + "Lambda"
        lambda_data_source = appsync.LambdaDataSource(
            self, 'LambdaDataSource',
            api=graphql_api,
            name=datasource_name,
            lambda_function=service_lambda,
            service_role=service_role
        )

        # How to: auto generate GraphQL resolvers from decorators ex: @router.graphql("Query", "listStudents").
        for field_name, graphql_def in lambda_function.router.get_graphql_endpoints().items():
            print(f"Creating graphql {graphql_def['parent']} for {field_name}")
            appsync.Resolver(
                self, field_name + "Resolver",
                api=graphql_api,
                type_name=graphql_def['parent'],
                field_name=field_name,
                data_source=lambda_data_source
            )


        core.CfnOutput(self, "RestAPIUrlOutput",
                       value=http_api.url,
                       export_name=f"{stack_name}-RestApiUrl-{stage}")

        core.CfnOutput(self, "GraphQLApiIdOutput",
                       value=graphql_api.api_id,
                       export_name=f"{stack_name}-GraphqlApiId-{stage}")

        core.CfnOutput(self, "GraphQLUrlOutput",
                       value=graphql_api.graphql_url,
                       export_name=f"{stack_name}-GraphqlUrl-{stage}")

        core.CfnOutput(self, "GraphQLApiKeyOutput",
                       value=graphql_api.api_key,
                       export_name=f"{stack_name}-GraphQLApiKey-{stage}")
Ejemplo n.º 17
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        """Deploy a domain-redirection service.

        A Lambda behind an HTTP API performs the redirect; a CloudFront
        distribution (using a us-east-1 ACM certificate supplied by ARN)
        fronts the API so the redirected domain can alias to it.
        """
        super().__init__(scope, construct_id, **kwargs)

        # Input variables

        # Domain name to redirect
        domain_name = core.CfnParameter(
            self,
            "domainName",
            type="String",
            description="Domain name to redirect",
        )

        # Here we use a specific certificate from parameter values
        cert_arn = core.CfnParameter(
            self,
            "certArn",
            type="String",
            # Fixed the garbled description ("of for", unclosed parenthesis).
            description=
            "Certificate ARN for the redirection (has to be in us-east-1)",
        )
        # End: Input variables

        # Infra setup

        redirect_fn = _lambda.Function(
            self,
            "NCCIDRedirectLambda",
            handler="lambda-handler.handler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            # Code.asset() is deprecated; from_asset() is the supported
            # factory and matches usage elsewhere in this file.
            code=_lambda.Code.from_asset("lambda"),
        )

        redirect_integration = _apigw2int.LambdaProxyIntegration(
            handler=redirect_fn)

        cert = _acm.Certificate.from_certificate_arn(self, "cert",
                                                     cert_arn.value_as_string)

        http_api = _apigw2.HttpApi(
            self,
            "nccid-redirect",
            api_name="nccid-redirect",
            description="A redirection gateway.",
        )

        http_api.add_routes(path="/",
                            methods=[_apigw2.HttpMethod.GET],
                            integration=redirect_integration)

        # Change https address into just domain name (while keeping the Cloudformation variables in)
        origin_target = http_api.url.replace("https://", "",
                                             1).replace("/", "")
        origin = _origins.HttpOrigin(domain_name=origin_target)
        behaviour = _cloudfront.BehaviorOptions(origin=origin)

        distribution = _cloudfront.Distribution(
            self,
            "nccid-redirect-dist",
            default_behavior=behaviour,
            certificate=cert,
            domain_names=[domain_name.value_as_string],
        )
        # Explicit dependency is required between the API gateway and Cloudfront distribution
        distribution.node.add_dependency(http_api)

        # Outputs
        distribution_domain = core.CfnOutput(  # noqa:  F841
            self,
            "nccidRedirectDomain",
            value=distribution.distribution_domain_name,
            description=
            "The Cloudfront domain to add to the CNAME records for the redirected domain",
        )
Ejemplo n.º 18
0
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        secret_key: str,
        custom_domain: Optional[str] = None,
        hosted_zone_id: Optional[str] = None,
        hosted_zone_name: Optional[str] = None,
        **kwargs,
    ) -> None:
        """Deploy the "once" one-time file-sharing service.

        Creates a private S3 bucket and DynamoDB table, an HTTP API with
        three Lambdas (issue upload ticket, download-and-delete, scheduled
        cleanup), and — when ``custom_domain`` is given — a custom-domain
        sub-stack mapped onto the API.
        """
        super().__init__(scope, id, **kwargs)

        # Private, encrypted bucket holding the shared files; destroyed
        # with the stack.
        self.files_bucket = s3.Bucket(
            self,
            "files-bucket",
            bucket_name="once-shared-files",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            encryption=s3.BucketEncryption.S3_MANAGED,
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        # Metadata table keyed by file id.
        self.files_table = dynamodb.Table(
            self,
            "once-files-table",
            table_name="once-files",
            partition_key=dynamodb.Attribute(
                name="id", type=dynamodb.AttributeType.STRING),
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        self.api = apigw.HttpApi(self, "once-api", api_name="once-api")

        # Advertise the custom domain when configured, else the raw API URL.
        api_url = self.api.url
        if custom_domain is not None:
            api_url = f"https://{custom_domain}/"

        core.CfnOutput(self, "base-url", value=api_url)

        self.get_upload_ticket_function = lambda_.Function(
            self,
            "get-upload-ticket-function",
            function_name="once-get-upload-ticket",
            description="Returns a pre-signed request to share a file",
            runtime=lambda_.Runtime.PYTHON_3_7,
            code=make_python_zip_bundle(
                os.path.join(BASE_PATH, "get-upload-ticket")),
            handler="handler.on_event",
            log_retention=LOG_RETENTION,
            environment={
                "APP_URL": api_url,
                "FILES_TABLE_NAME": self.files_table.table_name,
                "FILES_BUCKET": self.files_bucket.bucket_name,
                "SECRET_KEY": secret_key,
            },
        )

        # Ticket issuer may write new objects and read/write metadata.
        self.files_bucket.grant_put(self.get_upload_ticket_function)
        self.files_table.grant_read_write_data(self.get_upload_ticket_function)

        self.download_and_delete_function = lambda_.Function(
            self,
            "download-and-delete-function",
            function_name="once-download-and-delete",
            description=
            "Serves a file from S3 and deletes it as soon as it has been successfully transferred",
            runtime=lambda_.Runtime.PYTHON_3_7,
            code=lambda_.Code.from_asset(
                os.path.join(BASE_PATH, "download-and-delete")),
            handler="handler.on_event",
            log_retention=LOG_RETENTION,
            environment={
                "FILES_BUCKET": self.files_bucket.bucket_name,
                "FILES_TABLE_NAME": self.files_table.table_name,
            },
        )

        self.files_bucket.grant_read(self.download_and_delete_function)
        self.files_bucket.grant_delete(self.download_and_delete_function)
        self.files_table.grant_read_write_data(
            self.download_and_delete_function)

        # GET / issues an upload ticket.
        get_upload_ticket_integration = integrations.LambdaProxyIntegration(
            handler=self.get_upload_ticket_function)
        self.api.add_routes(path="/",
                            methods=[apigw.HttpMethod.GET],
                            integration=get_upload_ticket_integration)

        # NOTE(review): the "(unknown)" path segment below looks like a
        # corrupted placeholder — presumably a second path parameter such as
        # "{filename}". Verify against the download handler before relying
        # on this route.
        download_and_delete_integration = integrations.LambdaProxyIntegration(
            handler=self.download_and_delete_function)
        self.api.add_routes(path="/{entry_id}/(unknown)",
                            methods=[apigw.HttpMethod.GET],
                            integration=download_and_delete_integration)

        self.cleanup_function = lambda_.Function(
            self,
            "delete-served-files-function",
            function_name="once-delete-served-files",
            description=
            "Deletes files from S3 once they have been marked as deleted in DynamoDB",
            runtime=lambda_.Runtime.PYTHON_3_7,
            code=lambda_.Code.from_asset(
                os.path.join(BASE_PATH, "delete-served-files")),
            handler="handler.on_event",
            log_retention=LOG_RETENTION,
            environment={
                "FILES_BUCKET": self.files_bucket.bucket_name,
                "FILES_TABLE_NAME": self.files_table.table_name,
            },
        )

        self.files_bucket.grant_delete(self.cleanup_function)
        self.files_table.grant_read_write_data(self.cleanup_function)

        # Run the cleanup Lambda once a day.
        events.Rule(
            self,
            "once-delete-served-files-rule",
            schedule=events.Schedule.rate(core.Duration.hours(24)),
            targets=[targets.LambdaFunction(self.cleanup_function)],
        )

        if custom_domain is not None:
            self.custom_domain_stack = CustomDomainStack(
                self,
                "custom-domain",
                api=self.api,
                domain_name=custom_domain,
                hosted_zone_id=hosted_zone_id,
                hosted_zone_name=hosted_zone_name,
            )
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 db_stack: DatabaseStack, **kwargs) -> None:
        """Wire up the crawler pipeline infrastructure.

        Creates an SQS enrichment queue, one shared Docker image asset with
        three Lambdas built from it (crawler, enrichment, search), an hourly
        EventBridge trigger for the crawler, an Amplify frontend app, an HTTP
        API entrypoint for search, and a versioned S3 bucket for static data.

        Args:
            scope: Parent construct.
            construct_id: Logical ID of this construct.
            db_stack: Stack exposing ``offers_table`` (DynamoDB), which the
                Lambdas are granted read/write access to below.
            **kwargs: Forwarded to the base construct initializer.
        """
        super().__init__(scope, construct_id, **kwargs)
        # Enrichment Queue: buffers messages from the crawler Lambda until
        # the enrichment Lambda consumes them. The 15-minute visibility
        # timeout should cover the consumer's max runtime so in-flight
        # messages are not redelivered mid-processing; messages expire
        # after one day.
        enrichment_queue = sqs.Queue(
            self,
            "CrawlerEnrichmentQueue",
            queue_name='CrawlerEnrichmentQueue',
            retention_period=cdk.Duration.days(1),
            visibility_timeout=cdk.Duration.minutes(15))

        # Environment variable fragments, merged per-Lambda below so each
        # function only receives the settings it needs.
        env_default = {'APP_LOGGING_LEVEL': 'ERROR'}
        env_table = {'APP_OFFERS_TABLE': db_stack.offers_table.table_name}
        env_queue_url = {'APP_OFFERS_QUEUE_URL': enrichment_queue.queue_url}

        # Base Lambda ECR image asset: a single Docker image built from
        # ./src/crawler is shared by all three Lambdas; each one selects a
        # different handler via _lambda_function_from_asset.
        # NOTE(review): os.getcwd() makes the build sensitive to where cdk
        # is invoked from — presumably always the repo root; confirm.
        lambda_asset = ecr_assets.DockerImageAsset(self,
                                                   'CrawlerLambdaImage',
                                                   directory=os.path.join(
                                                       os.getcwd(), 'src',
                                                       'crawler'),
                                                   repository_name='crawler')

        # Crawler Lambda: writes to the offers table (grant_write_data
        # below) and fans work out to the enrichment queue.
        lambda_crawler = self._lambda_function_from_asset(
            lambda_asset, 'LambdaCrawler', 'lambda_handler.crawler', {
                **env_default,
                **env_table,
                **env_queue_url
            })
        # Trigger the crawler once per hour via an EventBridge rule.
        rule = events.Rule(self,
                           'CrawlerCallingRule',
                           rule_name='CrawlerCallingRule',
                           schedule=events.Schedule.rate(
                               cdk.Duration.hours(1)))
        rule.add_target(targets.LambdaFunction(lambda_crawler))
        db_stack.offers_table.grant_write_data(lambda_crawler)
        enrichment_queue.grant_send_messages(lambda_crawler)

        # Enrichment Lambda: consumes the queue (SQS event source) and
        # writes enriched offers back to the table.
        lambda_enrichment = self._lambda_function_from_asset(
            lambda_asset, 'LambdaEnrichment', 'lambda_handler.enrichment', {
                **env_default,
                **env_table
            })
        lambda_enrichment.add_event_source(
            lambda_event_sources.SqsEventSource(enrichment_queue))
        db_stack.offers_table.grant_write_data(lambda_enrichment)

        # Search Lambda: read-only API backend, deliberately capped small
        # (10 concurrent executions, 1-minute timeout, 128 MB, 1-minute max
        # event age) since it only serves synchronous queries.
        lambda_search = self._lambda_function_from_asset(
            lambda_asset,
            'LambdaSearch',
            'lambda_handler.search', {
                **env_default,
                **env_table
            },
            reserved_concurrent_executions=10,
            timeout_minutes=1,
            memory_size=128,
            max_event_age_minutes=1)
        db_stack.offers_table.grant_read_data(lambda_search)

        # NOTE(review): the file handle is never closed (use `with open(...)`)
        # and the GitHub token is read as plaintext at synth time — prefer
        # SecretValue.secrets_manager(...) over a token file on disk.
        personal_token = open(
            os.path.join(str(Path.home()), '.github/personal_token.txt'),
            'r').read()

        # Frontend entrypoint: Amplify app connected to the GitHub repo,
        # auto-building newly created branches.
        amplify_app = amplify.App(
            self,
            'CrawlerFrontend',
            app_name='CrawlerFrontend',
            auto_branch_creation=amplify.AutoBranchCreation(auto_build=True),
            source_code_provider=amplify.GitHubSourceCodeProvider(
                owner='jaswdr',
                repository='aws-cdk-crawler-frontend-example',
                oauth_token=cdk.SecretValue(personal_token)))

        # Backend entrypoint: HTTP API with permissive CORS (GET from any
        # origin, preflight cached for 2 hours).
        search_entrypoint = gateway.HttpApi(
            self,
            'CrawlerSearchApiEntrypoint',
            api_name='CrawlerSearchApiEntrypoint',
            cors_preflight=gateway.CorsPreflightOptions(
                allow_headers=['*'],
                allow_methods=[gateway.HttpMethod.GET],
                allow_origins=['*'],
                max_age=cdk.Duration.hours(2)),
            description='Crawler Search API Entrypoint')
        # Route GET /search to the search Lambda via a v2-payload proxy
        # integration.
        search_entrypoint.add_routes(
            path='/search',
            methods=[gateway.HttpMethod.GET],
            integration=gateway_integrations.LambdaProxyIntegration(
                handler=lambda_search,
                payload_format_version=gateway.PayloadFormatVersion.VERSION_2_0
            ))
        # Versioned bucket for static data. DESTROY + auto_delete_objects
        # means the bucket AND its contents are removed on stack deletion —
        # intentional for ephemeral crawler output, not for durable data.
        static_data_bucket = s3.Bucket(
            self,
            'CrawlerStaticDataBucket',
            versioned=True,
            removal_policy=cdk.RemovalPolicy.DESTROY,
            auto_delete_objects=True,
            bucket_name='crawler-static-data')