Example #1
    def __init__(self, scope: core.Construct, id: str,
                 api_gateway: aws_apigateway.RestApi, **kwargs):
        super().__init__(scope, id, **kwargs)

        # create dynamo table
        job_table = aws_dynamodb.Table(
            self,
            "demo_table",
            partition_key=aws_dynamodb.Attribute(
                name="job_id", type=aws_dynamodb.AttributeType.STRING))

        # Create the Queue
        sqs_queue = aws_sqs.Queue(self, "SQSQueue")

        # Create the Handler
        lambda_sqs_role = aws_iam.Role(
            self,
            id='lambda-sqs-role',
            assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaSQSQueueExecutionRole')
            ])

        ecr_image = aws_lambda.EcrImageCode.from_asset_image(
            directory=os.path.join(os.getcwd(),
                                   "startuptoolbag/app/lambda-sqs-handler"))
        lambda_handler_function = aws_lambda.Function(
            self,
            id="lambdaSQSHandlerFunction",
            description="Handles/Valdates background requests and puts on SQS",
            code=ecr_image,
            handler=aws_lambda.Handler.FROM_IMAGE,
            runtime=aws_lambda.Runtime.FROM_IMAGE,
            environment={'SQS_NAME': sqs_queue.queue_name},
            role=lambda_sqs_role,
            allow_public_subnet=True,
            memory_size=128,
            reserved_concurrent_executions=10,
            timeout=core.Duration.seconds(10))

        sqs_queue.grant_send_messages(lambda_handler_function)

        # Create the Background Worker
        ecr_image = aws_lambda.EcrImageCode.from_asset_image(
            directory=os.path.join(os.getcwd(),
                                   "startuptoolbag/app/lambda-sqs-bworker"))
        background_function = aws_lambda.Function(
            self,
            id="lambdaSQSDrivenBackgroundWorker",
            description="Pulls from SQS and is a background worker",
            code=ecr_image,
            handler=aws_lambda.Handler.FROM_IMAGE,
            runtime=aws_lambda.Runtime.FROM_IMAGE,
            environment={
                'SQS_NAME': sqs_queue.queue_name,
                'TABLE_NAME': job_table.table_name
            },
            role=lambda_sqs_role,
            allow_public_subnet=True,
            memory_size=128,
            reserved_concurrent_executions=10,
            timeout=core.Duration.seconds(10))
        background_function.add_event_source(
            eventsources.SqsEventSource(sqs_queue))
        background_function.add_environment("TABLE_NAME", job_table.table_name)
        job_table.grant_write_data(background_function)

        # Create the Lambda Serving Requests
        ecr_image = aws_lambda.EcrImageCode.from_asset_image(
            directory=os.path.join(
                os.getcwd(), "startuptoolbag/app/lambda-dynamodb-server"))
        reader_function = aws_lambda.Function(
            self,
            id="lambdaDynamo Server",
            description="Handles API Requests backed by dynamo",
            code=ecr_image,
            handler=aws_lambda.Handler.FROM_IMAGE,
            runtime=aws_lambda.Runtime.FROM_IMAGE,
            environment={'TABLE_NAME': job_table.table_name},
            role=lambda_sqs_role,
            allow_public_subnet=True,
            memory_size=128,
            reserved_concurrent_executions=10,
            timeout=core.Duration.seconds(10))
        job_table.grant_read_data(reader_function)

        foo_r = api_gateway.root.add_resource("jobs")
        foo_r.add_method('GET',
                         aws_apigateway.LambdaIntegration(reader_function))
        foo_r.add_method(
            'POST', aws_apigateway.LambdaIntegration(lambda_handler_function))
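The CDK code above only wires up the infrastructure; the container images under startuptoolbag/app/ carry the runtime logic. A minimal sketch of what the SQS-driven background worker could look like, assuming a plain boto3 handler that records each job in the DynamoDB table named by TABLE_NAME (the entry point and item fields are illustrative, not taken from the image):

import json
import os

import boto3

dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(os.environ["TABLE_NAME"])


def handler(event, context):
    # Each record is one SQS message enqueued by the API-facing handler Lambda.
    for record in event["Records"]:
        job = json.loads(record["body"])
        # Persist the job so the reader Lambda can serve it via GET /jobs.
        table.put_item(Item={"job_id": job["job_id"], "status": "DONE"})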
Example #2
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        mybucket = s3.Bucket(
            self,
            ID + '-bucket',
            encryption=s3.BucketEncryption.S3_MANAGED,
            versioned=True,
            lifecycle_rules=[
                s3.LifecycleRule(
                    enabled=True,
                    noncurrent_version_expiration=core.Duration.days(30),
                )
            ])

        cloudtrail.Trail(
            self,
            ID + '-trail',
            enable_file_validation=True,
            include_global_service_events=True,
            is_multi_region_trail=True,
            send_to_cloud_watch_logs=True,
            cloud_watch_logs_retention=logs.RetentionDays.ONE_WEEK,
        )

        func_diff_notice = lambda_.Function(
            self,
            ID + '-func_diff_notice',
            code=lambda_.Code.asset('./functions/artifacts/'),
            handler='app.diff_notice',
            runtime=lambda_.Runtime.PYTHON_3_7,
            log_retention=logs.RetentionDays.ONE_MONTH,
            memory_size=128,
            timeout=core.Duration.seconds(60),
            tracing=lambda_.Tracing.ACTIVE)
        func_diff_notice.add_to_role_policy(
            iam.PolicyStatement(
                actions=['ssm:GetParameter'],
                resources=['*'],
            ))

        func_codebuild_alert = lambda_.Function(
            self,
            ID + '-func_codebuild_alert',
            code=lambda_.Code.asset('./functions/artifacts/'),
            handler='app.codebuild_alert',
            runtime=lambda_.Runtime.PYTHON_3_7,
            log_retention=logs.RetentionDays.ONE_MONTH,
            memory_size=128,
            timeout=core.Duration.seconds(60),
            tracing=lambda_.Tracing.ACTIVE)
        func_codebuild_alert.add_to_role_policy(
            iam.PolicyStatement(
                actions=['ssm:GetParameter'],
                resources=['*'],
            ))

        codebuild_project = codebuild.Project(
            self,
            ID + '-codebuild-project',
            environment=codebuild.BuildEnvironment(
                build_image=codebuild.LinuxBuildImage.STANDARD_2_0,
                compute_type=codebuild.ComputeType.SMALL),
            build_spec=codebuild.BuildSpec.from_object({
                'version': 0.2,
                'phases': {
                    'install': {
                        'runtime-versions': {
                            'ruby': 2.6
                        },
                        'commands': ['gem install roadworker']
                    },
                    'build': {
                        'commands': [
                            'roadwork --export --target-zone ' +
                            HOSTED_ZONE_NAME + ' --output Routefile',
                            'aws s3 cp Routefile s3://' +
                            mybucket.bucket_name + '/Routefile'
                        ]
                    }
                }
            }))
        codebuild_project.add_to_role_policy(
            iam.PolicyStatement(
                actions=['s3:PutObject'],
                resources=[mybucket.bucket_arn + '/Routefile'],
            ))
        codebuild_project.add_to_role_policy(
            iam.PolicyStatement(
                actions=['route53:List*', 'route53:Get*'],
                resources=['*'],
            ))
        codebuild_project.on_build_failed(
            ID + '-rule-on_build_failed',
            target=targets.LambdaFunction(func_codebuild_alert))

        rule = events.Rule(self, ID + '-rule', enabled=True)
        rule.add_event_pattern(
            source=['aws.route53'],
            detail_type=['AWS API Call via CloudTrail'],
            detail={
                'eventSource': ['route53.amazonaws.com'],
                'eventName': ['ChangeResourceRecordSets'],
                'requestParameters': {
                    'hostedZoneId': [HOSTED_ZONE_ID]
                },
            },
        )
        rule.add_target(targets.LambdaFunction(func_diff_notice))
        rule.add_target(targets.CodeBuildProject(codebuild_project))
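Both Lambda functions are only granted ssm:GetParameter, which suggests they read a notification endpoint (for example a Slack webhook URL) from Parameter Store and forward the triggering event. A minimal sketch of app.diff_notice under that assumption; the parameter name and payload shape are illustrative:

import json
import urllib.request

import boto3

ssm = boto3.client("ssm")


def diff_notice(event, context):
    # Hypothetical parameter holding the webhook URL; decrypt in case it is a SecureString.
    webhook_url = ssm.get_parameter(
        Name="/roadworker/webhook-url",
        WithDecryption=True)["Parameter"]["Value"]

    # The EventBridge rule forwards the Route 53 ChangeResourceRecordSets event from CloudTrail.
    detail = event.get("detail", {})
    payload = {"text": "Route 53 change detected: " + json.dumps(detail)[:500]}

    req = urllib.request.Request(
        webhook_url,
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"})
    urllib.request.urlopen(req)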
Example #3
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Setting up a Slack bot Lambda function
        bot_handler = aws_lambda.Function(
            self,
            "HitterBot",
            runtime=aws_lambda.Runtime.GO_1_X,
            handler="main",
            timeout=core.Duration.seconds(900),
            memory_size=2048,
            code=aws_lambda.AssetCode(path="./lambda"))

        # Creating Mutex Table in DynamoDB
        mutex_table = aws_dynamodb.Table(
            self,
            "HitterMutexTable",
            partition_key=aws_dynamodb.Attribute(
                name="ID", type=aws_dynamodb.AttributeType.STRING),
            billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST,
            time_to_live_attribute="TTL",
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        # Creating URL Table in DynamoDB
        url_table = aws_dynamodb.Table(
            self,
            "HitterURLTable",
            partition_key=aws_dynamodb.Attribute(
                name="ID", type=aws_dynamodb.AttributeType.STRING),
            billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST,
            time_to_live_attribute="TTL",
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        # Creating a bucket to be used in a Pre-Signed URL
        bucket = aws_s3.Bucket(
            self,
            "HitterS3",
            removal_policy=core.RemovalPolicy.RETAIN,
            block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL,
        )

        # Configuring the S3 Lifecycle
        bucket.add_lifecycle_rule(expiration=core.Duration.days(2))

        # Setting permissions for the Slack bot Lambda function
        mutex_table.grant_read_write_data(bot_handler)
        url_table.grant_read_write_data(bot_handler)
        bucket.grant_put(bot_handler)
        bucket.grant_read(bot_handler)
        bot_handler.add_to_role_policy(
            aws_iam.PolicyStatement(
                resources=["*"],
                actions=[
                    "comprehend:BatchDetectDominantLanguage",
                    "translate:TranslateText"
                ]))

        # Setting environment variables for the Slack bot Lambda function
        bot_handler.add_environment('SLACK_OAUTH_ACCESS_TOKEN',
                                    SLACK_OAUTH_ACCESS_TOKEN)
        bot_handler.add_environment('SLACK_VERIFICATION_TOKEN',
                                    SLACK_VERIFICATION_TOKEN)
        bot_handler.add_environment('MUTEX_TABLE_NAME', mutex_table.table_name)
        bot_handler.add_environment('URL_TABLE_NAME', url_table.table_name)
        bot_handler.add_environment('S3_BUCKET_NAME', bucket.bucket_name)
        bot_handler.add_environment('DEBUG_LOG', "false")

        # Creating an API Gateway for a slack bot
        bot_api = aws_apigateway.LambdaRestApi(self,
                                               "HitterBotAPI",
                                               handler=bot_handler)

        # Look up the existing hosted zone (it is created once, outside this stack)
        hosted_zone = aws_route53.HostedZone.from_hosted_zone_attributes(
            self,
            'HitterHostedZone',
            hosted_zone_id=ZONE_ID,
            zone_name=ZONE_NAME)

        # Set the domain for a bot
        bot_subdomain = "hitter"
        bot_domain_name = bot_subdomain + '.' + ZONE_NAME

        # Using AWS ACM to create a certificate for a bot
        bot_cert = aws_certificatemanager.DnsValidatedCertificate(
            self,
            'HitterBotCertificate',
            domain_name=bot_domain_name,
            hosted_zone=hosted_zone)

        # Add the domain name to the api and the A record to our hosted zone for a bot
        bot_domain = bot_api.add_domain_name('HitterBotDomain',
                                             certificate=bot_cert,
                                             domain_name=bot_domain_name)

        # Set the A record for a bot
        aws_route53.ARecord(
            self,
            'HitterBotARecord',
            record_name=bot_subdomain,
            zone=hosted_zone,
            target=aws_route53.RecordTarget.from_alias(
                aws_route53_targets.ApiGatewayDomain(bot_domain)))

        # Setting up Short URL Lambda function
        short_url_handler = aws_lambda.Function(
            self,
            "HitterShortURL",
            runtime=aws_lambda.Runtime.GO_1_X,
            handler="main",
            timeout=core.Duration.seconds(900),
            memory_size=128,
            code=aws_lambda.AssetCode(path="./lambda_api"))

        # Granting the Short URL Lambda function read access to the URL table
        url_table.grant_read_data(short_url_handler)

        # Setting environment variables for the Short URL Lambda function
        short_url_handler.add_environment('URL_TABLE_NAME',
                                          url_table.table_name)
        short_url_handler.add_environment('DEBUG_LOG', "false")

        # Creating an API Gateway for a Short URL
        short_url_api = aws_apigateway.LambdaRestApi(self,
                                                     "HitterShortURLAPI",
                                                     handler=short_url_handler)

        # Set the domain for a Short URL
        short_url_subdomain = "sl"
        short_url_domain_name = short_url_subdomain + '.' + ZONE_NAME
        short_url = 'https://' + short_url_domain_name

        # Using AWS ACM to create a certificate for a Short URL
        short_url_cert = aws_certificatemanager.DnsValidatedCertificate(
            self,
            'HitterShortURLCertificate',
            domain_name=short_url_domain_name,
            hosted_zone=hosted_zone)

        # Add the domain name to the api and the A record to our hosted zone for a Short URL
        short_url_domain = short_url_api.add_domain_name(
            'HitterShortURLDomain',
            certificate=short_url_cert,
            domain_name=short_url_domain_name)

        # Set the A record for a Short URL
        aws_route53.ARecord(
            self,
            'HitterShortURLARecord',
            record_name=short_url_subdomain,
            zone=hosted_zone,
            target=aws_route53.RecordTarget.from_alias(
                aws_route53_targets.ApiGatewayDomain(short_url_domain)))

        # Expose the Short URL API's base URL to the bot Lambda via an environment variable.
        bot_handler.add_environment('API_BASE_URL', short_url)
Example #4
    def __init__(self, scope: cdk.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.output_bucket = aws_s3.Bucket(
            self,
            "BucketTwitterStreamOutput",
            bucket_name=self.stack_name,
        )

        self.bucket_url = self.output_bucket.bucket_regional_domain_name

        # Because the Kinesis Firehose construct is a low-level CloudFormation (Cfn) resource, we have to create the IAM policy/role and attach it ourselves
        self.iam_role = aws_iam.Role(
            self,
            "IAMRoleTwitterStreamKinesisFHToS3",
            role_name="KinesisFirehoseToS3-{}".format(self.stack_name),
            assumed_by=aws_iam.ServicePrincipal(
                service='firehose.amazonaws.com'),
        )

        # S3 bucket actions
        self.s3_iam_policy_statement = aws_iam.PolicyStatement()
        actions = [
            "s3:GetBucketLocation", "s3:GetObject", "s3:ListBucket",
            "s3:ListBucketMultipartUploads", "s3:PutObject"
        ]
        for action in actions:
            self.s3_iam_policy_statement.add_action(action)
        self.s3_iam_policy_statement.add_resource(
            self.output_bucket.bucket_arn)
        self.s3_iam_policy_statement.add_resource(
            self.output_bucket.bucket_arn + "/*")

        # CW error log setup
        self.s3_error_logs_group = aws_logs.LogGroup(
            self,
            "S3ErrorLogsGroup",
            log_group_name="{}-s3-errors".format(self.stack_name))

        self.s3_error_logs_stream = aws_logs.LogStream(
            self,
            "S3ErrorLogsStream",
            log_group=self.s3_error_logs_group,
            log_stream_name='s3Backup')

        self.firehose = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "FirehoseTwitterStream",
            delivery_stream_name="{}-raw".format(self.stack_name),
            delivery_stream_type="DirectPut",
            s3_destination_configuration={
                'bucketArn': self.output_bucket.bucket_arn,
                'bufferingHints': {
                    'intervalInSeconds': 120,
                    'sizeInMBs': 10
                },
                'compressionFormat': 'UNCOMPRESSED',
                'roleArn': self.iam_role.role_arn,
                'cloudWatchLoggingOptions': {
                    'enabled': True,
                    'logGroupName': "{}-raw".format(self.stack_name),
                    'logStreamName': 's3BackupRaw'
                },
                'prefix': 'twitter-raw/'
            },
        )

        # TODO: Only attach what's needed for this policy, right now i'm lazy and attaching all policies
        self.iam_policy = aws_iam.Policy(
            self,
            "IAMPolicyTwitterStreamKinesisFHToS3",
            policy_name="KinesisFirehoseToS3-{}".format(self.stack_name),
            statements=[self.s3_iam_policy_statement],
        )

        self.iam_policy.attach_to_role(self.iam_role)

        # Because the Kinesis Firehose construct is a low-level CloudFormation (Cfn) resource, we have to create the IAM policy/role and attach it ourselves
        self.curator_firehose = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "CuratorFirehoseStream",
            delivery_stream_name="{}-curator".format(self.stack_name),
            delivery_stream_type="DirectPut",
            s3_destination_configuration={
                'bucketArn': self.output_bucket.bucket_arn,
                'bufferingHints': {
                    'intervalInSeconds': 120,
                    'sizeInMBs': 10
                },
                'compressionFormat': 'UNCOMPRESSED',
                'roleArn': self.iam_role.role_arn,
                'cloudWatchLoggingOptions': {
                    'enabled': True,
                    'logGroupName': "{}-curator".format(self.stack_name),
                    'logStreamName': 's3BackupCurator'
                },
                'prefix': 'twitter-curated/'
            },
        )

        def zip_package():
            cwd = os.getcwd()
            file_name = 'curator-lambda.zip'
            zip_file = cwd + '/' + file_name

            os.chdir('src/')
            sh.zip('-r9', zip_file, '.')
            os.chdir(cwd)

            return file_name, zip_file

        _, zip_file = zip_package()

        self.twitter_stream_curator_lambda_function = aws_lambda.Function(
            self,
            "TwitterStreamCuratorLambdaFunction",
            function_name="{}-curator".format(self.stack_name),
            code=aws_lambda.AssetCode(zip_file),
            handler="sentiment_analysis.lambda_handler",
            runtime=aws_lambda.Runtime.PYTHON37,
            tracing=aws_lambda.Tracing.Active,
            description=
            "Triggers from S3 PUT events for Twitter stream data and transforms it to clean JSON with sentiment analysis attached",
            environment={
                "STACK_NAME": self.stack_name,
                "FIREHOSE_STREAM": self.curator_firehose.delivery_stream_name
            },
            memory_size=128,
            timeout=120,
            log_retention_days=aws_logs.RetentionDays.OneWeek,
        )

        # Permission to talk to comprehend for sentiment analysis
        self.comprehend_iam_policy_statement = aws_iam.PolicyStatement()
        self.comprehend_iam_policy_statement.add_action('comprehend:*')
        self.comprehend_iam_policy_statement.add_all_resources()
        self.twitter_stream_curator_lambda_function.add_to_role_policy(
            self.comprehend_iam_policy_statement)

        # Permission to put in kinesis firehose
        self.curator_firehose_iam_policy_statement = aws_iam.PolicyStatement()
        self.curator_firehose_iam_policy_statement.add_action('firehose:Put*')
        self.curator_firehose_iam_policy_statement.add_resource(
            self.curator_firehose.delivery_stream_arn)
        self.twitter_stream_curator_lambda_function.add_to_role_policy(
            self.curator_firehose_iam_policy_statement)

        # Grant the curator Lambda read access to the raw output bucket
        self.output_bucket.grant_read(
            self.twitter_stream_curator_lambda_function)

        self.twitter_stream_curator_lambda_function.add_event_source(
            aws_lambda_event_sources.S3EventSource(
                bucket=self.output_bucket,
                events=[aws_s3.EventType.ObjectCreated],
                filters=[aws_s3.NotificationKeyFilter(prefix="twitter-raw/")]))
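A rough sketch of what sentiment_analysis.lambda_handler might do, assuming it reads each raw object named in the S3 PUT event, attaches a Comprehend sentiment score per tweet, and re-emits the enriched records to the curator Firehose stream (the tweet field names are illustrative):

import json
import os

import boto3

s3 = boto3.client("s3")
comprehend = boto3.client("comprehend")
firehose = boto3.client("firehose")

STREAM = os.environ["FIREHOSE_STREAM"]


def lambda_handler(event, context):
    for record in event["Records"]:
        bucket = record["s3"]["bucket"]["name"]
        key = record["s3"]["object"]["key"]
        body = s3.get_object(Bucket=bucket, Key=key)["Body"].read().decode("utf-8")

        # One JSON tweet per line in the raw Firehose output.
        for line in filter(None, body.splitlines()):
            tweet = json.loads(line)
            sentiment = comprehend.detect_sentiment(
                Text=tweet.get("text", "")[:4500], LanguageCode="en")
            tweet["sentiment"] = sentiment["Sentiment"]
            firehose.put_record(
                DeliveryStreamName=STREAM,
                Record={"Data": (json.dumps(tweet) + "\n").encode("utf-8")})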
Example #5
    def __init__(self, scope: core.Construct, id: str,
                 resource: aws_apigateway.Resource):
        super().__init__(scope, id)

        # DynamoDB
        self.table = aws_dynamodb.Table(
            self,
            "stock_table",
            partition_key=aws_dynamodb.Attribute(
                name="id", type=aws_dynamodb.AttributeType.STRING),
            billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST,
            table_name="stock_table",
            removal_policy=core.RemovalPolicy.DESTROY)

        # Lambdas
        create_lambda = aws_lambda.Function(
            self,
            "stock_create_lambda",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.asset("./services/stock/create"),
            memory_size=256,
            timeout=core.Duration.seconds(10),
            function_name="stock_create_lambda")
        create_lambda.add_environment("STOCK_TABLE", self.table.table_name)
        self.table.grant_read_write_data(create_lambda)

        self.find_lambda = aws_lambda.Function(
            self,
            "stock_find_lambda",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.asset("./services/stock/find"),
            memory_size=256,
            timeout=core.Duration.seconds(10),
            function_name="stock_find_lambda")
        self.find_lambda.add_environment("STOCK_TABLE", self.table.table_name)
        self.table.grant_read_write_data(self.find_lambda)

        self.add_lambda = aws_lambda.Function(
            self,
            "stock_add_lambda",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.asset("./services/stock/add"),
            memory_size=256,
            timeout=core.Duration.seconds(10),
            function_name="stock_add_lambda")
        self.add_lambda.add_environment("STOCK_TABLE", self.table.table_name)
        self.table.grant_read_write_data(self.add_lambda)

        self.subtract_lambda = aws_lambda.Function(
            self,
            "stock_subtract_lambda",
            runtime=aws_lambda.Runtime.PYTHON_3_6,
            handler="lambda_function.lambda_handler",
            code=aws_lambda.Code.asset("./services/stock/subtract"),
            memory_size=256,
            timeout=core.Duration.seconds(10),
            function_name="stock_subtract_lambda")
        self.subtract_lambda.add_environment("STOCK_TABLE",
                                             self.table.table_name)
        self.table.grant_read_write_data(self.subtract_lambda)

        # API Gateway integration
        create_integration = aws_apigateway.LambdaIntegration(create_lambda)
        find_integration = aws_apigateway.LambdaIntegration(self.find_lambda)
        add_integration = aws_apigateway.LambdaIntegration(self.add_lambda)
        subtract_integration = aws_apigateway.LambdaIntegration(
            self.subtract_lambda)

        # API Gateway
        # POST /stock/item/create/{price}
        create = resource.add_resource("item").add_resource(
            "create").add_resource("{price}")
        create.add_method("POST", create_integration)

        # GET /stock/find/{item_id}
        find = resource.add_resource("find").add_resource("{item_id}")
        find.add_method("GET", find_integration)

        # POST /stock/add/{item_id}/{number}
        add = resource.add_resource("add").add_resource(
            "{item_id}").add_resource("{number}")
        add.add_method("POST", add_integration)

        # POST /stock/subtract/{item_id}/{number}
        subtract = resource.add_resource("subtract").add_resource(
            "{item_id}").add_resource("{number}")
        subtract.add_method("POST", subtract_integration)
Example #6
    def __init__(self, scope: core.Construct, id: str, stack_prefix: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Model all required resources
        ddb_table = _ddb.Table(
            self,
            id='{}-data'.format(stack_prefix),
            table_name='{}-data'.format(stack_prefix),
            partition_key=_ddb.Attribute(name='ID',
                                         type=_ddb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.
            DESTROY,  # THIS IS NOT RECOMMENDED FOR PRODUCTION USE
            read_capacity=1,
            write_capacity=1)

        ## IAM Roles
        lambda_role = _iam.Role(
            self,
            id='{}-lambda-role'.format(stack_prefix),
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'))

        cw_policy_statement = _iam.PolicyStatement(effect=_iam.Effect.ALLOW)
        cw_policy_statement.add_actions("logs:CreateLogGroup")
        cw_policy_statement.add_actions("logs:CreateLogStream")
        cw_policy_statement.add_actions("logs:PutLogEvents")
        cw_policy_statement.add_actions("logs:DescribeLogStreams")
        cw_policy_statement.add_resources("*")
        lambda_role.add_to_policy(cw_policy_statement)

        # Add role for DynamoDB
        dynamodb_policy_statement = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW)
        dynamodb_policy_statement.add_actions("dynamodb:PutItem")
        dynamodb_policy_statement.add_actions("dynamodb:GetItem")
        dynamodb_policy_statement.add_actions("dynamodb:Scan")
        dynamodb_policy_statement.add_actions("dynamodb:Query")
        dynamodb_policy_statement.add_actions("dynamodb:ConditionCheckItem")
        dynamodb_policy_statement.add_resources(ddb_table.table_arn)
        lambda_role.add_to_policy(dynamodb_policy_statement)

        ## AWS Lambda Functions
        fnLambda_storeData = _lambda.Function(
            self,
            "{}-function-storeData".format(stack_prefix),
            code=_lambda.AssetCode("../lambda-functions/store-data"),
            handler="app.handler",
            timeout=core.Duration.seconds(60),
            role=lambda_role,
            runtime=_lambda.Runtime.PYTHON_3_8)
        fnLambda_storeData.add_environment("TABLE_NAME", ddb_table.table_name)

        fnLambda_listData = _lambda.Function(
            self,
            "{}-function-getData".format(stack_prefix),
            code=_lambda.AssetCode("../lambda-functions/list-data"),
            handler="app.handler",
            role=lambda_role,
            timeout=core.Duration.seconds(60),
            runtime=_lambda.Runtime.PYTHON_3_8)
        fnLambda_listData.add_environment("TABLE_NAME", ddb_table.table_name)

        core.CfnOutput(self,
                       "{}-output-dynamodbTable".format(stack_prefix),
                       value=ddb_table.table_name,
                       export_name="{}-ddbTable".format(stack_prefix))
        core.CfnOutput(
            self,
            "{}-output-lambdaStoreData".format(stack_prefix),
            value=fnLambda_storeData.function_name,
            export_name="{}-lambdaStoreDataName".format(stack_prefix))
        core.CfnOutput(
            self,
            "{}-output-lambdaListData".format(stack_prefix),
            value=fnLambda_listData.function_name,
            export_name="{}-lambdaListDataName".format(stack_prefix))
Example #7
    def __init__(self, scope: core.Construct, id: str, lambda_role: iam.Role) -> None:
        super().__init__(scope, id)

        self.emails_bucket = s3.Bucket(
            self,
            f'{get_stack_name()}EmailsBucket',
            removal_policy=core.RemovalPolicy.DESTROY,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )

        self.emails_bucket.add_to_resource_policy(
            iam.PolicyStatement(actions=['s3:PutObject'], 
                                resources=[f'arn:aws:s3:::{self.emails_bucket.bucket_name}/*'],
                                principals=[iam.ServicePrincipal('ses.amazonaws.com')],
                                conditions={ "StringEquals": { "aws:Referer": core.Stack.of(self).account } }
            )
        )

        bucket_name_output = core.CfnOutput(self, id="EmailBucket", value=self.emails_bucket.bucket_name)
        bucket_name_output.override_logical_id("EmailBucket")

        admin_submit_lambda = aws_lambda.Function(
            self,
            'AdminDataSubmit',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_asset(self._LAMBDA_ASSET_DIR),
            handler='functions.handler.admin_submit',
            role=lambda_role,
            environment={
                EMAIL_BUCKET_ENV_VAR: self.emails_bucket.bucket_name
            },
        )

        teacher_submit_lambda = aws_lambda.Function(
            self,
            'TeacherDataSubmit',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_asset(self._LAMBDA_ASSET_DIR),
            handler='functions.handler.teacher_submit',
            role=lambda_role,
            environment={
                EMAIL_BUCKET_ENV_VAR: self.emails_bucket.bucket_name
            },
        )

        self.emails_bucket.grant_read_write(admin_submit_lambda)
        self.emails_bucket.grant_read_write(teacher_submit_lambda)


        ruleset_name = ses.ReceiptRuleSet(scope=self, id="DataSubmissionReceiptRuleSet",
            receipt_rule_set_name=f'{get_stack_name()}ReceiptRules',
            rules=[
                ses.ReceiptRuleOptions(
                    receipt_rule_name=f'{get_stack_name()}AdminSubmitRule',
                    recipients=[f'adminsubmit@{KESHER_DOMAIN_NAME}'],
                    actions=[
                        ses_actions.S3(bucket=self.emails_bucket, object_key_prefix="AdminIncomingEmail"), 
                        ses_actions.Lambda(function=admin_submit_lambda)
                    ],
                ),
                ses.ReceiptRuleOptions(
                    receipt_rule_name=f'{get_stack_name()}TeacherSubmitRule',
                    recipients=[f'teachersubmit@{KESHER_DOMAIN_NAME}'],
                    actions=[
                        ses_actions.S3(bucket=self.emails_bucket, object_key_prefix="TeacherIncomingEmail"), 
                        ses_actions.Lambda(function=teacher_submit_lambda),
                    ]
                )
            ]
        )

        ruleset_name_output = core.CfnOutput(self, id="RuleSetName", value=ruleset_name.receipt_rule_set_name)
        ruleset_name_output.override_logical_id("RuleSetName")
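The S3 action stores the raw message in the bucket before the Lambda action fires, so the handlers can fetch it by message ID. A minimal sketch of functions.handler.admin_submit under two assumptions the snippet does not confirm: the object key is the configured prefix followed by the SES message ID, and the EMAIL_BUCKET_ENV_VAR constant resolves to "EMAIL_BUCKET":

import os

import boto3

s3 = boto3.client("s3")

# Hypothetical value of EMAIL_BUCKET_ENV_VAR; the real constant lives elsewhere in the project.
BUCKET = os.environ["EMAIL_BUCKET"]


def admin_submit(event, context):
    # SES invokes the Lambda action with receipt metadata; the S3 action has already
    # stored the raw MIME message under <object_key_prefix><messageId> (assumed layout).
    message_id = event["Records"][0]["ses"]["mail"]["messageId"]
    raw_email = s3.get_object(
        Bucket=BUCKET, Key="AdminIncomingEmail" + message_id)["Body"].read()
    # ... parse the MIME message and process the admin submission ...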
Example #8
    def __init__(
            self,
            scope: core.Construct,
            id_: str,
            vpc_stack,
            elastic_stack,
            update_lambda_zip=False,
            **kwargs,
    ) -> None:
        super().__init__(scope, id_, **kwargs)

        # if update lambda zip (including if zip doesn't exist)
        if (
                update_lambda_zip
                or not pathlib.Path(os.path.join(dirname, "kibana_lambda.zip")).exists()
        ):
            # build the image, then copy the packaged zip out of a temporary container
            call(["docker", "build", "--tag", "kibana-lambda", "."], cwd=dirname)
            call(
                ["docker", "create", "-ti", "--name", "dummy", "kibana-lambda", "bash"],
                cwd=dirname,
            )
            call(["docker", "cp", "dummy:/tmp/kibana_lambda.zip", "."], cwd=dirname)
            call(["docker", "rm", "-f", "dummy"], cwd=dirname)

        kibana_bucket = s3.Bucket(
            self,
            "kibana_bucket",
            public_read_access=False,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=core.RemovalPolicy.DESTROY,
        )
        # tag the bucket
        core.Tags.of(kibana_bucket).add("project", constants["PROJECT_TAG"])

        # the lambda behind the api
        kibana_lambda = lambda_.Function(
            self,
            "kibana_lambda",
            description="kibana api gateway lambda",
            code=lambda_.Code.from_asset(os.path.join(dirname, "kibana_lambda.zip")),
            handler="lambda_function.lambda_handler",
            timeout=core.Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_8,
            vpc=vpc_stack.get_vpc,
            security_groups=[elastic_stack.elastic_security_group],
            log_retention=logs.RetentionDays.ONE_WEEK,
        )
        # tag the lambda
        core.Tags.of(kibana_lambda).add("project", constants["PROJECT_TAG"])
        # create policies for the lambda
        kibana_lambda_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW, actions=["s3:*"], resources=["*"],
        )
        # add the role permissions
        kibana_lambda.add_to_role_policy(statement=kibana_lambda_policy)

        # the api gateway
        kibana_api = apigw.LambdaRestApi(
            self, "kibana_api", handler=kibana_lambda, binary_media_types=["*/*"]
        )
        # tag the api gateway
        core.Tags.of(kibana_api).add("project", constants["PROJECT_TAG"])

        kibana_identity = cloudfront.OriginAccessIdentity(self, "kibana_identity")

        kibana_api_domain = "/".join(kibana_api.url.split("/")[1:-2])[1:]
        kibana_api_path = f'/{"/".join(kibana_api.url.split("/")[-2:])}'

        # create the cloudfront distribution
        kibana_distribution = cloudfront.CloudFrontWebDistribution(
            self,
            "kibana_distribution",
            origin_configs=[
                # the lambda source for kibana
                cloudfront.SourceConfiguration(
                    custom_origin_source=cloudfront.CustomOriginConfig(
                        domain_name=kibana_api_domain,
                        origin_protocol_policy=cloudfront.OriginProtocolPolicy.HTTPS_ONLY,
                    ),
                    origin_path="/prod",
                    behaviors=[
                        cloudfront.Behavior(
                            is_default_behavior=True,
                            allowed_methods=cloudfront.CloudFrontAllowedMethods.ALL,
                            cached_methods=cloudfront.CloudFrontAllowedCachedMethods.GET_HEAD_OPTIONS,
                            compress=False,
                            forwarded_values=CfnDistribution.ForwardedValuesProperty(
                                query_string=True,
                                cookies=CfnDistribution.CookiesProperty(forward="all"),
                                headers=[
                                    "Content-Type",
                                    "Accept",
                                    "Accept-Encoding",
                                    "kbn-name",
                                    "kbn-version",
                                ],
                            ),
                        )
                    ],
                ),
                # the s3 bucket source for kibana
                cloudfront.SourceConfiguration(
                    s3_origin_source=cloudfront.S3OriginConfig(
                        s3_bucket_source=kibana_bucket,
                        origin_access_identity=kibana_identity,
                    ),
                    behaviors=[
                        cloudfront.Behavior(
                            is_default_behavior=False,
                            path_pattern="bucket_cached/*",
                            allowed_methods=cloudfront.CloudFrontAllowedMethods.GET_HEAD,
                            cached_methods=cloudfront.CloudFrontAllowedCachedMethods.GET_HEAD,
                            compress=True,
                        )
                    ],
                ),
            ],
        )
        # tag the cloudfront distribution
        core.Tags.of(kibana_distribution).add("project", constants["PROJECT_TAG"])
        # needs api and bucket to be available
        kibana_distribution.node.add_dependency(kibana_api)

        # kibana bucket empty policies
        kibana_bucket_empty_policy = [
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW, actions=["s3:ListBucket"], resources=["*"],
            ),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["s3:DeleteObject"],
                resources=[f"{kibana_bucket.bucket_arn}/*"],
            ),
        ]
        # create the custom resource
        kibana_bucket_empty = CustomResource(
            self,
            "kibana_bucket_empty",
            PhysicalId="kibanaBucketEmpty",
            Description="Empty kibana cache s3 bucket",
            Uuid="f7d4f730-4ee1-13e8-9c2d-fa7ae06bbebc",
            HandlerPath=os.path.join(dirname, "../helpers/s3_bucket_empty.py"),
            BucketName=kibana_bucket.bucket_name,
            ResourcePolicies=kibana_bucket_empty_policy,
        )
        # tag the lambda
        core.Tags.of(kibana_bucket_empty).add("project", constants["PROJECT_TAG"])
        # needs a dependency
        kibana_bucket_empty.node.add_dependency(kibana_bucket)

        # kibana lambda update policies
        kibana_lambda_update_policy = [
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "s3:ListBucket",
                    "s3:ListAllMyBuckets",
                    "lambda:ListFunctions",
                    "lambda:UpdateFunctionConfiguration",
                    "cloudfront:ListDistributions",
                    "s3:GetBucketTagging",
                    "es:ListDomainNames",
                    "es:DescribeElasticsearchDomain",
                ],
                resources=["*"],
            )
        ]
        # create the kibana lambda update
        kibana_lambda_update = CustomResource(
            self,
            "kibana_lambda_update",
            Description="Update ENV vars for kibana api lambda",
            PhysicalId="kibanaLambdaUpdate",
            Uuid="f7d4f230-4ee1-07e8-9c2d-fa7ae06bbebc",
            HandlerPath=os.path.join(dirname, "../helpers/lambda_env_update.py"),
            ResourcePolicies=kibana_lambda_update_policy,
        )
        # tag the lambda
        core.Tags.of(kibana_lambda_update).add("project", constants["PROJECT_TAG"])
        # needs a dependency
        kibana_lambda_update.node.add_dependency(kibana_bucket)
        kibana_lambda_update.node.add_dependency(kibana_distribution)

        core.CfnOutput(
            self,
            "kibana_link",
            value=f"https://{kibana_distribution.domain_name}/_plugin/kibana",
            description="Kibana Web Url",
            export_name="kibana-link",
        )
Example #9
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 stack_log_level: str, sales_event_bkt, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below

        ########################################
        #######                          #######
        #######   Stream Data Producer   #######
        #######                          #######
        ########################################

        # Read Lambda Code
        try:
            with open(
                    "stacks/back_end/serverless_s3_producer_stack/lambda_src/stream_data_producer.py",
                    encoding="utf-8",
                    mode="r",
            ) as f:
                data_producer_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        data_producer_fn = _lambda.Function(
            self,
            "streamDataProducerFn",
            function_name=f"data_producer_{construct_id}",
            description="Produce streaming data events and push to S3 stream",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(data_producer_fn_code),
            handler="index.lambda_handler",
            timeout=cdk.Duration.seconds(2),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "APP_ENV": "Production",
                "STORE_EVENTS_BKT": f"{sales_event_bkt.bucket_name}",
                "TRIGGER_RANDOM_DELAY": "True"
            },
        )

        # Grant our Lambda Producer privileges to write to S3
        sales_event_bkt.grant_read_write(data_producer_fn)

        data_producer_fn_version = data_producer_fn.latest_version
        data_producer_fn_version_alias = _lambda.Alias(
            self,
            "streamDataProducerFnAlias",
            alias_name="MystiqueAutomation",
            version=data_producer_fn_version,
        )

        # Create Custom Loggroup for Producer
        data_producer_lg = _logs.LogGroup(
            self,
            "streamDataProducerFnLogGroup",
            log_group_name=f"/aws/lambda/{data_producer_fn.function_name}",
            removal_policy=cdk.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY,
        )

        # Restrict the producer Lambda to be invoked only from the stack owner account
        data_producer_fn.add_permission(
            "restrictLambdaInvocationToOwnAccount",
            principal=_iam.AccountRootPrincipal(),
            action="lambda:InvokeFunction",
            source_account=cdk.Aws.ACCOUNT_ID,
            # source_arn=sales_event_bkt.bucket_arn
        )

        self.data_producer_fn_role = data_producer_fn.role

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page.",
        )

        output_1 = cdk.CfnOutput(
            self,
            "LambdaConsumer",
            # "StoreOrdersEventsProducer",
            value=
            f"https://console.aws.amazon.com/lambda/home?region={cdk.Aws.REGION}#/functions/{data_producer_fn.function_name}",
            description="Produce streaming data events and push to S3 Topic.",
        )
        output_2 = cdk.CfnOutput(
            self,
            "LambdaConsumerRoleArn",
            value=f"{self.data_producer_fn_role.role_arn}",
            description="StoreOrdersEventsProducerRole",
        )
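The producer's source is read from stream_data_producer.py and inlined into the function, so it is not shown here. A minimal sketch of what such a producer might do, given the STORE_EVENTS_BKT and TRIGGER_RANDOM_DELAY environment variables configured above (the event fields and key layout are illustrative):

import datetime
import json
import os
import random
import time
import uuid

import boto3

s3 = boto3.client("s3")


def lambda_handler(event, context):
    # Optional jitter between generated events.
    if os.environ.get("TRIGGER_RANDOM_DELAY") == "True":
        time.sleep(random.random())

    order = {
        "order_id": str(uuid.uuid4()),
        "amount": round(random.uniform(1, 100), 2),
        "ts": datetime.datetime.utcnow().isoformat(),
    }
    key = "sales_events/" + order["order_id"] + ".json"
    s3.put_object(
        Bucket=os.environ["STORE_EVENTS_BKT"],
        Key=key,
        Body=json.dumps(order).encode("utf-8"))
    return {"status": True, "object_key": key}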
Example #10
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        domain_arn: str,
        aws_cli_profile: str = None,
        cw_trigger_sns_arn_list: list = [],
        enable_es_api_output: bool = False,
        es_api_output_sns_arn: str = None,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        # Configuring certain aspects of the stack based on the ES domain details
        self.configure(domain_arn, aws_cli_profile, cw_trigger_sns_arn_list)

        # Setting a Cloudwatch Alarm on the ClusterStatus.red metric
        self.create_cw_alarm_with_action(
            "ClusterStatus.red",
            1,
            cloudwatch.ComparisonOperator.GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
            1,
            1,
            "max",
            self._sns_topic_list,
        )

        # Setting a Cloudwatch Alarm on the ClusterStatus.yellow metric
        self.create_cw_alarm_with_action(
            "ClusterStatus.yellow",
            1,
            cloudwatch.ComparisonOperator.GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
            1,
            1,
            "max",
            self._sns_topic_list,
        )

        # Setting a Cloudwatch Alarm on the FreeStorageSpace metric. The threshold is 25% of the current volume size (in MB) of a data node.
        self.create_cw_alarm_with_action(
            "FreeStorageSpace",
            self._volume_size * 0.25 * 1000,
            cloudwatch.ComparisonOperator.LESS_THAN_OR_EQUAL_TO_THRESHOLD,
            1,
            1,
            "min",
            self._sns_topic_list,
        )

        # Setting a Cloudwatch Alarm on the ClusterIndexWritesBlocked metric
        self.create_cw_alarm_with_action(
            "ClusterIndexWritesBlocked",
            1,
            cloudwatch.ComparisonOperator.GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
            5,
            1,
            "max",
            self._sns_topic_list,
        )

        # Setting a Cloudwatch Alarm on the Nodes metric
        self.create_cw_alarm_with_action(
            "Nodes",
            self._node_count,
            cloudwatch.ComparisonOperator.LESS_THAN_THRESHOLD,
            1440,
            1,
            "min",
            self._sns_topic_list,
        )

        # Setting a Cloudwatch Alarm on the AutomatedSnapshotFailure metric
        self.create_cw_alarm_with_action(
            "AutomatedSnapshotFailure",
            1,
            cloudwatch.ComparisonOperator.GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
            1,
            1,
            "max",
            self._sns_topic_list,
        )

        # Setting a Cloudwatch Alarm on the CPUUtilization metric
        self.create_cw_alarm_with_action(
            "CPUUtilization",
            80,
            cloudwatch.ComparisonOperator.GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
            15,
            3,
            "avg",
            self._sns_topic_list,
        )

        # Setting a Cloudwatch Alarm on the JVMMemoryPressure metric
        self.create_cw_alarm_with_action(
            "JVMMemoryPressure",
            80,
            cloudwatch.ComparisonOperator.GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
            5,
            3,
            "max",
            self._sns_topic_list,
        )

        # Setting a Cloudwatch Alarm on the MasterCPUUtilization & MasterJVMMemoryPressure metrics
        # only if Dedicated Master is enabled
        if self._is_dedicated_master_enabled:
            self.create_cw_alarm_with_action(
                "MasterCPUUtilization",
                50,
                cloudwatch.ComparisonOperator.
                GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
                15,
                3,
                "avg",
                self._sns_topic_list,
            )

            self.create_cw_alarm_with_action(
                "MasterJVMMemoryPressure",
                80,
                cloudwatch.ComparisonOperator.
                GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
                15,
                1,
                "max",
                self._sns_topic_list,
            )

        # Setting a Cloudwatch Alarm on the KMSKeyError & KMSKeyInaccessible metrics
        # only if Encryption at Rest config is enabled
        if self._is_encryption_at_rest_enabled:
            self.create_cw_alarm_with_action(
                "KMSKeyError",
                1,
                cloudwatch.ComparisonOperator.
                GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
                1,
                1,
                "max",
                self._sns_topic_list,
            )

            self.create_cw_alarm_with_action(
                "KMSKeyInaccessible",
                1,
                cloudwatch.ComparisonOperator.
                GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
                1,
                1,
                "max",
                self._sns_topic_list,
            )

        if enable_es_api_output:
            # Creating a Lambda function to invoke ES _cat APIs corresponding to the triggered CW Alarm
            if self._is_vpc_domain:
                self._lambda_vpc = ec2.Vpc.from_lookup(self,
                                                       self._vpc,
                                                       vpc_id=self._vpc)

                self._lambda_security_group = ec2.SecurityGroup.from_security_group_id(
                    self,
                    self._security_group,
                    security_group_id=self._security_group)

                self._lambda_security_group.connections.allow_internally(
                    port_range=ec2.Port.tcp(443),
                    description=
                    "Ingress rule that allows the aws_es_cw_alarms Lambda to talk to a VPC based ES domain"
                )

                self._lambda_func = _lambda.Function(
                    self,
                    "CWAlarmHandler",
                    runtime=_lambda.Runtime.PYTHON_3_7,
                    code=_lambda.Code.asset("lambda"),
                    handler="lambda_function.lambda_handler",
                    timeout=core.Duration.minutes(15),
                    environment={
                        "DOMAIN_ENDPOINT": self._domain_endpoint,
                        "DOMAIN_ARN": domain_arn
                    },
                    vpc=self._lambda_vpc,
                    vpc_subnets=ec2.SubnetSelection(
                        subnet_type=ec2.SubnetType.PRIVATE),
                    security_group=self._lambda_security_group)
            else:
                self._lambda_func = _lambda.Function(
                    self,
                    "CWAlarmHandler",
                    runtime=_lambda.Runtime.PYTHON_3_7,
                    code=_lambda.Code.asset("lambda"),
                    handler="lambda_function.lambda_handler",
                    timeout=core.Duration.minutes(15),
                    environment={
                        "DOMAIN_ENDPOINT": self._domain_endpoint,
                        "DOMAIN_ARN": domain_arn
                    },
                )

            # A Custom IAM Policy statement to grant _cat API access to the Lambda function
            self._es_policy_statement = iam.PolicyStatement(
                actions=["es:ESHttpHead", "es:ESHttpGet"],
                effect=iam.Effect.ALLOW,
                resources=[domain_arn + "/*"],
            )

            self._lambda_func.add_to_role_policy(self._es_policy_statement)

            # Attaching a SNS topic provided by the user as the trigger for the Lambda function
            # If more than one SNS topic is provided, we will attach just the first SNS topic as the trigger
            self._lambda_func.add_event_source(
                _lambda_event_source.SnsEventSource(self._sns_topic_list[0]))

            if es_api_output_sns_arn:
                self._lambda_func.add_environment("SNS_TOPIC_ARN",
                                                  es_api_output_sns_arn)

                # Adding SNS Publish permission since the Lambda function is configured to post
                # the output of _cat APIs to the same SNS topic that triggers the function
                self._sns_publish_policy_statement = iam.PolicyStatement(
                    actions=["SNS:Publish"],
                    effect=iam.Effect.ALLOW,
                    resources=[es_api_output_sns_arn],
                )

                self._lambda_func.add_to_role_policy(
                    self._sns_publish_policy_statement)
Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        table = ddb.Table(self,
                          'StripeSampleCustomers',
                          partition_key={
                              'name': 'CustomerID',
                              'type': ddb.AttributeType.STRING
                          })

        bus = events.EventBus(self,
                              'stripeAppEventBus',
                              event_bus_name='stripeAppEventBus')

        lambda_role_for_go = iam.Role(
            self,
            "Role",
            role_name='stripeAppRole',
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEventBridgeFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "SecretsManagerReadWrite")
            ])

        customer_created_handler = _lambda.Function(
            self,
            "createStripeCustomerHandler",
            runtime=_lambda.Runtime.GO_1_X,
            code=_lambda.Code.asset('lambda/stripe-create-customer'),
            handler='createCustomerHandler',
            timeout=core.Duration.seconds(8),
            role=lambda_role_for_go,
            environment={
                'CUSTOMER_TABLE_NAME': table.table_name,
            })
        table.grant_read_write_data(customer_created_handler)

        go_lambda = _lambda.Function(
            self,
            "stripeWebhookEventHandler",
            runtime=_lambda.Runtime.GO_1_X,
            code=_lambda.Code.asset('lambda/stripe-webhook-handler'),
            handler='stripeWebhookHandler',
            timeout=core.Duration.seconds(8),
            role=lambda_role_for_go,
        )

        _apigw.LambdaRestApi(self, "stripeWebhookAPI", handler=go_lambda)

        customer_created_handler.add_permission(
            "createStripeCustomerHandlerPermission",
            principal=iam.ServicePrincipal("events.amazonaws.com"),
            action='lambda:InvokeFunction',
            source_arn=go_lambda.function_arn)

        go_lambda.add_permission(
            "stripeWebhookHandlerPermission",
            principal=iam.ServicePrincipal("lambda.amazonaws.com"),
            action='lambda:InvokeFunction',
            source_arn=customer_created_handler.function_arn)

        event = events.Rule(
            self,
            'stripeWebhookEventRule',
            rule_name='stripeWebhookEventRule',
            enabled=True,
            event_bus=bus,
            description=
            'all success events are caught here and logged centrally',
            event_pattern=events.EventPattern(
                detail={"stripeEvent": ["customer.subscription.created"]},
                source=["stripeWebHookHandler.lambda"]))

        event.add_target(targets.LambdaFunction(customer_created_handler))
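The rule only fires for events whose source and detail match the pattern above, so the Go webhook handler is expected to republish Stripe events onto stripeAppEventBus in that shape. Shown here in Python with boto3 purely for illustration (the actual handlers are Go):

import json

import boto3

events_client = boto3.client("events")


def publish_stripe_event(stripe_event_type, payload):
    # Source and detail must match the rule's event_pattern for the target Lambda to fire.
    events_client.put_events(Entries=[{
        "EventBusName": "stripeAppEventBus",
        "Source": "stripeWebHookHandler.lambda",
        "DetailType": "stripe webhook event",
        "Detail": json.dumps({
            "stripeEvent": stripe_event_type,  # e.g. "customer.subscription.created"
            "data": payload,
        }),
    }])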
Example #12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        aws_region = os.environ.get("CDK_DEPLOY_REGION",
                                    os.environ["CDK_DEFAULT_REGION"])
        account_id = os.environ.get("CDK_DEPLOY_ACCOUNT",
                                    os.environ["CDK_DEFAULT_ACCOUNT"])

        ssm_client = boto3.client('ssm', aws_region)
        # Prepare pipeline config details in SSM parameters
        if prefix == 'us':
            self.qs_reports_env_config = {
                "Permissions": [{
                    "Group_Name": "Critical",
                    "Reports": ["Sales Results - Critical"]
                }, {
                    "Group_Name":
                    "HighlyConfidential",
                    "Reports": [
                        "Field Operations Dashboard",
                        "Sales Results - Highly Confidential"
                    ]
                }, {
                    "Group_Name": "BI-developer",
                    "Reports": ["all"]
                }, {
                    "Group_Name": "BI-Admin",
                    "Reports": ["all"]
                }, {
                    "Group_Name": "Power-reader",
                    "Reports": ["read-all"]
                }]
            }
        if prefix == 'eu':
            self.qs_reports_env_config = {
                "Permissions": [{
                    "Group_Name": "EU-Critical",
                    "Reports": ["EUResults - Critical"]
                }, {
                    "Group_Name": "BI-developer",
                    "Reports": ["all"]
                }, {
                    "Group_Name": "BI-Admin",
                    "Reports": ["all"]
                }, {
                    "Group_Name":
                    "EU-HighlyConfidential",
                    "Reports": [
                        "EUField Operations Dashboard",
                        "EUResults - Highly Confidential"
                    ]
                }, {
                    "Group_Name": "Power-reader",
                    "Reports": ["read-all"]
                }]
            }

        self.qs_reports_env_config_ssm = ssm.StringParameter(
            self,
            '/qs/config/access',
            string_value=json.dumps(self.qs_reports_env_config),
            parameter_name='/qs/config/access')

        #Group-user mapping information is stored in an S3 bucket. An SSM parameter stores the bucket name.
        self.qs_user_group_config = {
            'bucket-name': f'qs-granular-access-demo-{account_id}'
        }

        self.qs_user_group_config_ssm = ssm.StringParameter(
            self,
            '/qs/config/groups',
            string_value=json.dumps(self.qs_user_group_config),
            parameter_name='/qs/config/groups')

        # group-role mapping information is stored in an SSM parameter.
        self.qs_role_config = {
            'BI-developer': 'AUTHOR',
            'BI-Admin': 'ADMIN',
            'Power-reader': 'AUTHOR',
            'Critical': 'READER',
            'HighlyConfidential': 'READER'
        }

        self.qs_role_config_ssm = ssm.StringParameter(
            self,
            '/qs/config/roles',
            string_value=json.dumps(self.qs_role_config),
            parameter_name='/qs/config/roles')

        # group-namespace mapping information is stored in an SSM parameter.
        self.qs_ns_config = {
            'BI-developer': 'default',
            'BI-Admin': 'default',
            'Power-reader': 'default',
            'Critical': 'default',
            'HighlyConfidential': 'default'
        }

        self.qs_ns_config_ssm = ssm.StringParameter(
            self,
            '/qs/config/ns',
            string_value=json.dumps(self.qs_ns_config),
            parameter_name='/qs/config/ns')

        lambda_role = iam.Role(
            self,
            id='lambda-role',
            description='Role for the quicksight lambda',
            role_name=f'{aws_region}-role-quicksight-lambda',
            max_session_duration=core.Duration.seconds(3600),
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={
                'AllowS3Access':
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            "kms:GetParametersForImport", "kms:GetPublicKey",
                            "kms:ListKeyPolicies", "kms:ListRetirableGrants",
                            "kms:GetKeyPolicy", "kms:ListResourceTags",
                            "kms:ListGrants", "kms:GetKeyRotationStatus",
                            "kms:DescribeKey", "kms:CreateGrant",
                            "kms:ListAliases", "kms:ListKeys",
                            "kms:DescribeCustomKeyStores",
                            "ssm:GetParameters", "ssm:GetParameter",
                            "ssm:GetParametersByPath"
                        ],
                        resources=['*']),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            "lambda:InvokeFunction", "logs:CreateLogStream",
                            "logs:CreateLogGroup", "logs:PutLogEvents",
                            "quicksight:*", "s3:HeadBucket",
                            "s3:ListAllMyBuckets", "s3:PutObject",
                            "s3:GetObject", "s3:ListBucket",
                            "s3:GetObjectVersionForReplication",
                            "s3:GetBucketPolicy", "s3:GetObjectVersion",
                            "cloudwatch:PutMetricData", "sts:GetCallerIdentity"
                        ],
                        resources=['*'])
                ])
            })

        user_init = _lambda.Function(
            self,
            'user_init',
            handler='user_init.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset(
                os.path.join(current_dir, '../lambda_functions/user_init/')),
            function_name='user_init',
            role=lambda_role,
            timeout=core.Duration.minutes(15),
            memory_size=512)

        check_team_members = _lambda.Function(
            self,
            'check_team_members',
            handler='check_team_members.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset(
                os.path.join(current_dir,
                             '../lambda_functions/check_team_members/')),
            function_name='check_team_members',
            role=lambda_role,
            timeout=core.Duration.minutes(15),
            memory_size=512,
            environment={'aws_region': f'{core.Aws.REGION}'})

        downgrade_user = _lambda.Function(
            self,
            'downgrade_user',
            handler='downgrade_user.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset(
                os.path.join(current_dir,
                             '../lambda_functions/downgrade_user/')),
            function_name='downgrade_user',
            role=lambda_role,
            timeout=core.Duration.minutes(15),
            memory_size=2048,
            environment={'aws_region': f'{core.Aws.REGION}'})

        granular_access = _lambda.Function(
            self,
            'granular_access',
            handler='granular_access.lambda_handler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_asset(
                os.path.join(current_dir,
                             '../lambda_functions/granular_access/')),
            function_name='granular_access',
            role=lambda_role,
            timeout=core.Duration.minutes(15),
            memory_size=2048,
            environment={'aws_region': f'{core.Aws.REGION}'})

        quicksight_event_rule = events.Rule(
            self,
            'QuickSightCWEventRule',
            description=
            'CloudWatch rule to detect new QuickSight user creation',
            rule_name='qs-gc-user-creation',
            targets=[targets.LambdaFunction(user_init)],
            event_pattern=events.EventPattern(
                source=['aws.quicksight'],
                detail_type=['AWS Service Event via CloudTrail'],
                detail={
                    "eventSource": ["quicksight.amazonaws.com"],
                    "eventName": ["CreateUser"]
                }))

        quicksight_schedule_rule = events.Rule(
            self,
            'quicksight_schedule_rule',
            description=
            'CloudWatch rule to run QS objects/groups assignment every hour',
            rule_name='qs-gc-every-hour',
            schedule=events.Schedule.cron(minute="0"),
            targets=[
                targets.LambdaFunction(check_team_members),
                targets.LambdaFunction(granular_access)
            ])

        quicksight_assume_condition_object = {
            "StringEquals": {
                "SAML:aud": "https://signin.aws.amazon.com/saml"
            }
        }

        quicksight_federated_prin_with_condition_obj = iam.FederatedPrincipal(
            f'arn:aws:iam::{core.Aws.ACCOUNT_ID}:saml-provider/saml',
            quicksight_assume_condition_object, 'sts:AssumeRoleWithSAML')

        quicksight_resource_scope = '${aws:userid}'
        quicksight_reader_saml_inline_policies = {
            'AllowQuicksightAccessSAML':
            iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=['quicksight:CreateReader'],
                    resources=[
                        f'arn:aws:quicksight::{core.Aws.ACCOUNT_ID}:user/{quicksight_resource_scope}'
                    ])
            ])
        }

        quicksight_users = iam.Role(
            self,
            id=
            f"quicksight-fed-{prefix}-users",  # this is the default group with no access
            description='Role for the quicksight reader SAML',
            role_name=f"quicksight-fed-{prefix}-users",
            max_session_duration=core.Duration.seconds(3600),
            assumed_by=quicksight_federated_prin_with_condition_obj,
            inline_policies=quicksight_reader_saml_inline_policies)
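
The Lambda sources under ../lambda_functions/ are not included in this snippet. A minimal sketch, assuming a handler that only loads the mappings written to SSM above (the parameter names come from the stack; everything else is illustrative):

import json
import os

import boto3

ssm = boto3.client("ssm", region_name=os.environ.get("aws_region"))


def lambda_handler(event, context):
    """Read the group/report, group/role and group/namespace mappings written by the stack."""
    access = json.loads(ssm.get_parameter(Name="/qs/config/access")["Parameter"]["Value"])
    roles = json.loads(ssm.get_parameter(Name="/qs/config/roles")["Parameter"]["Value"])
    namespaces = json.loads(ssm.get_parameter(Name="/qs/config/ns")["Parameter"]["Value"])

    for permission in access["Permissions"]:
        group = permission["Group_Name"]
        # A real handler would grant QuickSight dashboard permissions here.
        print(f"{group} ({roles.get(group)} in {namespaces.get(group)}): {permission['Reports']}")

    return {"status": "ok"}
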
Beispiel #13
0
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
        vpc = ec2.Vpc(self,
                      "vpc",
                      cidr=props['vpc_CIDR'],
                      max_azs=3,
                      subnet_configuration=[{
                          'cidrMask': 28,
                          'name': 'public',
                          'subnetType': ec2.SubnetType.PUBLIC
                      }, {
                          'cidrMask': 28,
                          'name': 'private',
                          'subnetType': ec2.SubnetType.PRIVATE
                      }, {
                          'cidrMask': 28,
                          'name': 'db',
                          'subnetType': ec2.SubnetType.ISOLATED
                      }])

        rds_subnetGroup = rds.SubnetGroup(
            self,
            "rds_subnetGroup",
            description=
            f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED))

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
        ##TODO:ADD Aurora Serverless Option
        rds_instance = rds.DatabaseCluster(
            self,
            'wordpress-db',
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_2),
            instances=1,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                enable_performance_insights=props[
                    'rds_enable_performance_insights'],
                instance_type=ec2.InstanceType(
                    instance_type_identifier=props['rds_instance_type'])),
            subnet_group=rds_subnetGroup,
            storage_encrypted=props['rds_storage_encrypted'],
            backup=rds.BackupProps(retention=core.Duration.days(
                props['rds_automated_backup_retention_days'])))

        EcsToRdsSecurityGroup = ec2.SecurityGroup(
            self,
            "EcsToRdsSecurityGroup",
            vpc=vpc,
            description="Allow WordPress containers to talk to RDS")

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
        db_cred_generator = _lambda.Function(
            self,
            'db_creds_generator',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='db_creds_generator.handler',
            code=_lambda.Code.asset('lambda/db_creds_generator'),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED
            ),  #vpc.select_subnets(subnet_type = ec2.SubnetType("ISOLATED")).subnets ,
            environment={
                'SECRET_NAME': rds_instance.secret.secret_name,
            })

        #Set Permissions and Sec Groups
        rds_instance.connections.allow_from(
            EcsToRdsSecurityGroup,
            ec2.Port.tcp(3306))  #Open hole to RDS in RDS SG

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
        file_system = efs.FileSystem(
            self,
            "MyEfsFileSystem",
            vpc=vpc,
            encrypted=True,  # file system is not encrypted by default
            lifecycle_policy=props['efs_lifecycle_policy'],
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.BURSTING,
            removal_policy=core.RemovalPolicy(props['efs_removal_policy']),
            enable_automatic_backups=props['efs_automatic_backups'])

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html?highlight=ecs%20cluster#aws_cdk.aws_ecs.Cluster
        cluster = ecs.Cluster(
            self,
            "Cluster",
            vpc=vpc,
            container_insights=props['ecs_enable_container_insights'])

        if props['deploy_bastion_host']:
            #ToDo: Deploy bastion host with a key file
            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
            bastion_host = ec2.BastionHostLinux(self, 'bastion_host', vpc=vpc)
            rds_instance.connections.allow_from(bastion_host,
                                                ec2.Port.tcp(3306))

            #######################
            ### Developer Tools ###
            # SFTP into the EFS Shared File System

            NetToolsSecret = secretsmanager.Secret(
                self,
                "NetToolsSecret",
                generate_secret_string=secretsmanager.SecretStringGenerator(
                    secret_string_template=json.dumps({
                        "username": '******',
                        "ip": ''
                    }),
                    generate_string_key="password",
                    exclude_characters='/"'))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
            AccessPoint = file_system.add_access_point(
                "access-point",
                path="/",
                create_acl=efs.Acl(
                    owner_uid=
                    "100",  #https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
                    owner_gid="101",
                    permissions="0755"))

            EfsVolume = ecs.Volume(
                name="efs",
                efs_volume_configuration=ecs.EfsVolumeConfiguration(
                    file_system_id=file_system.file_system_id,
                    transit_encryption="ENABLED",
                    authorization_config=ecs.AuthorizationConfig(
                        access_point_id=AccessPoint.access_point_id)))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
            NetToolsTask = ecs.FargateTaskDefinition(self,
                                                     "TaskDefinition",
                                                     cpu=256,
                                                     memory_limit_mib=512,
                                                     volumes=[EfsVolume])

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
            NetToolsContainer = NetToolsTask.add_container(
                "NetTools",
                image=ecs.ContainerImage.from_registry('netresearch/sftp'),
                command=['test:test:100:101:efs'])
            NetToolsContainer.add_port_mappings(
                ecs.PortMapping(container_port=22, protocol=ecs.Protocol.TCP))

            NetToolsContainer.add_mount_points(
                ecs.MountPoint(
                    container_path=
                    "/home/test/efs",  #ToDo build path out with username from secret
                    read_only=False,
                    source_volume=EfsVolume.name,
                ))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html?highlight=fargateservice#aws_cdk.aws_ecs.FargateService
            service = ecs.FargateService(
                self,
                "Service",
                cluster=cluster,
                task_definition=NetToolsTask,
                platform_version=ecs.FargatePlatformVersion.VERSION1_4,  #Required for EFS
            )
            #ToDo somehow store container's IP on deploy

            #Allow traffic to EFS Volume from Net Tools container
            service.connections.allow_to(file_system, ec2.Port.tcp(2049))
            #ToDo allow bastion host into container on port 22

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
            bastion_ip_locator = _lambda.Function(
                self,
                'bastion_ip_locator',
                function_name=
                f"{props['environment']}-{props['application']}-{props['unit']}-SFTP-IP",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler='bastion_ip_locator.handler',
                code=_lambda.Code.asset('lambda/bastion_ip_locator'),
                environment={
                    'CLUSTER_NAME': cluster.cluster_arn,
                    'SERVICE_NAME': service.service_name
                })

            #Give needed perms to bastion_ip_locator for reading info from ECS
            bastion_ip_locator.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["ecs:DescribeTasks"],
                    resources=[
                        #f"arn:aws:ecs:us-east-1:348757191778:service/{cluster.cluster_name}/{service.service_name}",
                        f"arn:aws:ecs:us-east-1:348757191778:task/{cluster.cluster_name}/*"
                    ]))
            bastion_ip_locator.add_to_role_policy(
                iam.PolicyStatement(actions=[
                    "ecs:ListTasks",
                ],
                                    resources=["*"],
                                    conditions={
                                        'ArnEquals': {
                                            'ecs:cluster': cluster.cluster_arn
                                        }
                                    }))

        self.output_props = props.copy()
        self.output_props["vpc"] = vpc
        self.output_props["rds_instance"] = rds_instance
        self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
        self.output_props["file_system"] = file_system
        self.output_props["cluster"] = cluster
Beispiel #14
0
    def __init__(self, scope: core.Construct, id: str,
                 api_gateway: aws_apigateway.RestApi, **kwargs):
        super().__init__(scope, id, **kwargs)

        raw_bucket = aws_s3.Bucket(self,
                                   'DataPipelineRawBucket',
                                   public_read_access=False,
                                   removal_policy=core.RemovalPolicy.DESTROY)

        processed_bucket = aws_s3.Bucket(
            self,
            'DataPipelineProcessedBucket',
            public_read_access=False,
            removal_policy=core.RemovalPolicy.DESTROY)

        # Simple 1 step data pipeline - raw_bucket => lambda_processor => processed_bucket
        ecr_image = aws_lambda.EcrImageCode.from_asset_image(
            directory=os.path.join(os.getcwd(),
                                   "startuptoolbag/app/lambda-s3-processor"))
        lambda_processor = aws_lambda.Function(
            self,
            id="lambdaS3DataProcessor",
            description="Processes Data Landed In S3",
            code=ecr_image,
            handler=aws_lambda.Handler.FROM_IMAGE,
            runtime=aws_lambda.Runtime.FROM_IMAGE,
            environment={
                'RAW_BUCKET': raw_bucket.bucket_name,
                'PROCESSED_BUCKET': processed_bucket.bucket_name
            },
            memory_size=128,
            reserved_concurrent_executions=1,
            timeout=core.Duration.seconds(900))

        notification = aws_s3_notifications.LambdaDestination(lambda_processor)
        raw_bucket.add_event_notification(aws_s3.EventType.OBJECT_CREATED,
                                          notification)

        raw_bucket.grant_read(lambda_processor)
        processed_bucket.grant_read_write(lambda_processor)

        # API Lambda which is backed by data in S3
        # This is essentially an Object Lambda - https://aws.amazon.com/blogs/aws/introducing-amazon-s3-object-lambda-use-your-code-to-process-data-as-it-is-being-retrieved-from-s3/
        # TODO: investigate when CDK supports ObjectLambda or CDK Solutions Patterns
        ecr_image = aws_lambda.EcrImageCode.from_asset_image(
            directory=os.path.join(os.getcwd(),
                                   "startuptoolbag/app/lambda-s3-server"))
        lambda_handler = aws_lambda.Function(
            self,
            id="lambdaS3Server",
            description="Handle API requests backed by S3",
            code=ecr_image,
            handler=aws_lambda.Handler.FROM_IMAGE,
            runtime=aws_lambda.Runtime.FROM_IMAGE,
            environment={'BUCKET': processed_bucket.bucket_name},
            memory_size=128,
            reserved_concurrent_executions=1,
            timeout=core.Duration.seconds(10))
        processed_bucket.grant_read(lambda_handler)

        foo_r = api_gateway.root.add_resource("data")
        foo_r.add_method('GET',
                         aws_apigateway.LambdaIntegration(lambda_handler))
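
The container image built from startuptoolbag/app/lambda-s3-processor is not included here. A minimal sketch, assuming the processor simply copies each newly created raw object into the processed bucket (the actual transformation step is an assumption):

import os
import urllib.parse

import boto3

s3 = boto3.client("s3")


def handler(event, context):
    """Copy every object reported by the S3 notification into the processed bucket."""
    processed_bucket = os.environ["PROCESSED_BUCKET"]
    for record in event.get("Records", []):
        bucket = record["s3"]["bucket"]["name"]
        key = urllib.parse.unquote_plus(record["s3"]["object"]["key"])
        s3.copy_object(Bucket=processed_bucket,
                       Key=key,
                       CopySource={"Bucket": bucket, "Key": key})
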
Beispiel #15
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 stack_log_level: str, back_end_api_name: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below
        ###################################################
        #######                                     #######
        #######     Customer Info Data Producer     #######
        #######                                     #######
        ###################################################

        # Read Lambda Code
        try:
            with open(
                    "stacks/back_end/serverless_api_producer_cust_info/lambda_src/api_producer_cust_info.py",
                    encoding="utf-8",
                    mode="r") as f:
                data_producer_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise

        data_producer_fn = _lambda.Function(
            self,
            "sqsDataProducerFn",
            function_name=f"data_producer_fn_{construct_id}",
            description=
            "Produce customer info messages with attributes and send them as a list",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(data_producer_fn_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(2),
            reserved_concurrent_executions=2,
            environment={
                "LOG_LEVEL": f"{stack_log_level}",
                "APP_ENV": "Production",
                "MAX_MSGS_TO_PRODUCE": "2",
            })

        data_producer_fn_version = data_producer_fn.latest_version
        data_producer_fn_version_alias = _lambda.Alias(
            self,
            "greeterFnAlias",
            alias_name="MystiqueAutomation",
            version=data_producer_fn_version)

        # Create Custom Loggroup for Producer
        data_producer_lg = _logs.LogGroup(
            self,
            "dataProducerLogGroup",
            log_group_name=f"/aws/lambda/{data_producer_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)

        # Restrict Produce Lambda to be invoked only from the stack owner account
        data_producer_fn.add_permission(
            "restrictLambdaInvocationToFhInOwnAccount",
            principal=_iam.AccountRootPrincipal(),
            action="lambda:InvokeFunction",
            source_account=core.Aws.ACCOUNT_ID)

        # Create API Gateway

        # Add API GW front end for the Lambda
        cust_info_api_stage_01_options = _apigw.StageOptions(
            stage_name="miztiik-cust-info-v1",
            logging_level=_apigw.MethodLoggingLevel.INFO)

        cust_info_api = _apigw.RestApi(
            self,
            "backEnd01Api",
            rest_api_name=f"{back_end_api_name}",
            deploy_options=cust_info_api_stage_01_options,
            endpoint_types=[_apigw.EndpointType.EDGE],
            description=
            f"{GlobalArgs.OWNER}: API Security Automation demonstration using - Mutual TLS"
        )

        cust_info_api_res_root = cust_info_api.root.add_resource("cust-info")
        cust_info_res_v1 = cust_info_api_res_root.add_resource("v1")
        cust_info_v1_data = cust_info_res_v1.add_resource("data")

        cust_info_v1_data_method_get = cust_info_v1_data.add_method(
            http_method="GET",
            request_parameters={
                "method.request.header.InvocationType": True,
                "method.request.path.number": True
            },
            integration=_apigw.LambdaIntegration(handler=data_producer_fn,
                                                 proxy=True))

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_1 = core.CfnOutput(
            self,
            "CustomerInfoDataProducer",
            value=
            f"https://console.aws.amazon.com/lambda/home?region={core.Aws.REGION}#/functions/{data_producer_fn.function_name}",
            description="Produce data events and push to SQS Queue.")

        output_2 = core.CfnOutput(
            self,
            "CustomerInfoProducerApi",
            value=f"{cust_info_v1_data.url}",
            description="Use your browser to fetch customer data from this API."
        )
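
api_producer_cust_info.py is read from disk above but not reproduced here. Because the GET method uses a proxy LambdaIntegration, the handler has to return an API Gateway proxy response; a minimal sketch with invented payload fields:

import json
import os
import random
import uuid


def lambda_handler(event, context):
    """Produce a small batch of fake customer-info records and return them to API Gateway."""
    max_msgs = int(os.environ.get("MAX_MSGS_TO_PRODUCE", "2"))
    customers = [{
        "customer_id": str(uuid.uuid4()),
        "score": random.randint(1, 100),
    } for _ in range(max_msgs)]
    return {
        "statusCode": 200,
        "headers": {"Content-Type": "application/json"},
        "body": json.dumps({"customers": customers}),
    }
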
Beispiel #16
0
    def __init__(self, scope: core.Construct, id: str, wiki_api_endpoint,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # DynamoDB Table (TODO: create a re-usable data model):
        queries_table = _dynamodb.Table(
            self,
            "queriesDataTable",
            partition_key=_dynamodb.Attribute(
                name="_id", type=_dynamodb.AttributeType.STRING))

        # Create AWS XRay Layer
        aws_xray_layer = _lambda.LayerVersion(
            self,
            'awsXrayLayer',
            code=_lambda.Code.from_asset(
                'lambda_src/layer_code/aws_xray_python_37.zip'),
            compatible_runtimes=[
                _lambda.Runtime.PYTHON_3_7, _lambda.Runtime.PYTHON_3_8
            ],
            license='Mystique LambdaLayer of AWS XRay, refer to AWS for license.',
            description='Layer to trace AWS Lambda calls')

        # Create Requests Layer
        requests_layer = _lambda.LayerVersion(
            self,
            'requestsLayer',
            code=_lambda.Code.from_asset(
                'lambda_src/layer_code/requests_python_37.zip'),
            compatible_runtimes=[
                _lambda.Runtime.PYTHON_3_7, _lambda.Runtime.PYTHON_3_8
            ],
            description='Python requests Layer to make HTTP calls')

        # Defines an AWS Lambda resource
        with open("lambda_src/polyglot_strangler_fig_svc.py",
                  encoding="utf8") as fp:
            polyglot_svc_fn_handler_code = fp.read()

        polyglot_svc_fn = _lambda.Function(
            self,
            id='polyglotStranglerFigService',
            function_name="polyglot_svc_fn",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(polyglot_svc_fn_handler_code),
            handler='index.lambda_handler',
            timeout=core.Duration.seconds(59),
            environment={
                'LD_LIBRARY_PATH': '/opt/python',
                'WIKI_API_ENDPOINT': wiki_api_endpoint,
                'DDB_TABLE_NAME': queries_table.table_name,
                'TRIGGER_RANDOM_FAILURES': 'True'
            },
            layers=[aws_xray_layer, requests_layer],
            tracing=_lambda.Tracing.ACTIVE)
        # Grant Lambda permissions to read and write to the DynamoDB table
        queries_table.grant_read_write_data(polyglot_svc_fn)

        ##### PUBLISH TO API GW ######

        # Enable AWS XRay Tracing at API GW
        polyglot_svc_api_stage_options = _apigw.StageOptions(
            stage_name="myst",
            logging_level=_apigw.MethodLoggingLevel.INFO,
            data_trace_enabled=True,
            metrics_enabled=True,
            tracing_enabled=True)

        # Create API Gateway
        api_01 = _apigw.RestApi(self,
                                'polyglotApiEndpoint',
                                rest_api_name='mystique-xray-tracer-api',
                                deploy_options=polyglot_svc_api_stage_options)

        v1 = api_01.root.add_resource("polyglot_svc")

        # Add resource for HTTP Endpoint: API Hosted on EC2
        polyglot_svc_api_resource_00 = v1.add_resource('wiki')
        self.polyglot_svc_api_resource_01 = polyglot_svc_api_resource_00.add_resource(
            '{query}')

        polyglot_svc_api_lambda_integration = _apigw.LambdaIntegration(
            handler=polyglot_svc_fn,
            proxy=True,
            integration_responses=[{
                "statusCode": "200"
            }],
            request_parameters={
                "integration.request.path.query": "method.request.path.query"
            })

        self.polyglot_svc_api_resource_01.add_method(
            http_method="GET",
            integration=polyglot_svc_api_lambda_integration,
            method_responses=[{
                "statusCode": "200"
            }],
            request_parameters={
                'method.request.header.Content-Type': False,
                'method.request.path.query': True
            })

        ##### MONITORING ######

        # Now let us create alarms for our Lambda Function
        # The alarm fires when more than 10 (threshold) errors occur in each of the last
        # two (evaluation periods) one-minute periods: Period=60 seconds, Eval=2, Threshold=10
        # metric_errors(): How many invocations of this Lambda fail.
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
        polyglot_svc_fn_error_alarm = polyglot_svc_fn.metric_errors(
        ).create_alarm(
            self,
            "polglotSvcAlarm",
            alarm_name="polyglot_svc_fn_error_alarm",
            threshold=10,
            evaluation_periods=2,
            comparison_operator=_cloudwatch.ComparisonOperator.
            GREATER_THAN_THRESHOLD,
            period=core.Duration.minutes(1),
            treat_missing_data=_cloudwatch.TreatMissingData.NOT_BREACHING)

        # SNS For Alerts for Polyglot Service
        polyglot_svc_support_topic = _sns.Topic(
            self,
            "polyglotSvcTopic",
            display_name="PolyglotSvc",
            topic_name="polyglotSvcSupportTopic")

        # Subscribe Polyglot Service Team Email to topic
        for email in global_args.POLYGLOT_SUPPORT_EMAIL:
            polyglot_svc_support_topic.add_subscription(
                _subs.EmailSubscription(email_address=email))

        # polyglot_svc_support_topic.add_subscription(
        #     _subs.EmailSubscription(global_args.POLYGLOT_SUPPORT_EMAIL))

        # Add the topic to the Alarm
        polyglot_svc_fn_error_alarm.add_alarm_action(
            _cw_actions.SnsAction(polyglot_svc_support_topic))

        # Create CloudWatch Dashboard for Polyglot Service Team
        polyglot_svc_dashboard = _cloudwatch.Dashboard(
            self, id="polyglotSvcDashboard", dashboard_name="Polyglot-Svc")

        polyglot_svc_fn_invocation_metric = polyglot_svc_fn.metric_invocations(
            label="Invocations",
            period=core.Duration.minutes(1),
            statistic="Sum")

        polyglot_svc_dashboard.add_widgets(
            _cloudwatch.AlarmWidget(title="Lambda-Errors",
                                    alarm=polyglot_svc_fn_error_alarm))

        polyglot_svc_dashboard.add_widgets(

            # Lambda Metrics
            # TODO: here monitor all lambda concurrency not just the working one. Limitation from CDK
            # Lambda now supports monitor single lambda concurrency, will change this after CDK support
            _cloudwatch.GraphWidget(
                title="Lambda-all-concurrent",
                left=[
                    polyglot_svc_fn.metric_all_concurrent_executions(
                        statistic="Sum",
                        period=core.Duration.minutes(1),
                        color=_cloudwatch.Color.GREEN)
                ]),
            _cloudwatch.GraphWidget(
                title="Lambda-invocations/errors/throttles",
                left=[
                    polyglot_svc_fn.metric_invocations(
                        statistic="Sum", period=core.Duration.minutes(1)),
                    polyglot_svc_fn.metric_errors(
                        statistic="Sum",
                        period=core.Duration.minutes(1),
                        color=_cloudwatch.Color.RED),
                    polyglot_svc_fn.metric_throttles(
                        statistic="Sum",
                        period=core.Duration.minutes(1),
                        color=_cloudwatch.Color.ORANGE)
                ]),
            _cloudwatch.GraphWidget(title="Lambda-duration",
                                    left=[
                                        polyglot_svc_fn.metric_duration(
                                            statistic="Average",
                                            period=core.Duration.minutes(1))
                                    ]),
            # _cloudwatch.Row(_cloudwatch.TextWidget(markdown="# XRay Profiler KPI")),
            # _cloudwatch.Row(_cloudwatch.Spacer()),
            # DynamoDB Metrics
            _cloudwatch.Row(
                _cloudwatch.GraphWidget(
                    title="DynamoDB-Write-Capacity-Units",
                    left=[
                        queries_table.metric_consumed_write_capacity_units(
                            statistic="Sum", period=core.Duration.minutes(1))
                    ]),
                _cloudwatch.GraphWidget(
                    title="DynamoDB-Read-Capacity-Units",
                    left=[
                        queries_table.metric_consumed_read_capacity_units(
                            statistic="Sum", period=core.Duration.minutes(1))
                    ])),
        )

        ###########################################
        ################# OUTPUTS #################
        ###########################################

        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{global_args.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_1 = core.CfnOutput(
            self,
            'PolyglotServiceApiUrl',
            value=f'{self.polyglot_svc_api_resource_01.url}',
            description=
            f'Call the polyglot API, replace <query> with your search term')
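
polyglot_strangler_fig_svc.py is loaded inline but not shown. A minimal sketch of a handler matching the environment above: it proxies the {query} path parameter to WIKI_API_ENDPOINT, records the query in DynamoDB, and injects random failures while TRIGGER_RANDOM_FAILURES is set (the failure rate and item shape are assumptions):

import json
import os
import random
import uuid

import boto3
import requests  # provided by the requests layer attached to the function

ddb = boto3.resource("dynamodb")


def lambda_handler(event, context):
    """Proxy the query to the wiki API, log it in DynamoDB, and occasionally fail on purpose."""
    if os.environ.get("TRIGGER_RANDOM_FAILURES") == "True" and random.random() < 0.3:
        raise RuntimeError("Injected failure for the error alarm and X-Ray demo")

    query = event["pathParameters"]["query"]
    resp = requests.get(os.environ["WIKI_API_ENDPOINT"], params={"q": query}, timeout=10)

    ddb.Table(os.environ["DDB_TABLE_NAME"]).put_item(
        Item={"_id": str(uuid.uuid4()), "query": query})

    return {"statusCode": 200,
            "body": json.dumps({"query": query, "result": resp.text[:500]})}
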
Beispiel #17
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        s3_org_data = _s3.Bucket(self,
                                 ORIGINAL_DATA_BUCKET_NAME,
                                 bucket_name=ORIGINAL_DATA_BUCKET_NAME,
                                 removal_policy=core.RemovalPolicy.RETAIN)
        s3_transformed_data = _s3.Bucket(
            self,
            TRANSFORMED_DATA_BUCKET_NAME,
            bucket_name=TRANSFORMED_DATA_BUCKET_NAME,
            removal_policy=core.RemovalPolicy.RETAIN)

        # title-read
        s3_deployment.BucketDeployment(
            self,
            "s3-deployment-{}".format(TITLE_READ),
            sources=[
                s3_deployment.Source.asset("data/{}/".format(TITLE_READ))
            ],
            destination_bucket=s3_org_data,
            destination_key_prefix="{}/".format(TITLE_READ))
        # title
        s3_deployment.BucketDeployment(
            self,
            "s3-deployment-{}".format(TITLE),
            sources=[s3_deployment.Source.asset("data/{}/".format(TITLE))],
            destination_bucket=s3_org_data,
            destination_key_prefix="{}/".format(TITLE))
        # user
        s3_deployment.BucketDeployment(
            self,
            "s3-deployment-{}".format(USER),
            sources=[s3_deployment.Source.asset("data/{}/".format(USER))],
            destination_bucket=s3_org_data,
            destination_key_prefix="{}/".format(USER))

        statement = iam.PolicyStatement(actions=[
            "s3:*", "glue:*", "iam:ListRolePolicies", "iam:GetRole",
            "iam:GetRolePolicy"
        ],
                                        resources=["*"])
        write_to_s3_policy = iam.PolicyDocument(statements=[statement])

        glue_role = iam.Role(
            self,
            'GlueCrawlerRole-dna',
            role_name='GlueCrawlerRole-dna',
            inline_policies=[write_to_s3_policy],
            assumed_by=iam.ServicePrincipal('glue.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSGlueServiceRole')
            ])

        #TODO add IAM role for ctas lambda

        dna_database = glue.Database(self,
                                     "dna-glue-database-id",
                                     database_name=GLUE_DATABASE_NAME)

        # create glue table
        title_read_table = glue.Table(
            self,
            "{}-table-id".format(TITLE_READ),
            table_name="{}_table".format(TITLE_READ).replace("-", "_"),
            database=dna_database,
            columns=[{
                "name": "USER_ID",
                "type": glue.Schema.STRING
            }, {
                "name": "ITEM_ID",
                "type": glue.Schema.STRING
            }, {
                "name": "TIMESTAMP",
                "type": glue.Schema.BIG_INT
            }, {
                "name": "TITLE",
                "type": glue.Schema.STRING
            }, {
                "name": "EVENT_TYPE",
                "type": glue.Schema.STRING
            }],
            data_format=glue.DataFormat.CSV,
            bucket=s3_org_data,
            s3_prefix=TITLE_READ)

        title_table = glue.Table(self,
                                 "{}-table-id".format(TITLE),
                                 table_name="{}_table".format(TITLE).replace(
                                     "-", "_"),
                                 database=dna_database,
                                 columns=[{
                                     "name": "ITEM_ID",
                                     "type": glue.Schema.STRING
                                 }, {
                                     "name": "CREATION_TIMESTAMP",
                                     "type": glue.Schema.BIG_INT
                                 }, {
                                     "name": "TITLE",
                                     "type": glue.Schema.STRING
                                 }, {
                                     "name": "TAG",
                                     "type": glue.Schema.STRING
                                 }],
                                 data_format=glue.DataFormat.CSV,
                                 bucket=s3_org_data,
                                 s3_prefix=TITLE)

        user_table = glue.Table(self,
                                "{}-table-id".format(USER),
                                table_name="{}_table".format(USER).replace(
                                    "-", "_"),
                                database=dna_database,
                                columns=[
                                    {
                                        "name": "USER_ID",
                                        "type": glue.Schema.STRING
                                    },
                                    {
                                        "name": "NAME",
                                        "type": glue.Schema.STRING
                                    },
                                    {
                                        "name": "EMAIL",
                                        "type": glue.Schema.STRING
                                    },
                                    {
                                        "name": "GENDER",
                                        "type": glue.Schema.STRING,
                                        "categorical": True
                                    },
                                    {
                                        "name": "AGE",
                                        "type": glue.Schema.BIG_INT,
                                        "categorical": True
                                    },
                                ],
                                data_format=glue.DataFormat.CSV,
                                bucket=s3_org_data,
                                s3_prefix=USER)

        _athena.CfnWorkGroup(self,
                             "athena_workgroup_id",
                             name=ATHENA_WORKGROUP)

        ctas_lambda_trigger = _event.Rule(
            self,
            "ctas-lambda-trigger-event-id",
            rule_name="ctas-lambda-trigger-event",
            schedule=_event.Schedule.cron(minute="10", hour="*"))

        s3_statement = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            # resources = [s3_bucket.bucket_arn, "{}/*".format(s3_bucket.bucket_arn)],
            resources=["*"],
            actions=["s3:*"])
        athena_statement = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=["*"],
            actions=["athena:StartQueryExecution", "glue:*"])

        ctas_lambda_func = _lambda.Function(
            self,
            "CTAS_query",
            function_name="CTAS_query",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset("src/lambda"),
            handler="ctas_lambda.lambda_handler",
            description="CTAS query to transform AVRO file, batch execution",
            environment={
                "BUCKET_NAME": s3_transformed_data.bucket_name,
                "DATABASE_NAME": GLUE_DATABASE_NAME,
                "ATHENA_WORKGROUP": ATHENA_WORKGROUP
            },
            timeout=core.Duration.minutes(3))
        ctas_lambda_func.add_to_role_policy(s3_statement)
        ctas_lambda_func.add_to_role_policy(athena_statement)

        ctas_lambda_trigger.add_target(
            _target.LambdaFunction(ctas_lambda_func))
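
The handler in src/lambda/ctas_lambda.py is not included. A minimal sketch of an hourly CTAS run against the workgroup, database and bucket configured above (the target table name and output prefix are placeholders):

import os

import boto3

athena = boto3.client("athena")


def lambda_handler(event, context):
    """Start a CTAS query that rewrites a CSV-backed Glue table into the transformed bucket."""
    bucket = os.environ["BUCKET_NAME"]
    query = (
        "CREATE TABLE title_read_transformed "  # placeholder target table
        f"WITH (external_location = 's3://{bucket}/title-read/', format = 'AVRO') AS "
        "SELECT * FROM title_read_table")
    # A real handler would first drop the previous target table and clean its S3 prefix.
    athena.start_query_execution(
        QueryString=query,
        QueryExecutionContext={"Database": os.environ["DATABASE_NAME"]},
        WorkGroup=os.environ["ATHENA_WORKGROUP"])
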
Beispiel #18
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Add your stack resources below
        # Create Kinesis Data Stream
        stream_data_pipe = _kinesis.Stream(
            self,
            "streamDataPipe",
            retention_period=core.Duration.hours(24),
            shard_count=1,
            stream_name="data_pipe")

        # Create an S3 Bucket for storing streaming data events
        stream_data_store = _s3.Bucket(
            self, "streamDataLake", removal_policy=core.RemovalPolicy.DESTROY)

        # Read Lambda Code
        try:
            with open(
                    "advanced_use_cases/lambda_src/stream_record_consumer.py",
                    mode="r") as f:
                stream_consumer_fn_code = f.read()
        except OSError:
            print("Unable to read lambda function code")

        # Deploy the lambda function
        stream_consumer_fn = _lambda.Function(
            self,
            "streamConsumerFn",
            function_name="stream_consumer_fn",
            description=
            "Process streaming data events from kinesis and store in S3",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(stream_consumer_fn_code),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "BUCKET_NAME": f"{stream_data_store.bucket_name}"
            })

        # Update Lambda Permissions To Use Stream
        stream_data_pipe.grant_read(stream_consumer_fn)

        # Add permissions to lambda to write to S3
        roleStmt1 = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[f"{stream_data_store.bucket_arn}/*"],
            actions=["s3:PutObject"])
        roleStmt1.sid = "AllowLambdaToWriteToS3"
        stream_consumer_fn.add_to_role_policy(roleStmt1)

        # Create Custom Loggroup for Consumer
        stream_consumer_lg = _logs.LogGroup(
            self,
            "streamConsumerLogGroup",
            log_group_name=f"/aws/lambda/{stream_consumer_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)

        # Create New Kinesis Event Source
        stream_data_pipe_event_source = _lambda_event_sources.KinesisEventSource(
            stream=stream_data_pipe,
            starting_position=_lambda.StartingPosition.LATEST,
            batch_size=1)

        # Attach Kinesis Event Source To Lambda
        stream_consumer_fn.add_event_source(stream_data_pipe_event_source)

        ########################################
        #######                          #######
        #######   Stream Data Producer   #######
        #######                          #######
        ########################################

        # Read Lambda Code
        try:
            with open("advanced_use_cases/lambda_src/stream_data_producer.py",
                      mode="r") as f:
                data_producer_fn_code = f.read()
        except OSError:
            print("Unable to read lambda function code")

        # Deploy the lambda function
        data_producer_fn = _lambda.Function(
            self,
            "streamDataProducerFn",
            function_name="data_producer_fn",
            description=
            "Produce streaming data events and push to Kinesis stream",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(data_producer_fn_code),
            timeout=core.Duration.seconds(60),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": "INFO",
                "STREAM_NAME": f"{stream_data_pipe.stream_name}"
            })

        # Grant our Lambda Producer privileges to write to Kinesis Data Stream
        stream_data_pipe.grant_read_write(data_producer_fn)

        # Create Custom Loggroup for Producer
        data_producer_lg = _logs.LogGroup(
            self,
            "dataProducerLogGroup",
            log_group_name=f"/aws/lambda/{data_producer_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)
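
stream_record_consumer.py is read from disk above but not reproduced. A minimal sketch of a consumer that decodes each Kinesis record and writes it to the data lake bucket (the object key layout is an assumption):

import base64
import datetime
import json
import os
import uuid

import boto3

s3 = boto3.client("s3")


def lambda_handler(event, context):
    """Decode every Kinesis record in the batch and persist it as a JSON object in S3."""
    bucket = os.environ["BUCKET_NAME"]
    for record in event.get("Records", []):
        payload = base64.b64decode(record["kinesis"]["data"]).decode("utf-8")
        key = f"stream-events/dt={datetime.date.today()}/{uuid.uuid4()}.json"
        s3.put_object(Bucket=bucket, Key=key, Body=json.dumps({"event": payload}))
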
Beispiel #19
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # Create the STS Lambda access role
        #  action -> statement -> policy -> role -> attach lambda
        actions = [
            "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents",
            "sts:AssumeRole"
        ]

        policyStatement = PolicyStatement(actions=actions, effect=Effect.ALLOW)
        policyStatement.add_all_resources()

        policy_name = "{}-policy".format(Constant.PROJECT_NAME)
        sts_policy = Policy(self, policy_name, policy_name=policy_name)

        sts_policy.add_statements(policyStatement)

        role_name = "{}-role".format(Constant.PROJECT_NAME)
        access_role = Role(self,
                           role_name,
                           role_name=role_name,
                           assumed_by=ServicePrincipal('lambda.amazonaws.com'))

        sts_policy.attach_to_role(access_role)

        # Create the thum Lambda access role
        #  action -> statement -> policy -> role
        thum_actions = [
            "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents",
            "s3:PutObject"
        ]

        thum_policyStatement = PolicyStatement(actions=thum_actions,
                                               effect=Effect.ALLOW)
        thum_policyStatement.add_all_resources()

        thum_policy_name = "{}-policy-thum".format(Constant.PROJECT_NAME)
        thum_policy = Policy(self,
                             thum_policy_name,
                             policy_name=thum_policy_name)

        thum_policy.add_statements(thum_policyStatement)

        thum_role_name = "{}-role-thum".format(Constant.PROJECT_NAME)
        thum_access_role = Role(
            self,
            thum_role_name,
            role_name=thum_role_name,
            assumed_by=ServicePrincipal('lambda.amazonaws.com'))

        thum_policy.attach_to_role(thum_access_role)

        # Create the role used for S3 put access
        #  action -> statement -> policy -> role
        s3_actions = [
            "s3:PutObject",
            "s3:GetObject",
            "s3:ListBucket",
            "s3:PutObjectTagging",
            "s3:PutObjectAcl",
        ]
        s3_policyStatement = PolicyStatement(actions=s3_actions,
                                             effect=Effect.ALLOW)
        s3_policyStatement.add_all_resources()

        s3_policy_name = "{}-policy-s3".format(Constant.PROJECT_NAME)
        s3_policy = Policy(self, s3_policy_name, policy_name=s3_policy_name)

        s3_policy.add_statements(s3_policyStatement)

        s3_role_name = "{}-role-s3".format(Constant.PROJECT_NAME)
        s3_access_role = Role(self,
                              s3_role_name,
                              role_name=s3_role_name,
                              assumed_by=ArnPrincipal(access_role.role_arn))

        s3_policy.attach_to_role(s3_access_role)

        # Create the STS Lambda
        sts_lambda = _lambda.Function(
            self,
            'sts',
            function_name='sts',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='auth.handler',
            timeout=Duration.minutes(1),
            role=access_role,
        )
        sts_lambda.add_environment("role_to_assume_arn",
                                   s3_access_role.role_arn)

        base_api = apigw.RestApi(
            self,
            'Endpoint',
            endpoint_types=[EndpointType.REGIONAL],
        )
        example_entity = base_api.root.add_resource('auth')
        example_entity_lambda_integration = apigw.LambdaIntegration(
            sts_lambda,
            proxy=False,
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin':
                    "'*'",
                }
            }])
        example_entity.add_method(
            'GET',
            example_entity_lambda_integration,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])

        self.add_cors_options(example_entity)

        # Create the thumbnail Lambda
        layer_cv2 = _lambda.LayerVersion(
            self,
            'cv2',
            code=_lambda.Code.from_bucket(
                s3.Bucket.from_bucket_name(self,
                                           "cdk-data-layer",
                                           bucket_name='nowfox'),
                'cdk-data/cv2.zip'),
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
        )

        lambda_thum = _lambda.Function(
            self,
            'thum',
            function_name='thum',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='thum.handler',
            timeout=Duration.minutes(1),
            role=thum_access_role,
        )
        lambda_thum.add_environment("frame_second", "3")
        lambda_thum.add_layers(layer_cv2)

        # Create the bucket for storing uploads
        s3_bucket_name = "{}-{}".format("upload", self._get_UUID(4))
        s3_upload = s3.Bucket(
            self,
            id=s3_bucket_name,
            bucket_name=s3_bucket_name,
            # access_control=s3.BucketAccessControl.PUBLIC_READ,  # not recommended: it also grants list permission; public_read_access below does not
            public_read_access=True,
            removal_policy=core.RemovalPolicy.DESTROY,  # TODO: destroy for test
            # removal_policy=core.RemovalPolicy.RETAIN
        )
        notification = aws_s3_notifications.LambdaDestination(lambda_thum)
        s3_filter = s3.NotificationKeyFilter(suffix=".mp4")
        s3_upload.add_event_notification(s3.EventType.OBJECT_CREATED_PUT,
                                         notification, s3_filter)
        s3_upload.add_cors_rule(allowed_methods=[
            HttpMethods.POST, HttpMethods.PUT, HttpMethods.GET
        ],
                                allowed_origins=["*"],
                                allowed_headers=["*"])
        '''
        #Create the upload Lambda
        lambda_upload = _lambda.Function(
            self, 'upload',function_name='upload',
            runtime=_lambda.Runtime.JAVA_8,
            code=_lambda.Code.from_bucket(s3.Bucket.from_bucket_name(self, "cdk-data-upload-jar", bucket_name='nowfox'),
                                          'cdk-data/fileupload.jar'),
            handler='fileupload.FileUploadFunctionHandler::handleRequest',
            timeout=Duration.minutes(1),
            memory_size=512,
            role=access_role,
        )
        lambda_upload.add_environment("bucket", s3_bucket_name)
        lambda_upload.add_environment("region", "cn-northwest-1")
        '''
        core.CfnOutput(self,
                       "authUrl",
                       value=base_api.url + "auth",
                       description="authUrl")
        core.CfnOutput(self,
                       "S3BucketName",
                       value=s3_bucket_name,
                       description="S3BucketName")
Beispiel #20
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        lambda_greetings = _lambda.Function(self,
                                            'GreetingsHandler',
                                            runtime=_lambda.Runtime.PYTHON_3_7,
                                            code=_lambda.Code.asset('lambda'),
                                            handler='greetings.greet',
                                            memory_size=128)
        api_stage_options = api_gateway.StageOptions(stage_name="dev")
        api = api_gateway.LambdaRestApi(self,
                                        'Endpoint',
                                        handler=lambda_greetings,
                                        deploy_options=api_stage_options)

        # create dynamo table
        demo_table = aws_dynamodb.Table(
            self,
            "ToDos_table",
            partition_key=aws_dynamodb.Attribute(
                name="id", type=aws_dynamodb.AttributeType.STRING))
        get_todos_handler = _lambda.Function(
            self,
            'GetTodos',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='todos.get',
            memory_size=128)
        get_todos_handler.add_environment("TABLE_NAME", demo_table.table_name)
        demo_table.grant_read_data(get_todos_handler)

        get_todos_integration = api_gateway.LambdaIntegration(
            get_todos_handler)

        #/todos GET = lists the todos
        todos = api.root.add_resource("todos")
        todos.add_method("GET", get_todos_integration)

        get_todo_handler = _lambda.Function(self,
                                            'GetTodo',
                                            runtime=_lambda.Runtime.PYTHON_3_7,
                                            code=_lambda.Code.asset('lambda'),
                                            handler='todo.get',
                                            memory_size=128)

        get_todo_handler.add_environment("TABLE_NAME", demo_table.table_name)
        demo_table.grant_read_data(get_todo_handler)

        put_todo_handler = _lambda.Function(self,
                                            'PutTodo',
                                            runtime=_lambda.Runtime.PYTHON_3_7,
                                            code=_lambda.Code.asset('lambda'),
                                            handler='todo.put',
                                            memory_size=128)
        put_todo_handler.add_environment("TABLE_NAME", demo_table.table_name)
        demo_table.grant_write_data(put_todo_handler)

        delete_todo_handler = _lambda.Function(
            self,
            'DeleteTodo',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset('lambda'),
            handler='todo.delete',
            memory_size=128)

        delete_todo_handler.add_environment("TABLE_NAME",
                                            demo_table.table_name)
        demo_table.grant_write_data(delete_todo_handler)

        get_todo_integration = api_gateway.LambdaIntegration(get_todo_handler)
        put_todo_integration = api_gateway.LambdaIntegration(put_todo_handler)
        delete_todo_integration = api_gateway.LambdaIntegration(
            delete_todo_handler)

        #/todo path
        todo = api.root.add_resource("todo")

        #/todo/{todoId}
        todo_gd = todo.add_resource("{todoId}")
        #/todo/{todoId} GET
        todo_gd.add_method("GET", get_todo_integration)
        #/todo/{todoId} DELETE = deletes the todo
        todo_gd.add_method("DELETE", delete_todo_integration)

        #/todo/new
        todo_p = todo.add_resource("new")
        todo_p.add_method("PUT", put_todo_integration)
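
The handlers referenced above (todos.get, todo.get, todo.put, todo.delete) live in the lambda asset directory and are not part of the stack. A minimal sketch of todos.get, assuming it simply scans the table named by TABLE_NAME and returns an API Gateway proxy response, might look like this:

import json
import os

import boto3

table = boto3.resource("dynamodb").Table(os.environ["TABLE_NAME"])


def get(event, context):
    # A scan is fine for a demo-sized table; use Query with pagination for real workloads
    items = table.scan().get("Items", [])
    return {
        "statusCode": 200,
        "headers": {"Content-Type": "application/json"},
        "body": json.dumps(items, default=str),  # default=str handles Decimal values
    }
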
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        bucket_with_sns = s3.Bucket(self,
                                    "bucket-with-sns-integration",
                                    removal_policy=core.RemovalPolicy.DESTROY)

        bucket_with_lambda = s3.Bucket(
            self,
            "bucket-with-lambda-integration",
            removal_policy=core.RemovalPolicy.DESTROY)

        exchange_topic = sns.Topic(self, "lambda-info-topic")

        bucket_with_sns.add_event_notification(
            event=s3.EventType.OBJECT_CREATED,
            dest=s3_notifications.SnsDestination(exchange_topic))

        measurement_table = dynamodb.Table(
            self,
            "measurement-table",
            partition_key=dynamodb.Attribute(
                name="PK", type=dynamodb.AttributeType.STRING),
            sort_key=dynamodb.Attribute(name="SK",
                                        type=dynamodb.AttributeType.STRING),
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY)

        s3_event_generator = _lambda.Function(
            self,
            "s3-event-generator",
            code=_lambda.Code.from_asset(
                path=os.path.join(os.path.dirname(__file__), "..", "src")),
            environment={
                "BUCKET_WITH_LAMBDA": bucket_with_lambda.bucket_name,
                "BUCKET_WITH_SNS": bucket_with_sns.bucket_name,
            },
            handler="s3_event_generator.lambda_handler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(300),
            memory_size=1024,
        )

        bucket_with_lambda.grant_write(s3_event_generator)
        bucket_with_sns.grant_write(s3_event_generator)

        measure_lambda = _lambda.Function(
            self,
            "measure-lambda",
            code=_lambda.Code.from_asset(
                path=os.path.join(os.path.dirname(__file__), "..", "src")),
            environment={"MEASUREMENT_TABLE": measurement_table.table_name},
            handler="measure_lambda.lambda_handler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(50),
            memory_size=1024,
        )

        measurement_table.grant_read_write_data(measure_lambda)

        measure_lambda.add_event_source(
            lambda_event_sources.SnsEventSource(exchange_topic))

        measure_lambda.add_event_source(
            lambda_event_sources.S3EventSource(
                bucket=bucket_with_lambda,
                events=[s3.EventType.OBJECT_CREATED]))

        ssm.StringParameter(self,
                            "bucket-with-sns-parameter",
                            string_value=bucket_with_sns.bucket_name,
                            parameter_name=BUCKET_WITH_SNS_PARAMETER)

        ssm.StringParameter(self,
                            "bucket-with-lambda-parameter",
                            string_value=bucket_with_lambda.bucket_name,
                            parameter_name=BUCKET_WITH_LAMBDA_PARAMETER)

        ssm.StringParameter(self,
                            "measurement-table-parameter",
                            string_value=measurement_table.table_name,
                            parameter_name=MEASUREMENT_TABLE_PARAMETER)

        ssm.StringParameter(self,
                            "generator-function-name-parameter",
                            string_value=s3_event_generator.function_name,
                            parameter_name=GENERATOR_FUNCTION_NAME_PARAMETER)
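
The stack publishes its resource names through SSM String Parameters (the *_PARAMETER constants are assumed to be defined at module level). A test script or the generator Lambda could resolve them at runtime roughly like this; the example parameter name in the comment is purely illustrative:

import boto3

ssm = boto3.client("ssm")


def get_parameter(name: str) -> str:
    # Plain string parameters, so no decryption is needed
    return ssm.get_parameter(Name=name)["Parameter"]["Value"]


# e.g. bucket_with_sns = get_parameter("/measurements/bucket-with-sns")  # hypothetical name
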
Beispiel #22
0
    def __init__(
        self,
        scope: cdk.Construct,
        id: str,
        cognito_user_pool: cognito.UserPool,
        s3_bucket_name: str,
        create_configuration_lambda_role_arn: str,
        redis: ec.CfnCacheCluster,
        domain_name: str,
        **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)

        config_yaml = yaml.load(open("config.yaml"), Loader=yaml.FullLoader)
        spoke_accounts = config_yaml.get("spoke_accounts", [])

        cognito_user_pool_client = cognito.UserPoolClient(
            self,
            "UserPoolClient",
            user_pool=cognito_user_pool,
            generate_secret=True,
            supported_identity_providers=[
                cognito.UserPoolClientIdentityProvider.COGNITO
            ],
            prevent_user_existence_errors=True,
            o_auth=cognito.OAuthSettings(
                callback_urls=[
                    "https://" + domain_name + "/auth",
                    "https://" + domain_name + "/oauth2/idpresponse",
                ],
                logout_urls=["https://" + domain_name + "/logout"],
                flows=cognito.OAuthFlows(
                    authorization_code_grant=True, implicit_code_grant=True
                ),
                scopes=[cognito.OAuthScope.OPENID, cognito.OAuthScope.EMAIL],
            ),
            auth_flows=cognito.AuthFlow(user_password=True, user_srp=True),
        )

        describe_cognito_user_pool_client = cr.AwsCustomResource(
            self,
            "UserPoolClientIDResource",
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE
            ),
            on_create=cr.AwsSdkCall(
                service="CognitoIdentityServiceProvider",
                action="describeUserPoolClient",
                parameters={
                    "UserPoolId": cognito_user_pool.user_pool_id,
                    "ClientId": cognito_user_pool_client.user_pool_client_id,
                },
                physical_resource_id=cr.PhysicalResourceId.of(
                    cognito_user_pool_client.user_pool_client_id
                ),
            ),
            install_latest_aws_sdk=True,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        cognito_user_pool_client_secret = (
            describe_cognito_user_pool_client.get_response_field(
                "UserPoolClient.ClientSecret"
            )
        )

        imported_create_configuration_lambda_role = iam.Role.from_role_arn(
            self,
            "ImportedCreateConfigurationFileLambdaRole",
            role_arn=create_configuration_lambda_role_arn,
        )

        jwt_secret = config_yaml["jwt_secret"]

        config_secret_dict = {
            "oidc_secrets": {
                "client_id": cognito_user_pool_client.user_pool_client_id,
                "secret": cognito_user_pool_client_secret,
                "client_scope": ["email", "openid"],
            },
            "jwt_secret": jwt_secret,
        }

        config_secret_yaml = yaml.dump(
            config_secret_dict,
            explicit_start=True,
            default_flow_style=False,
        )

        config_secret = cr.AwsCustomResource(
            self,
            "ConfigSecretResource",
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE
            ),
            on_update=cr.AwsSdkCall(
                service="SecretsManager",
                action="updateSecret",
                parameters={
                    "SecretId": CONFIG_SECRET_NAME,
                    "SecretString": config_secret_yaml,
                },
                physical_resource_id=cr.PhysicalResourceId.from_response("Name"),
            ),
            on_create=cr.AwsSdkCall(
                service="SecretsManager",
                action="createSecret",
                parameters={
                    "Name": CONFIG_SECRET_NAME,
                    "Description": "Sensitive configuration parameters for ConsoleMe",
                    "SecretString": config_secret_yaml,
                },
                physical_resource_id=cr.PhysicalResourceId.from_response("Name"),
            ),
            on_delete=cr.AwsSdkCall(
                service="SecretsManager",
                action="deleteSecret",
                parameters={
                    "SecretId": CONFIG_SECRET_NAME,
                    "ForceDeleteWithoutRecovery": True,
                },
            ),
            install_latest_aws_sdk=True,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        create_configuration_lambda = lambda_.Function(
            self,
            "CreateConfigurationFileLambda",
            code=lambda_.Code.from_asset("resources/create_config_lambda"),
            handler="index.handler",
            timeout=cdk.Duration.seconds(30),
            layers=[create_dependencies_layer(self, "create_config_lambda")],
            runtime=lambda_.Runtime.PYTHON_3_8,
            role=imported_create_configuration_lambda_role,
            environment={
                "DEPLOYMENT_BUCKET": s3_bucket_name,
                "OIDC_METADATA_URL": "https://cognito-idp."
                + self.region
                + ".amazonaws.com/"
                + cognito_user_pool.user_pool_id
                + "/.well-known/openid-configuration",
                "REDIS_HOST": redis.attr_redis_endpoint_address,
                "SES_IDENTITY_ARN": "arn:aws:ses:"
                + self.region
                + ":"
                + self.account
                + ":identity/"
                + domain_name,
                "SUPPORT_CHAT_URL": "https://discord.gg/nQVpNGGkYu",
                "APPLICATION_ADMIN": "consoleme_admin",
                "ACCOUNT_NUMBER": self.account,
                "ISSUER": domain_name,
                "SPOKE_ACCOUNTS": ",".join(spoke_accounts),
                "CONFIG_SECRET_NAME": CONFIG_SECRET_NAME,
            },
        )

        create_configuration_resource_provider = cr.Provider(
            self,
            "CreateConfigurationFileProvider",
            on_event_handler=create_configuration_lambda,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        create_configuration_lambda_resource = cdk.CustomResource(
            self,
            "CreateConfigurationFile",
            service_token=create_configuration_resource_provider.service_token,
            removal_policy=cdk.RemovalPolicy.DESTROY,
            properties={"UUID": str(uuid4())},
        )

        create_configuration_lambda_resource.node.add_dependency(config_secret)
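
Neither create_dependencies_layer nor the handler behind resources/create_config_lambda is shown in this excerpt. As a rough, purely illustrative sketch (the real ConsoleMe handler does considerably more), a Provider-backed custom resource handler that renders a configuration file from its environment and uploads it to the deployment bucket could look like this:

import os

import boto3

s3 = boto3.client("s3")


def handler(event, context):
    request_type = event["RequestType"]  # Create / Update / Delete
    if request_type in ("Create", "Update"):
        # Render a simple key: value config from the environment (illustrative only)
        config = "\n".join(
            f"{key.lower()}: {value}"
            for key, value in os.environ.items()
            if key != "DEPLOYMENT_BUCKET"
        )
        s3.put_object(
            Bucket=os.environ["DEPLOYMENT_BUCKET"],
            Key="config.yaml",
            Body=config.encode("utf-8"),
        )
    # The Provider framework only needs a physical resource id back
    return {"PhysicalResourceId": "create-configuration-file"}
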
    def __init__(self, scope: core.Construct, construct_id: str, configs, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        bucket = s3.Bucket(self, "ScraperBucket-{}".format(configs['stage']),
            bucket_name = 'scraper-{}-{}'.format(configs['stage'],configs['aws_account'])
        )

        scraper_fn = aws_lambda.Function(self, "ScraperFn-{}".format(configs['stage']),
            function_name = "scraper-{}".format(configs['stage']),
            code=aws_lambda.Code.from_asset("src/fanduelscraper",
                bundling={
                    "image": aws_lambda.Runtime.PYTHON_3_8.bundling_docker_image,
                    "command": ["bash", "-c", 
                                "pip install -r requirements.txt -t /asset-output && cp -au index.py src /asset-output"
                            ]
                }
            ),
            handler="index.scraper",
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            timeout = core.Duration.seconds(300),
            memory_size = 1048,
            environment={
                "BUCKET_NAME": bucket.bucket_name
            }
        )

        scraper_handler_fn = aws_lambda.Function(self, "ScraperHandlerFn-{}".format(configs['stage']),
            function_name = "scraper-handler-{}".format(configs['stage']),
            code=aws_lambda.Code.from_asset("src/fanduelscraper",
                bundling={
                    "image": aws_lambda.Runtime.PYTHON_3_8.bundling_docker_image,
                    "command": ["bash", "-c", 
                                "pip install -r requirements.txt -t /asset-output && cp -au index.py src /asset-output"
                            ]
                }
            ),
            handler="index.scraper_handler",
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            timeout = core.Duration.seconds(300),
            memory_size = 1048,
            environment={
                "SCRAPER_FN": scraper_fn.function_name
            }
        )

        # Make sure to give the Lambda permission to write to the S3 bucket
        bucket.grant_write(scraper_fn)
        scraper_fn.grant_invoke(scraper_handler_fn)

        if(configs['stage'] == 'prod'):
            rule = events.Rule(self, "ScraperCronRule-{}".format(configs['stage']),
                schedule=events.Schedule.cron(
                    minute='0',
                    hour='*',
                    month='*',
                    week_day='*',
                    year='*'),
            )
            rule.add_target(targets.LambdaFunction(scraper_handler_fn))
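
index.scraper and index.scraper_handler are not shown here. Given that the handler function receives the scraper's name via SCRAPER_FN and is granted invoke permission, a plausible sketch of the handler is an asynchronous fan-out invoke (the payload contents are an assumption):

import json
import os

import boto3

lambda_client = boto3.client("lambda")


def scraper_handler(event, context):
    # Fire-and-forget invocation of the scraper function
    lambda_client.invoke(
        FunctionName=os.environ["SCRAPER_FN"],
        InvocationType="Event",
        Payload=json.dumps({"source": "cron"}).encode("utf-8"),
    )
    return {"statusCode": 202}
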
Beispiel #24
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        demo_vpc = aws_ec2.Vpc(self,
                               "network",
                               cidr="10.10.0.0/16",
                               max_azs=2,
                               subnet_configuration=[])

        demo_subnets = []
        demo_subnets.append(
            aws_ec2.Subnet(self,
                           'sbn-demo-1',
                           availability_zone=demo_vpc.availability_zones[0],
                           vpc_id=demo_vpc.vpc_id,
                           cidr_block='10.10.0.0/25'))
        demo_subnets.append(
            aws_ec2.Subnet(self,
                           'sbn-demo-2',
                           availability_zone=demo_vpc.availability_zones[1],
                           vpc_id=demo_vpc.vpc_id,
                           cidr_block='10.10.0.128/25'))

        demo_vpc.add_interface_endpoint(
            'secretmanager',
            service=aws_ec2.InterfaceVpcEndpointAwsService.SECRETS_MANAGER,
            subnets=aws_ec2.SubnetSelection(subnets=demo_subnets))

        db_subnet_group = aws_rds.SubnetGroup(
            self,
            'sbng-demo-rds',
            description='demo db subnet group',
            vpc=demo_vpc,
            removal_policy=core.RemovalPolicy.DESTROY,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=demo_subnets))

        db_security_group = aws_ec2.SecurityGroup(self,
                                                  'sg-demo-rds',
                                                  vpc=demo_vpc)

        db_security_group.add_ingress_rule(
            peer=aws_ec2.Peer.ipv4('10.10.0.0/16'),
            connection=aws_ec2.Port(
                protocol=aws_ec2.Protocol.TCP,
                string_representation="to allow from the vpc internal",
                from_port=3306,
                to_port=3306))

        mysql_instance = aws_rds.DatabaseInstance(
            self,
            'mys-demo-rds',
            engine=aws_rds.DatabaseInstanceEngine.MYSQL,
            vpc=demo_vpc,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=demo_subnets),
            security_groups=[db_security_group],
            iam_authentication=True)

        db_secret = mysql_instance.secret

        role_init_db = aws_iam.Role(
            self,
            'cmd_role_init_src_db',
            assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"))

        lambda_base_policy_statement = aws_iam.PolicyStatement(
            resources=['*'],
            actions=[
                'logs:CreateLogGroup',
                'logs:CreateLogStream',
                'logs:PutLogEvents',
                "ec2:CreateNetworkInterface",
                "ec2:DescribeNetworkInterfaces",
                "ec2:DeleteNetworkInterface",
            ])

        role_init_db.add_to_policy(lambda_base_policy_statement)

        role_init_db.add_to_policy(
            aws_iam.PolicyStatement(resources=[db_secret.secret_arn],
                                    actions=[
                                        "secretsmanager:GetResourcePolicy",
                                        "secretsmanager:GetSecretValue",
                                        "secretsmanager:DescribeSecret",
                                        "secretsmanager:ListSecretVersionIds"
                                    ]))

        db_user = '******'

        func_init_db = aws_lambda.Function(
            self,
            'func_init_db',
            function_name='demo-rds_func_init_db',
            handler='handler.init',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.asset('./app_stack/func_init_db'),
            role=role_init_db,
            timeout=core.Duration.seconds(10),
            allow_public_subnet=False,
            vpc=demo_vpc,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=demo_subnets),
            environment={
                'db_secret': db_secret.secret_name,
                'db_user': db_user
            })

        # When role_name is given a static value, a KMS-related exception can occur:
        # a re-created Lambda function whose role has a static name may still reference the old role, which no longer exists.
        # https://github.com/serverless/examples/issues/279
        role_test_db = aws_iam.Role(
            self,
            'demo_role_test_db',
            assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"),
            role_name=db_user)
        role_test_db.add_to_policy(lambda_base_policy_statement)
        role_test_db.add_to_policy(
            aws_iam.PolicyStatement(resources=[
                'arn:aws:rds-db:%s:%s:dbuser:*/*' % (REGION, ACCOUNT)
            ],
                                    actions=[
                                        "rds-db:connect",
                                    ]))

        role_test_db.add_to_policy(
            aws_iam.PolicyStatement(resources=[db_secret.secret_arn],
                                    actions=[
                                        "secretsmanager:GetResourcePolicy",
                                        "secretsmanager:GetSecretValue",
                                        "secretsmanager:DescribeSecret",
                                        "secretsmanager:ListSecretVersionIds"
                                    ]))

        func_test_db = aws_lambda.Function(
            self,
            'func_test_db',
            function_name='demo-rds_test_iam_auth',
            handler='handler.init',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.asset('./app_stack/func_test_db'),
            role=role_test_db,
            timeout=core.Duration.seconds(10),
            allow_public_subnet=False,
            vpc=demo_vpc,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=demo_subnets),
            environment={
                'db_secret': db_secret.secret_name,
                'db_user': db_user
            })
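
The func_test_db handler is expected to exercise RDS IAM authentication (its role carries rds-db:connect). A minimal sketch of such a connection, assuming pymysql is packaged with the function and that the RDS CA bundle path and the secret layout (the usual RDS-generated keys) hold, could be:

import json
import os

import boto3
import pymysql

rds = boto3.client("rds")
secrets = boto3.client("secretsmanager")


def init(event, context):
    # The RDS-generated secret holds the endpoint details
    secret = json.loads(
        secrets.get_secret_value(SecretId=os.environ["db_secret"])["SecretString"])

    # Short-lived IAM auth token used in place of a password
    token = rds.generate_db_auth_token(
        DBHostname=secret["host"],
        Port=int(secret["port"]),
        DBUsername=os.environ["db_user"],
    )

    connection = pymysql.connect(
        host=secret["host"],
        port=int(secret["port"]),
        user=os.environ["db_user"],
        password=token,
        ssl={"ca": "/opt/rds-combined-ca-bundle.pem"},  # CA bundle path is an assumption
        connect_timeout=5,
    )
    with connection.cursor() as cursor:
        cursor.execute("SELECT 1")
    connection.close()
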
Beispiel #25
0
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        dataset_metadata_filename: str,
        memory: int = 1024,
        timeout: int = 30,
        concurrent: int = 100,
        code_dir: str = "./",
        **kwargs: Any,
    ) -> None:
        """Define stack."""
        super().__init__(scope, id, **kwargs)

        # add cache
        if config.VPC_ID:
            vpc = ec2.Vpc.from_lookup(
                self,
                f"{id}-vpc",
                vpc_id=config.VPC_ID,
            )
        else:
            vpc = ec2.Vpc(self, f"{id}-vpc")

        sb_group = escache.CfnSubnetGroup(
            self,
            f"{id}-subnet-group",
            description=f"{id} subnet group",
            subnet_ids=[sb.subnet_id for sb in vpc.private_subnets],
        )

        lambda_function_security_group = ec2.SecurityGroup(self,
                                                           f"{id}-lambda-sg",
                                                           vpc=vpc)
        lambda_function_security_group.add_egress_rule(
            ec2.Peer.any_ipv4(),
            connection=ec2.Port(protocol=ec2.Protocol("ALL"),
                                string_representation=""),
            description="Allow lambda security group all outbound access",
        )

        cache_security_group = ec2.SecurityGroup(self,
                                                 f"{id}-cache-sg",
                                                 vpc=vpc)

        cache_security_group.add_ingress_rule(
            lambda_function_security_group,
            connection=ec2.Port(protocol=ec2.Protocol("ALL"),
                                string_representation=""),
            description=
            "Allow Lambda security group access to Cache security group",
        )

        cache = escache.CfnCacheCluster(
            self,
            f"{id}-cache",
            cache_node_type=config.CACHE_NODE_TYPE,
            engine=config.CACHE_ENGINE,
            num_cache_nodes=config.CACHE_NODE_NUM,
            vpc_security_group_ids=[cache_security_group.security_group_id],
            cache_subnet_group_name=sb_group.ref,
        )

        logs_access = iam.PolicyStatement(
            actions=[
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:PutLogEvents",
            ],
            resources=["*"],
        )
        ec2_network_access = iam.PolicyStatement(
            actions=[
                "ec2:CreateNetworkInterface",
                "ec2:DescribeNetworkInterfaces",
                "ec2:DeleteNetworkInterface",
            ],
            resources=["*"],
        )

        lambda_env = DEFAULT_ENV.copy()
        lambda_env.update(
            dict(
                MODULE_NAME="dashboard_api.main",
                VARIABLE_NAME="app",
                WORKERS_PER_CORE="1",
                LOG_LEVEL="error",
                MEMCACHE_HOST=cache.attr_configuration_endpoint_address,
                MEMCACHE_PORT=cache.attr_configuration_endpoint_port,
                DATASET_METADATA_FILENAME=dataset_metadata_filename,
            ))

        lambda_function_props = dict(
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=self.create_package(code_dir),
            handler="handler.handler",
            memory_size=memory,
            timeout=core.Duration.seconds(timeout),
            environment=lambda_env,
            security_groups=[lambda_function_security_group],
            vpc=vpc,
        )

        if concurrent:
            lambda_function_props[
                "reserved_concurrent_executions"] = concurrent

        lambda_function = aws_lambda.Function(self, f"{id}-lambda",
                                              **lambda_function_props)

        lambda_function.add_to_role_policy(s3_full_access_to_data_bucket)
        lambda_function.add_to_role_policy(logs_access)
        lambda_function.add_to_role_policy(ec2_network_access)

        # defines an API Gateway HTTP API resource backed by the Lambda function above.
        api = apigw.HttpApi(
            self,
            f"{id}-endpoint",
            default_integration=apigw_integrations.LambdaProxyIntegration(
                handler=lambda_function),
        )
        core.CfnOutput(self, "API Endpoint", value=api.url)
Beispiel #26
0
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # create s3 bucket
        s3_Bucket = s3.Bucket(self,
                              "AWS-Cookbook-Recipe-403",
                              removal_policy=RemovalPolicy.DESTROY)

        aws_s3_deployment.BucketDeployment(
            self,
            'S3Deployment',
            destination_bucket=s3_Bucket,
            sources=[aws_s3_deployment.Source.asset("./s3_content")],
            retain_on_delete=False)

        isolated_subnets = ec2.SubnetConfiguration(
            name="ISOLATED",
            subnet_type=ec2.SubnetType.PRIVATE_ISOLATED,
            cidr_mask=24)

        # create VPC
        vpc = ec2.Vpc(self,
                      'AWS-Cookbook-VPC',
                      cidr='10.10.0.0/23',
                      subnet_configuration=[isolated_subnets])

        vpc.add_interface_endpoint(
            'VPCSecretsManagerInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService(
                'secretsmanager'
            ),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False, subnet_type=ec2.SubnetType.PRIVATE_ISOLATED),
        )

        vpc.add_gateway_endpoint(
            's3GateWayEndPoint',
            service=ec2.GatewayVpcEndpointAwsService('s3'),
            subnets=[
                ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)
            ],
        )

        subnet_group = rds.SubnetGroup(
            self,
            'rds_subnet_group',
            description='VPC Subnet Group for RDS',
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                one_per_az=False, subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))

        rds_security_group = ec2.SecurityGroup(
            self,
            'rds_security_group',
            description='Security Group for the RDS Instance',
            allow_all_outbound=True,
            vpc=vpc)

        db_name = 'AWSCookbookRecipe403'

        rds_instance = rds.DatabaseInstance(
            self,
            'DBInstance',
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7_26),
            instance_type=ec2.InstanceType("m5.large"),
            vpc=vpc,
            multi_az=False,
            database_name=db_name,
            instance_identifier='awscookbook403db',
            delete_automated_backups=True,
            deletion_protection=False,
            removal_policy=RemovalPolicy.DESTROY,
            allocated_storage=8,
            subnet_group=subnet_group,
            security_groups=[rds_security_group])

        # mkdir -p lambda-layers/sqlparse/python
        # cd lambda-layers/sqlparse/python
        # pip install sqlparse --target="."
        # cd ../../../

        # create Lambda Layer
        sqlparse = aws_lambda.LayerVersion(
            self,
            "sqlparse",
            code=aws_lambda.AssetCode('lambda-layers/sqlparse'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="sqlparse",
            license=
            "https://github.com/andialbrecht/sqlparse/blob/master/LICENSE")

        pymysql = aws_lambda.LayerVersion(
            self,
            "pymysql",
            code=aws_lambda.AssetCode('lambda-layers/pymysql'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="pymysql",
            license="MIT")

        smartopen = aws_lambda.LayerVersion(
            self,
            "smartopen",
            code=aws_lambda.AssetCode('lambda-layers/smart_open'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="smartopen",
            license="MIT")

        lambda_function = aws_lambda.Function(
            self,
            'LambdaRDS',
            code=aws_lambda.AssetCode("./mysql-lambda/"),
            handler="lambda_function.lambda_handler",
            environment={
                "DB_SECRET_ARN": rds_instance.secret.secret_arn,
                "S3_BUCKET": s3_Bucket.bucket_name
            },
            layers=[sqlparse, pymysql, smartopen],
            memory_size=1024,
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            timeout=Duration.seconds(600),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))

        rds_instance.secret.grant_read(lambda_function)

        rds_instance.connections.allow_from(lambda_function.connections,
                                            ec2.Port.tcp(3306), "Ingress")

        s3_Bucket.grant_read(lambda_function)

        db_app_security_group = ec2.SecurityGroup(
            self,
            'db_app_security_group',
            description='Security Group for the DB App',
            allow_all_outbound=True,
            vpc=vpc)

        db_app_lambda_function = aws_lambda.Function(
            self,
            'LambdaApp',
            code=aws_lambda.AssetCode("./db-app-lambda/"),
            handler="lambda_function.lambda_handler",
            environment={
                "DB_HOST": rds_instance.db_instance_endpoint_address,
            },
            layers=[pymysql],
            memory_size=1024,
            security_groups=[db_app_security_group],
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            timeout=Duration.seconds(600),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))

        create_params = {
            "FunctionName": lambda_function.function_arn,
        }

        on_create = custom_resources.AwsSdkCall(
            action='invoke',
            service='Lambda',
            parameters=create_params,
            physical_resource_id=custom_resources.PhysicalResourceId.of(
                'LambdaRDS'))

        policy_statement = iam.PolicyStatement(
            actions=["lambda:InvokeFunction"],
            effect=iam.Effect.ALLOW,
            resources=[lambda_function.function_arn],
        )

        policy = custom_resources.AwsCustomResourcePolicy.from_statements(
            statements=[policy_statement])

        custom_resources.AwsCustomResource(
            self,
            'CustomResource',
            policy=policy,
            on_create=on_create,
            log_retention=logs.RetentionDays.TWO_WEEKS)

        # outputs

        CfnOutput(self, 'VpcId', value=vpc.vpc_id)

        CfnOutput(self,
                  'PyMysqlLambdaLayerArn',
                  value=pymysql.layer_version_arn)

        CfnOutput(self,
                  'RdsDatabaseId',
                  value=rds_instance.instance_identifier)

        CfnOutput(self,
                  'RdsSecurityGroup',
                  value=rds_security_group.security_group_id)

        CfnOutput(self, 'DbName', value=db_name)

        CfnOutput(self,
                  'RdsSecretArn',
                  value=rds_instance.secret.secret_full_arn)

        CfnOutput(self,
                  'RdsEndpoint',
                  value=rds_instance.db_instance_endpoint_address)

        CfnOutput(self,
                  'RdsPort',
                  value=rds_instance.db_instance_endpoint_port)

        isolated_subnets = vpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)

        CfnOutput(self,
                  'IsolatedSubnets',
                  value=', '.join(map(str, isolated_subnets.subnet_ids)))

        CfnOutput(self,
                  'DbAppFunctionRoleName',
                  value=db_app_lambda_function.role.role_name)

        CfnOutput(self,
                  'DbAppFunctionArn',
                  value=db_app_lambda_function.function_arn)

        CfnOutput(self,
                  'DbAppFunctionName',
                  value=db_app_lambda_function.function_name)

        CfnOutput(self, 'BucketName', value=s3_Bucket.bucket_name)
        CfnOutput(self,
                  'DbAppFunctionSgId',
                  value=db_app_security_group.security_group_id)
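
The handler in ./mysql-lambda/ is not included in this excerpt. Based on the layers and environment it is given (sqlparse, pymysql, smart_open, DB_SECRET_ARN, S3_BUCKET), a hedged sketch that streams a SQL file from the bucket and runs it statement by statement could look like this; the object key "seed.sql" and the secret key names are assumptions following the usual RDS-generated secret:

import json
import os

import boto3
import pymysql
import sqlparse
from smart_open import open as smart_open

secrets = boto3.client("secretsmanager")


def lambda_handler(event, context):
    secret = json.loads(
        secrets.get_secret_value(
            SecretId=os.environ["DB_SECRET_ARN"])["SecretString"])

    connection = pymysql.connect(
        host=secret["host"],
        user=secret["username"],
        password=secret["password"],
        database=secret.get("dbname"),
        port=int(secret.get("port", 3306)),
        connect_timeout=10,
    )

    # Stream the seed script straight from S3 and split it into statements
    with smart_open(f"s3://{os.environ['S3_BUCKET']}/seed.sql") as sql_file:  # key is an assumption
        statements = sqlparse.split(sql_file.read())

    with connection.cursor() as cursor:
        for statement in statements:
            if statement.strip():
                cursor.execute(statement)
    connection.commit()
    connection.close()
    return {"statements_executed": len(statements)}
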
Beispiel #27
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # -----------------------------------------------------------------------------------------------------------
        # The Simple Webservice Logic - This is what we will be monitoring
        #
        # API GW HTTP API, Lambda Fn and DynamoDB
        # https://github.com/cdk-patterns/serverless/tree/master/the-simple-webservice
        # -----------------------------------------------------------------------------------------------------------

        # DynamoDB Table
        table = dynamo_db.Table(
            self,
            "Hits",
            partition_key=dynamo_db.Attribute(
                name="path", type=dynamo_db.AttributeType.STRING),
            billing_mode=dynamo_db.BillingMode.PAY_PER_REQUEST)

        # defines an AWS  Lambda resource
        dynamo_lambda = _lambda.Function(
            self,
            "DynamoLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,  # execution environment
            handler="lambda.handler",  # file is "lambda", function is "handler"
            code=_lambda.Code.from_asset(
                "lambda"),  # Code loaded from the lambda dir
            environment={'HITS_TABLE_NAME': table.table_name})

        # grant the lambda role read/write permissions to our table
        table.grant_read_write_data(dynamo_lambda)

        # defines an API Gateway Http API resource backed by our "dynamoLambda" function.
        api = api_gw.HttpApi(self,
                             'HttpAPI',
                             default_integration=api_gw.LambdaProxyIntegration(
                                 handler=dynamo_lambda))

        core.CfnOutput(self, 'HTTP API Url', value=api.url)

        # -----------------------------------------------------------------------------------------------------------
        # Monitoring Logic Starts Here
        #
        # This is everything we need to understand the state of our system:
        # - custom metrics
        # - cloudwatch alarms
        # - custom cloudwatch dashboard
        # -----------------------------------------------------------------------------------------------------------

        # SNS Topic so we can hook things into our alerts e.g. email
        error_topic = sns.Topic(self, 'theBigFanTopic')

        ###
        # Custom Metrics
        ###

        api_gw_4xx_error_percentage = cloud_watch.MathExpression(
            expression="m1/m2*100",
            label="% API Gateway 4xx Errors",
            using_metrics={
                "m1":
                self.metric_for_api_gw(api.http_api_id, '4XXError',
                                       '4XX Errors', 'sum'),
                "m2":
                self.metric_for_api_gw(api.http_api_id, 'Count', '# Requests',
                                       'sum'),
            },
            period=core.Duration.minutes(5))

        # Gather the % of lambda invocations that error in past 5 mins
        lambda_error_perc = cloud_watch.MathExpression(
            expression="e / i * 100",
            label="% of invocations that errored, last 5 mins",
            using_metrics={
                "i":
                dynamo_lambda.metric(metric_name="Invocations",
                                     statistic="sum"),
                "e":
                dynamo_lambda.metric(metric_name="Errors", statistic="sum"),
            },
            period=core.Duration.minutes(5))

        # note: throttled requests are not counted in total num of invocations
        lambda_throttled_perc = cloud_watch.MathExpression(
            expression="t / (i + t) * 100",
            label="% of throttled requests, last 30 mins",
            using_metrics={
                "i":
                dynamo_lambda.metric(metric_name="Invocations",
                                     statistic="sum"),
                "t":
                dynamo_lambda.metric(metric_name="Throttles", statistic="sum"),
            },
            period=core.Duration.minutes(5))

        # UserErrors appear to be reported at the account level rather than the table level,
        # so these two metrics are merged until there is a definitive answer. Scoped to a
        # table, UserErrors should always show as 0, so this is still effectively a
        # system-errors count
        dynamo_db_total_errors = cloud_watch.MathExpression(
            expression="m1 + m2",
            label="DynamoDB Errors",
            using_metrics={
                "m1": table.metric_user_errors(),
                "m2": table.metric_system_errors(),
            },
            period=core.Duration.minutes(5))

        # Rather than have 2 alerts, let's create one aggregate metric
        dynamo_db_throttles = cloud_watch.MathExpression(
            expression="m1 + m2",
            label="DynamoDB Throttles",
            using_metrics={
                "m1":
                table.metric(metric_name="ReadThrottleEvents",
                             statistic="sum"),
                "m2":
                table.metric(metric_name="WriteThrottleEvents",
                             statistic="sum"),
            },
            period=core.Duration.minutes(5))
        ###
        # Alarms
        ###

        # Api Gateway

        # 4xx are user errors so a large volume indicates a problem
        cloud_watch.Alarm(self,
                          id="API Gateway 4XX Errors > 1%",
                          metric=api_gw_4xx_error_percentage,
                          threshold=1,
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # 5xx are internal server errors so we want 0 of these
        cloud_watch.Alarm(self,
                          id="API Gateway 5XX Errors > 0",
                          metric=self.metric_for_api_gw(api_id=api.http_api_id,
                                                        metric_name="5XXError",
                                                        label="5XX Errors",
                                                        stat="p99"),
                          threshold=0,
                          period=core.Duration.minutes(5),
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        cloud_watch.Alarm(self,
                          id="API p99 latency alarm >= 1s",
                          metric=self.metric_for_api_gw(api_id=api.http_api_id,
                                                        metric_name="Latency",
                                                        label="API GW Latency",
                                                        stat="p99"),
                          threshold=1000,
                          period=core.Duration.minutes(5),
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # Lambda

        # 2% of Dynamo Lambda invocations erroring
        cloud_watch.Alarm(self,
                          id="Dynamo Lambda 2% Error",
                          metric=lambda_error_perc,
                          threshold=2,
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # 1% of Lambda invocations taking longer than 1 second
        cloud_watch.Alarm(self,
                          id="Dynamo Lambda p99 Long Duration (>1s)",
                          metric=dynamo_lambda.metric_duration(),
                          period=core.Duration.minutes(5),
                          threshold=1000,
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          statistic="p99",
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # 2% of our lambda invocations are throttled
        cloud_watch.Alarm(self,
                          id="Dynamo Lambda 2% Throttled",
                          metric=lambda_throttled_perc,
                          threshold=2,
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # DynamoDB

        # DynamoDB interactions are throttled - indicates the table is under-provisioned
        cloud_watch.Alarm(self,
                          id="DynamoDB Table Reads/Writes Throttled",
                          metric=dynamo_db_throttles,
                          threshold=1,
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        # There should be 0 DynamoDB errors
        cloud_watch.Alarm(self,
                          id="DynamoDB Errors > 0",
                          metric=dynamo_db_total_errors,
                          threshold=0,
                          evaluation_periods=6,
                          datapoints_to_alarm=1,
                          treat_missing_data=cloud_watch.TreatMissingData.NOT_BREACHING) \
            .add_alarm_action(actions.SnsAction(error_topic))

        dashboard = cloud_watch.Dashboard(self, id="CloudWatchDashBoard")
        dashboard.add_widgets(
            cloud_watch.GraphWidget(title="Requests",
                                    width=8,
                                    left=[
                                        self.metric_for_api_gw(
                                            api_id=api.http_api_id,
                                            metric_name="Count",
                                            label="# Requests",
                                            stat="sum")
                                    ]),
            cloud_watch.GraphWidget(
                title="API GW Latency",
                width=8,
                stacked=True,
                left=[
                    self.metric_for_api_gw(api_id=api.http_api_id,
                                           metric_name="Latency",
                                           label="API Latency p50",
                                           stat="p50"),
                    self.metric_for_api_gw(api_id=api.http_api_id,
                                           metric_name="Latency",
                                           label="API Latency p90",
                                           stat="p90"),
                    self.metric_for_api_gw(api_id=api.http_api_id,
                                           metric_name="Latency",
                                           label="API Latency p99",
                                           stat="p99")
                ]),
            cloud_watch.GraphWidget(
                title="API GW Errors",
                width=8,
                stacked=True,
                left=[
                    self.metric_for_api_gw(api_id=api.http_api_id,
                                           metric_name="4XXError",
                                           label="4XX Errors",
                                           stat="sum"),
                    self.metric_for_api_gw(api_id=api.http_api_id,
                                           metric_name="5XXError",
                                           label="5XX Errors",
                                           stat="sum")
                ]),
            cloud_watch.GraphWidget(title="Dynamo Lambda Error %",
                                    width=8,
                                    left=[lambda_error_perc]),
            cloud_watch.GraphWidget(
                title="Dynamo Lambda Duration",
                width=8,
                stacked=True,
                left=[
                    dynamo_lambda.metric_duration(statistic="p50"),
                    dynamo_lambda.metric_duration(statistic="p90"),
                    dynamo_lambda.metric_duration(statistic="p99")
                ]),
            cloud_watch.GraphWidget(title="Dynamo Lambda Throttle %",
                                    width=8,
                                    left=[lambda_throttled_perc]),
            cloud_watch.GraphWidget(
                title="DynamoDB Latency",
                width=8,
                stacked=True,
                left=[
                    table.metric_successful_request_latency(
                        dimensions={
                            "TableName": table.table_name,
                            "Operation": "GetItem"
                        }),
                    table.metric_successful_request_latency(
                        dimensions={
                            "TableName": table.table_name,
                            "Operation": "UpdateItem"
                        }),
                    table.metric_successful_request_latency(
                        dimensions={
                            "TableName": table.table_name,
                            "Operation": "PutItem"
                        }),
                    table.metric_successful_request_latency(
                        dimensions={
                            "TableName": table.table_name,
                            "Operation": "DeleteItem"
                        }),
                    table.metric_successful_request_latency(
                        dimensions={
                            "TableName": table.table_name,
                            "Operation": "Query"
                        }),
                ]),
            cloud_watch.GraphWidget(
                title="DynamoDB Consumed Read/Write Units",
                width=8,
                stacked=False,
                left=[
                    table.metric(metric_name="ConsumedReadCapacityUnits"),
                    table.metric(metric_name="ConsumedWriteCapacityUnits")
                ]),
            cloud_watch.GraphWidget(
                title="DynamoDB Throttles",
                width=8,
                stacked=True,
                left=[
                    table.metric(metric_name="ReadThrottleEvents",
                                 statistic="sum"),
                    table.metric(metric_name="WriteThrottleEvents",
                                 statistic="sum")
                ]),
        )
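
self.metric_for_api_gw is used throughout this example but defined outside the excerpt. A sketch consistent with how it is called (api_id, metric_name, label, stat) would simply wrap a CloudWatch metric in the AWS/ApiGateway namespace, scoped to the HTTP API's ApiId dimension:

    @staticmethod
    def metric_for_api_gw(api_id: str, metric_name: str, label: str,
                          stat: str = "sum") -> cloud_watch.Metric:
        """Build a CloudWatch metric scoped to one HTTP API."""
        return cloud_watch.Metric(
            namespace="AWS/ApiGateway",
            metric_name=metric_name,
            dimensions={"ApiId": api_id},
            label=label,
            statistic=stat,
            period=core.Duration.minutes(5),
        )
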
Beispiel #28
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create repo for Amplify static site
        amplify_repo = codecommit.Repository(
            self,
            'amplify-wild-rydes-repo',
            repository_name='amplify-wild-rydes',
            description='Repo for the Wild Rydes static site for Amplify')

        # Create repo for holding the code for this project
        app_repo = codecommit.Repository(
            self,
            'app-serverless-workshop-repo',
            repository_name='app-wild-rydes-serverless-workshop',
            description=
            'Repo for project from webapp.serverlessworkshops.io/staticwebhosting/overview/'
        )

        # IAM Role & Policy for Amplify
        amplify_role = iam.Role(
            self,
            'amplify-wild-rydes-role',
            role_name='amplify-wild-rydes-role',
            assumed_by=iam.ServicePrincipal('amplify.amazonaws.com'))

        # Amplify
        amplify_static_site = amplify.App(
            self,
            'amplify-wild-rydes-site',
            source_code_provider=amplify.CodeCommitSourceCodeProvider(
                repository=amplify_repo),
            description='Wild Rydes Amplify Static Site',
            role=amplify_role,
            app_name='wild-rydes-site')

        master = amplify_static_site.add_branch("master")

        # Policy is fairly open
        # Ran into issues when I deployed the cognito user pools through the amplify cli
        # It creates a new CloudFormation stack and deploys several resources
        amplify_policy = iam.Policy(
            self,
            'amplify-wild-rydes-policy',
            roles=[amplify_role],
            statements=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=['codecommit:GitPull'],
                                    resources=[amplify_repo.repository_arn]),
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=[
                                        'amplify:GetApp',
                                        'amplify:CreateBackendEnvironment',
                                        'cloudformation:*', 'cognito:*',
                                        'lambda:*', 's3:*', 'iam:*'
                                    ],
                                    resources=['*'])
            ])

        # DynamoDB
        # removal_policy=core.RemovalPolicy.DESTROY is to ensure it is deleted since this is only a lab
        # table_name is required to be Rides, its configured in the nodejs code that the lambda function runs
        rides_table = ddb.Table(self,
                                'Table',
                                table_name='Rides',
                                partition_key=ddb.Attribute(
                                    name='RideId',
                                    type=ddb.AttributeType.STRING),
                                removal_policy=core.RemovalPolicy.DESTROY)

        # Lambda Functions
        request_unicorn_role = iam.Role(
            self,
            'RequestUnicornRole',
            role_name='wild-rydes-lambda-role',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole')
            ])

        # Grant write access to the lambda role
        rides_table.grant_write_data(request_unicorn_role)

        request_unicorn = _lambda.Function(
            self,
            'request-unicorn',
            handler='requestUnicorn.handler',
            runtime=_lambda.Runtime.NODEJS_12_X,
            code=_lambda.AssetCode('request_unicorn'),
            role=request_unicorn_role,
            function_name='request-unicorn-wild-rydes')

        # Rest API
        ride_api_gw = apigw.RestApi(
            self,
            'wild-rydes-apigw',
            rest_api_name='WildRydes',
            endpoint_types=[apigw.EndpointType.REGIONAL])

        # APIGW Lambda Integration
        # proxy enabled for the workshop
        ride_api_gw_lambda_integration = apigw.LambdaIntegration(
            request_unicorn,
            proxy=True,
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin':
                    "'*'",
                }
            }])

        post_ride_resource = ride_api_gw.root.add_resource('ride')
        post_ride_resource_method = post_ride_resource.add_method(
            'POST',
            ride_api_gw_lambda_integration,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])

        # This needs to be created after the Amplify site unless you create the cognito userpool in the cdk
        # I went through the Amplify CLI to create the backend
        ride_api_gw_authorizer = apigw.CfnAuthorizer(
            self,
            'wild-rydes-apigw-authorizer',
            rest_api_id=ride_api_gw.rest_api_id,
            name='wild-rydes-apigw-authorizer',
            type='COGNITO_USER_POOLS',
            identity_source='method.request.header.Authorization',
            identity_validation_expression="Bearer (.*)",
            provider_arns=[
                'arn:aws:cognito-idp:us-east-1:<ACCOUNT_ID>:userpool/<USER_POOL_ID>'
            ])

        # https://github.com/aws/aws-cdk/issues/5618
        post_ride_resource_fix = post_ride_resource_method.node.find_child(
            'Resource')
        post_ride_resource_fix.add_property_override('AuthorizationType',
                                                     'COGNITO_USER_POOLS')
        post_ride_resource_fix.add_property_override(
            'AuthorizerId', {"Ref": ride_api_gw_authorizer.logical_id})

        # Enable CORS for the workshop
        post_ride_resource.add_method(
            'OPTIONS',
            apigw.MockIntegration(integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Headers':
                    "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token'",
                    'method.response.header.Access-Control-Allow-Origin':
                    "'*'",
                    'method.response.header.Access-Control-Allow-Methods':
                    "'POST,OPTIONS'"
                }
            }],
                                  passthrough_behavior=apigw.
                                  PassthroughBehavior.WHEN_NO_MATCH,
                                  request_templates={
                                      "application/json":
                                      "{\"statusCode\":200}"
                                  }),
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Headers':
                    True,
                    'method.response.header.Access-Control-Allow-Methods':
                    True,
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])

        # Outputs
        amplify_repo_url = core.CfnOutput(
            self,
            'amplify-repo-url',
            value=amplify_repo.repository_clone_url_http)

        app_repo_url = core.CfnOutput(self,
                                      'app-repo-url',
                                      value=app_repo.repository_clone_url_http)

        amplify_default_domain = core.CfnOutput(
            self,
            'amplify-default-domain',
            value=amplify_static_site.default_domain)

        request_unicorn_apigw = core.CfnOutput(self,
                                               'request-unicorn-apigw',
                                               value=ride_api_gw.url)
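
The CfnAuthorizer plus property-override workaround above follows aws/aws-cdk issue #5618. In CDK versions that ship apigw.CognitoUserPoolsAuthorizer and cognito.UserPool.from_user_pool_arn (both are assumptions about the toolkit version, not part of the original stack), the POST method could instead be created once with the authorizer attached, making the escape-hatch overrides unnecessary. A minimal sketch, reusing the same placeholder user pool ARN:

        # Assumes at module level: from aws_cdk import aws_cognito as cognito
        # Import the user pool created outside this stack (e.g. via the Amplify CLI).
        user_pool = cognito.UserPool.from_user_pool_arn(
            self, 'wild-rydes-user-pool',
            'arn:aws:cognito-idp:us-east-1:<ACCOUNT_ID>:userpool/<USER_POOL_ID>')

        cognito_authorizer = apigw.CognitoUserPoolsAuthorizer(
            self,
            'wild-rydes-cognito-authorizer',
            cognito_user_pools=[user_pool])

        # Attaching the authorizer here replaces both the CfnAuthorizer and the
        # add_property_override calls shown above.
        post_ride_resource.add_method(
            'POST',
            ride_api_gw_lambda_integration,
            authorization_type=apigw.AuthorizationType.COGNITO,
            authorizer=cognito_authorizer,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])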
Example #29
0
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        invoker_topic = sns.Topic(self, "experiment-invoker")

        result_table = dynamodb.Table(
            self,
            id="result-table",
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            partition_key=dynamodb.Attribute(
                name="PK", type=dynamodb.AttributeType.STRING),
            sort_key=dynamodb.Attribute(name="SK",
                                        type=dynamodb.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        lambda_code_asset = _lambda.Code.from_asset(
            path=os.path.join(os.path.dirname(__file__), "..", "src"))

        # Now we build as many lambdas as we need.
        current_mem_size = LAMBDA_MEMORY_MIN_SIZE_IN_MB

        while current_mem_size <= LAMBDA_MEMORY_MAX_SIZE_IN_MB:

            # Build the function to test the client call
            client_function = _lambda.Function(
                self,
                id=f"measurement-client-{current_mem_size}-mb",
                code=lambda_code_asset,
                environment={
                    "TEST_METHOD": "client",
                    "MEMORY_SIZE": str(current_mem_size),
                    "TABLE_NAME": result_table.table_name,
                },
                handler="lambda_handler.client_handler",
                runtime=_lambda.Runtime.PYTHON_3_8,
                memory_size=current_mem_size)

            client_function.add_event_source(
                lambda_event_sources.SnsEventSource(invoker_topic))

            result_table.grant_read_write_data(client_function)

            # Allow for self-mutating function
            client_function.add_to_role_policy(
                iam.PolicyStatement(
                    actions=[
                        "lambda:getFunctionConfiguration",
                        "lambda:updateFunctionConfiguration",
                    ],
                    # CFN screams at me with circular dependencies if I use the ref here.
                    resources=["*"]))

            # Build the function to test the resource call
            resource_function = _lambda.Function(
                self,
                id=f"measurement-resource-{current_mem_size}-mb",
                code=lambda_code_asset,
                environment={
                    "TEST_METHOD": "resource",
                    "MEMORY_SIZE": str(current_mem_size),
                    "TABLE_NAME": result_table.table_name
                },
                handler="lambda_handler.resource_handler",
                runtime=_lambda.Runtime.PYTHON_3_8,
                memory_size=current_mem_size)

            resource_function.add_event_source(
                lambda_event_sources.SnsEventSource(invoker_topic))

            result_table.grant_read_write_data(resource_function)

            # Allow for self-mutating function
            resource_function.add_to_role_policy(
                iam.PolicyStatement(
                    actions=[
                        "lambda:getFunctionConfiguration",
                        "lambda:updateFunctionConfiguration",
                    ],
                    # CFN screams at me with circular dependencies if I use the ref here.
                    resources=["*"]))

            current_mem_size += LAMBDA_MEMORY_INCREMENTS_IN_MB

        # The function to gather and aggregate the measurements
        result_aggregator = _lambda.Function(
            self,
            id="result-aggregator",
            code=lambda_code_asset,
            environment={
                "TABLE_NAME": result_table.table_name,
            },
            handler="lambda_handler.result_aggregator",
            runtime=_lambda.Runtime.PYTHON_3_8,
            memory_size=1024)

        result_table.grant_read_write_data(result_aggregator)

        # The function that kicks off the experiment by publishing to the invoker topic
        invoker = _lambda.Function(self,
                                   id="experiment-invoker-function",
                                   code=lambda_code_asset,
                                   environment={
                                       "INVOKER_TOPIC_ARN":
                                       invoker_topic.topic_arn,
                                   },
                                   handler="lambda_handler.invoke_handler",
                                   runtime=_lambda.Runtime.PYTHON_3_8,
                                   memory_size=1024)

        invoker_topic.grant_publish(invoker)

        core.CfnOutput(self,
                       "invokerFn",
                       value=invoker.function_name,
                       description="Name of the invoker function")

        core.CfnOutput(self,
                       "resultAggregatorFn",
                       value=result_aggregator.function_name,
                       description="Name of the result aggregator function")
Example #30
0
    def __init__(self, scope: core.Construct, id: str,
                 artifact_bucket: s3.Bucket, static_website_bucket: s3.Bucket,
                 backend_fn: _lambda.Function, api: apigateway.LambdaRestApi,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        fn = _lambda.Function(
            scope=self,
            id="source-update-function",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="index.handler",
            # memory_size=500,
            timeout=core.Duration.seconds(10),
            code=_lambda.Code.from_asset(
                os.path.join("lambdas", "updateSource")))
        fn.add_to_role_policy(
            statement=iam.PolicyStatement(actions=[
                "lambda:UpdateFunctionCode",
            ],
                                          resources=[
                                              backend_fn.function_arn,
                                          ]))
        fn.add_to_role_policy(
            statement=iam.PolicyStatement(actions=[
                "s3:GetObject",
            ],
                                          resources=[
                                              artifact_bucket.bucket_arn +
                                              "/Server/main.zip",
                                          ]))

        # Codepipeline
        deploy_pipeline = codepipeline.Pipeline(
            scope=self,
            id="deploy-pipeline",
            restart_execution_on_update=True,
        )

        lambda_source_output = codepipeline.Artifact()
        client_source_output = codepipeline.Artifact()
        deploy_pipeline.add_stage(stage_name="Source",
                                  actions=[
                                      codepipeline_actions.S3SourceAction(
                                          action_name="LambdaSource",
                                          bucket=artifact_bucket,
                                          bucket_key="Server/main.zip",
                                          output=lambda_source_output,
                                      ),
                                      codepipeline_actions.S3SourceAction(
                                          action_name="ClientSource",
                                          bucket=artifact_bucket,
                                          bucket_key="Client/src.zip",
                                          output=client_source_output,
                                      )
                                  ])

        build_specs = {
            "version": "0.2",
            "env": {
                "variables": {
                    "REACT_APP_AUTH_URL": api.url,
                }
            },
            "phases": {
                "install": {
                    "commands": [
                        "npm install -g yarn",
                    ]
                },
                "build": {
                    "commands": [
                        "npm install",
                        "yarn test",
                        "yarn build",
                    ]
                }
            },
            "artifacts": {
                "base-directory": "build",
                "files": [
                    "**/*",
                ],
            }
        }
        client_build_output = codepipeline.Artifact()
        deploy_pipeline.add_stage(
            stage_name="Build",
            actions=[
                codepipeline_actions.CodeBuildAction(
                    action_name="ClientBuild",
                    project=codebuild.Project(
                        scope=self,
                        id="codebuild-client",
                        build_spec=codebuild.BuildSpec.from_object(
                            build_specs),
                    ),
                    input=client_source_output,
                    outputs=[client_build_output])
            ])

        deploy_pipeline.add_stage(stage_name="Deploy",
                                  actions=[
                                      codepipeline_actions.LambdaInvokeAction(
                                          lambda_=fn,
                                          inputs=[lambda_source_output],
                                          action_name="UpdateSource",
                                          user_parameters={
                                              "functionName":
                                              backend_fn.function_name,
                                              "sourceBucket":
                                              artifact_bucket.bucket_name,
                                              "sourceKey": "Server/main.zip",
                                          }),
                                      codepipeline_actions.S3DeployAction(
                                          bucket=static_website_bucket,
                                          input=client_build_output,
                                          action_name="DeployClient",
                                          extract=True,
                                      ),
                                  ])
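
The Deploy stage's LambdaInvokeAction passes the backend function name, artifact bucket, and key to the source-update function as user parameters, but lambdas/updateSource/index.handler itself is not shown. A minimal sketch of what that handler might look like (an assumption, not the author's code): it reads the user parameters from the CodePipeline job event, repoints the backend function's code at the uploaded artifact, and reports the job result back to CodePipeline.

import json

import boto3

lambda_client = boto3.client("lambda")
codepipeline = boto3.client("codepipeline")


def handler(event, context):
    # CodePipeline invokes the function with a 'CodePipeline.job' payload.
    job = event["CodePipeline.job"]
    job_id = job["id"]
    try:
        # UserParameters arrives as a JSON string containing the keys set in the stack:
        # functionName, sourceBucket, sourceKey.
        params = json.loads(
            job["data"]["actionConfiguration"]["configuration"]["UserParameters"])

        # Point the backend function at the freshly uploaded artifact.
        lambda_client.update_function_code(
            FunctionName=params["functionName"],
            S3Bucket=params["sourceBucket"],
            S3Key=params["sourceKey"],
        )
        codepipeline.put_job_success_result(jobId=job_id)
    except Exception as exc:
        # Report failures back to the pipeline so the stage shows as failed.
        codepipeline.put_job_failure_result(
            jobId=job_id,
            failureDetails={"type": "JobFailed", "message": str(exc)},
        )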