Code Example #1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ### VPC

        # create a new VPC with two AZs and two NAT gateways
        # TODO - include a VPC SQS endpoint so the NAT gateways aren't needed anymore
        #        (a hedged sketch follows right after this VPC definition)
        vpc = aws_ec2.Vpc(self,
                          "Vpc",
                          max_azs=2,
                          nat_gateways=2,
                          subnet_configuration=[
                              aws_ec2.SubnetConfiguration(
                                  name="private",
                                  cidr_mask=24,
                                  subnet_type=aws_ec2.SubnetType.PRIVATE),
                              aws_ec2.SubnetConfiguration(
                                  name="public",
                                  cidr_mask=28,
                                  subnet_type=aws_ec2.SubnetType.PUBLIC)
                          ])
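        # Hedged sketch for the TODO above (not part of the original sample):
        # an SQS interface endpoint keeps queue traffic inside the VPC, which
        # would let the NAT gateways be dropped. Assumes the aws_ec2 import above.
        vpc.add_interface_endpoint(
            "SqsEndpoint",
            service=aws_ec2.InterfaceVpcEndpointAwsService.SQS)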

        # create a new ECS cluster
        cluster = aws_ecs.Cluster(self, "FargateSQS", vpc=vpc)

        ### SQS

        # create a new SQS queue
        msg_queue = aws_sqs.Queue(self,
                                  "SQSQueue",
                                  visibility_timeout=core.Duration.seconds(0),
                                  retention_period=core.Duration.minutes(30))

        ### FARGATE

        # build the docker image from local "./docker" directory
        sqscontainer = aws_ecs.ContainerImage.from_asset(directory="docker")

        # add the aws-xray-daemon as a sidecar running on UDP/2000
        xraycontainer = aws_ecs.ContainerImage.from_registry(
            "amazon/aws-xray-daemon")

        # create the queue processing service on fargate with a locally built container
        # the pattern automatically adds an environment variable with the queue name for the container to read
        fargate_service = aws_ecs_patterns.QueueProcessingFargateService(
            self,
            "Service",
            cluster=cluster,
            memory_limit_mib=512,
            cpu=256,
            image=sqscontainer,
            enable_logging=True,
            desired_task_count=0,
            max_scaling_capacity=5,
            scaling_steps=[
                {"upper": 0, "change": -5},
                {"lower": 1, "change": +1},
                # disabled metric based scaling to test scaling on cpu usage only
                # this may potentially lower cost as fargate will scale in smaller steps
                # {"lower": 50000, "change": +2},
                # {"lower": 250000, "change": +4}
            ],
            queue=msg_queue,
            environment={"sqs_queue_url": msg_queue.queue_url})

        # add the standard aws xray sidecar to the container task
        xray_sidecar = fargate_service.task_definition.add_container(
            "xraycontainer",
            image=xraycontainer,
            logging=fargate_service.log_driver)

        # expose the sidecar on port UDP/2000
        xray_sidecar.add_port_mappings(
            aws_ecs.PortMapping(container_port=2000,
                                protocol=aws_ecs.Protocol.UDP))

        ### LAMBDA

        # build the go binary for the lambda SQS generator and retrieve the unix timestamp of when the file was last modified
        # since CDK cannot natively build Go binaries yet, we need to do this manually through build_lambda_zip.py
        # (a hedged sketch of such a build script follows this example)
        os.system("python loadgen/build_lambda_zip.py")
        filets = str(int(os.path.getmtime("./loadgen/lambda.zip")))

        # create a lambda function to generate load, using the filets value as a source hash for the zip
        sqs_lambda = aws_lambda.Function(
            self,
            "GenerateLoadSQS",
            runtime=aws_lambda.Runtime.GO_1_X,
            code=aws_lambda.Code.from_asset("./loadgen/lambda.zip",
                                            source_hash=filets),
            handler="loadgensqs",
            timeout=core.Duration.seconds(20),
            memory_size=128,
            retry_attempts=0,
            tracing=aws_lambda.Tracing.ACTIVE,
            environment={
                "sqs_queue_url": msg_queue.queue_url,
                "total_message_count": "100"
            })

        ### CLOUDWATCH RULE

        # create a new cloudwatch rule running every minute to trigger the lambda function
        eventRuleMinu = aws_events.Rule(
            self,
            "lambda-generator-minute-rule",
            enabled=True,
            schedule=aws_events.Schedule.cron(minute="*"))

        eventRuleMinu.add_target(aws_events_targets.LambdaFunction(sqs_lambda))

        ### IAM

        # add the Lambda IAM permission to send SQS messages
        msg_queue.grant_send_messages(sqs_lambda)

        # add XRay permissions to Fargate task and Lambda
        xray_policy = PolicyStatement(resources=["*"],
                                      actions=[
                                          "xray:GetGroup", "xray:GetGroups",
                                          "xray:GetSampling*", "xray:GetTime*",
                                          "xray:GetService*",
                                          "xray:PutTelemetryRecords",
                                          "xray:PutTraceSegments"
                                      ])

        fargate_service.task_definition.add_to_task_role_policy(xray_policy)
        sqs_lambda.add_to_role_policy(xray_policy)
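
A hedged sketch of what a helper like loadgen/build_lambda_zip.py might do (the actual script is not shown here, so the paths and build flags are assumptions):

    import os
    import subprocess
    import zipfile

    def build_lambda_zip() -> None:
        # cross-compile the Go load generator for the Lambda Linux runtime
        env = {**os.environ, "GOOS": "linux", "GOARCH": "amd64", "CGO_ENABLED": "0"}
        subprocess.run(["go", "build", "-o", "loadgensqs", "."],
                       cwd="loadgen", env=env, check=True)
        # package the binary at the zip root, matching the handler name "loadgensqs"
        with zipfile.ZipFile("loadgen/lambda.zip", "w", zipfile.ZIP_DEFLATED) as zf:
            zf.write("loadgen/loadgensqs", arcname="loadgensqs")

    if __name__ == "__main__":
        build_lambda_zip()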
Code Example #2
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        with open("index.py", encoding="utf8") as fp:
            handler_code = fp.read()

        role = iam.Role(
            self,
            'myappRole',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'))

        role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["*"],
                                actions=['events:*']))

        role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=["arn:aws:iam::*:role/AWS_Events_Invoke_Targets"],
                actions=['iam:PassRole']))

        role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["*"],
                                actions=[
                                    "logs:CreateLogGroup",
                                    "logs:CreateLogStream", "logs:PutLogEvents"
                                ]))

        role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["*"],
                                actions=["s3:*"]))

        role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["*"],
                                actions=["lambda:*"]))

        role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["*"],
                                actions=["sns:*"]))

        lambdaFn = lambdas.Function(self,
                                    "Singleton",
                                    code=lambdas.InlineCode(handler_code),
                                    handler="index.lambda_handler",
                                    timeout=core.Duration.seconds(600),
                                    runtime=lambdas.Runtime.PYTHON_3_6,
                                    memory_size=512,
                                    environment=dict(PATH="/opt"),
                                    role=role)

        rule = events.Rule(
            self,
            "Rule",
            schedule=events.Schedule.cron(minute='59',
                                          hour='6-20/4',
                                          month='*',
                                          week_day='*',
                                          year='*'),
        )
        rule.add_target(targets.LambdaFunction(lambdaFn))

        ac = AssetCode("./python")

        layer = LayerVersion(self,
                             "myapp1",
                             code=ac,
                             description="myapp1 layer",
                             compatible_runtimes=[lambdas.Runtime.PYTHON_3_6],
                             layer_version_name='myapp-layer')
        lambdaFn.add_layers(layer)
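
One note on the layer above (the contents of ./python and index.py are not shown, so this is an assumption about usage): Python layers are extracted under /opt, and /opt/python is on sys.path at runtime, so handler code can import any package bundled in that directory directly:

    # hypothetical snippet from index.py relying on a package shipped in the layer
    import requests  # resolved from /opt/python at runtime

    def lambda_handler(event, context):
        # PATH=/opt (set in the environment above) is meant to expose layer binaries
        return {"status": requests.get("https://example.com").status_code}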
Code Example #3
File: slack_lambda.py Project: umccr/infrastructure
    def __init__(self, scope: core.Construct, id: str, slack_channel: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        lambda_role = _iam.Role(
            self,
            'SlackLambdaRole',
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonSSMReadOnlyAccess'),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole')
            ])

        function = _lambda.Function(self,
                                    'BatchSlackLambda',
                                    handler='notify_slack.lambda_handler',
                                    runtime=_lambda.Runtime.PYTHON_3_7,
                                    code=_lambda.Code.asset('lambdas/batch'),
                                    environment={
                                        "SLACK_HOST": "hooks.slack.com",
                                        "SLACK_CHANNEL": slack_channel
                                    },
                                    role=lambda_role)

        batch_job_queues = {
            'AGHA': {
                'name': 'agha-job-queue',
                'enabled': True
            },
            'Umccrise': {
                'name': 'cdk-umccrise_job_queue',
                'enabled': True
            },
            'UmccriseDragen': {
                'name': 'cdk-umccrise_job_queue-dragen-testing',
                'enabled': False
            },
            'Nextflow': {
                'name': 'nextflow-job-queue',
                'enabled': False
            },
            'WtsReport': {
                'name': 'wts_report_batch_queue_dev',
                'enabled': True
            },
        }
        for job_queue_id, job_queue_config in batch_job_queues.items():
            job_queue_name = job_queue_config['name']
            job_queue_arn = f'arn:aws:batch:{self.region}:{self.account}:job-queue/{job_queue_name}'
            _events.Rule(
                self,
                f'BatchEventToSlackLambda{job_queue_id}',
                enabled=job_queue_config['enabled'],
                event_pattern=_events.EventPattern(
                    detail={
                        'status': [
                            'FAILED',
                            'SUCCEEDED',
                            'RUNNABLE',
                        ],
                        'jobQueue': [job_queue_arn],
                    },
                    detail_type=['Batch Job State Change'],
                    source=['aws.batch'],
                ),
                rule_name=f'batch-slack-notifications-{job_queue_id.lower()}',
                targets=[_events_targets.LambdaFunction(handler=function)])
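
The notify_slack.lambda_handler code under lambdas/batch is not shown; a minimal compatible handler might look like the sketch below. The webhook path is a placeholder, and reading it from SSM (suggested by the AmazonSSMReadOnlyAccess policy above) is an assumption:

    import http.client
    import json
    import os

    def lambda_handler(event, context):
        # summarize the Batch state-change event for Slack
        detail = event.get("detail", {})
        payload = {
            "channel": os.environ["SLACK_CHANNEL"],
            "text": f"Batch job {detail.get('jobName')} is now {detail.get('status')}",
        }
        conn = http.client.HTTPSConnection(os.environ["SLACK_HOST"])
        # the webhook path is hypothetical; the real one would come from SSM or config
        conn.request("POST", "/services/<webhook-path>", body=json.dumps(payload),
                     headers={"Content-Type": "application/json"})
        return conn.getresponse().status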
Code Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        """Invoke the base class constructor via super with the received scope, id, and props

        Args:
            scope: Defines scope in which this custom construct stack is created.
            id (str): Defines local identity of the construct. Must be unique amongst constructs
                within the same scope, as it's used to formulate the CloudFormation logical ID for
                each resource defined in this scope.
            kwargs: Additional keyword arguments; must include 'stage', which is consumed below.
        """

        # example of passing app.py level params to the stack class;
        # pop 'stage' so the remaining kwargs stay valid for the base class
        self.stage = kwargs.pop('stage')

        super().__init__(scope, id, **kwargs)

        # Resources to create
        s3_bucket = s3.Bucket(
            self, "Bucket",
            bucket_name=f"asteroids-{self.stage}",
            versioned=False,
            removal_policy=core.RemovalPolicy.DESTROY # NOT recommended for production code
        )

        ddb_asteroids_table = ddb.Table(
            self, "Table",
            table_name="asteroids_table",
            partition_key={
                "name": "id",
                "type": ddb.AttributeType.STRING
            },
            removal_policy=core.RemovalPolicy.DESTROY # NOT recommended for production code
        )

        # Lambdas and layers
        requests_layer = _lambda.LayerVersion(
            self, "requests",
            code=_lambda.AssetCode('layers/requests.zip')
        )

        process_asteroid_data = _lambda.Function(
            self, "ProcessAsteroidsLambda",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.AssetCode("lambda"),
            handler="asteroids.handler",
            layers=[requests_layer],
            environment={
                "S3_BUCKET": s3_bucket.bucket_name,
                "NASA_KEY": "<nasa_key_here>",
            }
        )

        db_write = _lambda.Function(
            self, "DbWriteLambda",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="dbwrite.handler",
            code=_lambda.Code.asset('lambda'),
            environment={
                "ASTEROIDS_TABLE": ddb_asteroids_table.table_name,
                "S3_BUCKET": s3_bucket.bucket_name
            }
        )

        # Rules and Events
        json_rule = events.Rule(
            self, "JSONRule",
            schedule=events.Schedule.cron(
                minute="15",
                hour="*",
                month="*",
                week_day="*",
                year="*"
                )
        )

        csv_rule = events.Rule(
            self, "CSVRule",
            schedule=events.Schedule.cron(
                minute="30",
                hour="*",
                month="*",
                week_day="*",
                year="*"
                )
        )

        # add lambda function target as well as custom trigger input to rules
        json_rule.add_target(
            targets.LambdaFunction(
                process_asteroid_data,
                event=events.RuleTargetInput.from_text("json")
                )
            )
        csv_rule.add_target(
            targets.LambdaFunction(
                process_asteroid_data,
                event=events.RuleTargetInput.from_text("csv")
                )
            )
        # create s3 notification for the db_write function
        notify_lambda = s3n.LambdaDestination(db_write)
        # assign 'notify_lambda' notification for 'OBJECT_CREATED' event type
        s3_bucket.add_event_notification(s3.EventType.OBJECT_CREATED, notify_lambda)

        # Permissions
        s3_bucket.grant_read_write(process_asteroid_data)
        s3_bucket.grant_read_write(db_write)
        ddb_asteroids_table.grant_read_write_data(db_write)
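
A note on the custom trigger input above: RuleTargetInput.from_text("json") makes the rule deliver the literal string as the Lambda event, so a handler (hypothetical; the real asteroids.handler is not shown) can branch on it directly:

    def handler(event, context):
        # event is the plain string "json" or "csv" supplied by the rule target
        output_format = event if isinstance(event, str) else "json"
        print(f"producing {output_format} output")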
Code Example #5
    def __init__(self, scope: core.Construct, _id: str, **kwargs) -> None:
        super().__init__(scope, _id, **kwargs)

        # Setup SSM parameter of credentials, bucket_para, ignore_list
        ssm_credential_para = ssm.StringParameter.from_secure_string_parameter_attributes(
            self,
            "ssm_parameter_credentials",
            parameter_name=ssm_parameter_credentials,
            version=1)

        ssm_bucket_para = ssm.StringParameter(self,
                                              "s3bucket_serverless",
                                              string_value=json.dumps(
                                                  bucket_para, indent=4))

        ssm_parameter_ignore_list = ssm.StringParameter(
            self, "s3_migrate_ignore_list", string_value=ignore_list)

        # Setup DynamoDB
        ddb_file_list = ddb.Table(self,
                                  "s3migrate_serverless",
                                  partition_key=ddb.Attribute(
                                      name="Key",
                                      type=ddb.AttributeType.STRING),
                                  billing_mode=ddb.BillingMode.PAY_PER_REQUEST)

        # Setup SQS
        sqs_queue_DLQ = sqs.Queue(self,
                                  "s3migrate_serverless_Q_DLQ",
                                  visibility_timeout=core.Duration.minutes(15),
                                  retention_period=core.Duration.days(14))
        sqs_queue = sqs.Queue(self,
                              "s3migrate_serverless_Q",
                              visibility_timeout=core.Duration.minutes(15),
                              retention_period=core.Duration.days(14),
                              dead_letter_queue=sqs.DeadLetterQueue(
                                  max_receive_count=3, queue=sqs_queue_DLQ))

        # Setup API for Lambda to get its IP address (for debugging network routing)
        checkip = api.RestApi(
            self,
            "lambda-checkip-api",
            cloud_watch_role=True,
            deploy=True,
            description="For Lambda get IP address",
            default_integration=api.MockIntegration(
                integration_responses=[
                    api.IntegrationResponse(status_code="200",
                                            response_templates={
                                                "application/json":
                                                "$context.identity.sourceIp"
                                            })
                ],
                request_templates={"application/json": '{"statusCode": 200}'}),
            endpoint_types=[api.EndpointType.REGIONAL])
        checkip.root.add_method("GET",
                                method_responses=[
                                    api.MethodResponse(
                                        status_code="200",
                                        response_models={
                                            "application/json":
                                            api.Model.EMPTY_MODEL
                                        })
                                ])

        # Setup Lambda functions
        handler = lam.Function(self,
                               "s3-migrate-worker",
                               code=lam.Code.asset("./lambda"),
                               handler="lambda_function_worker.lambda_handler",
                               runtime=lam.Runtime.PYTHON_3_8,
                               memory_size=1024,
                               timeout=core.Duration.minutes(15),
                               tracing=lam.Tracing.ACTIVE,
                               environment={
                                   'table_queue_name': ddb_file_list.table_name,
                                   'Des_bucket_default': Des_bucket_default,
                                   'Des_prefix_default': Des_prefix_default,
                                   'StorageClass': StorageClass,
                                   'checkip_url': checkip.url,
                                   'ssm_parameter_credentials': ssm_parameter_credentials
                               })

        handler_jobsender = lam.Function(
            self,
            "s3-migrate-jobsender",
            code=lam.Code.asset("./lambda"),
            handler="lambda_function_jobsender.lambda_handler",
            runtime=lam.Runtime.PYTHON_3_8,
            memory_size=1024,
            timeout=core.Duration.minutes(15),
            tracing=lam.Tracing.ACTIVE,
            environment={
                'table_queue_name': ddb_file_list.table_name,
                'StorageClass': StorageClass,
                'checkip_url': checkip.url,
                'sqs_queue': sqs_queue.queue_name,
                'ssm_parameter_credentials': ssm_parameter_credentials,
                'ssm_parameter_ignore_list': ssm_parameter_ignore_list.parameter_name,
                'ssm_parameter_bucket': ssm_bucket_para.parameter_name
            })

        # Allow lambda read/write DDB, SQS
        ddb_file_list.grant_read_write_data(handler)
        ddb_file_list.grant_read_write_data(handler_jobsender)
        sqs_queue.grant_send_messages(handler_jobsender)
        # SQS trigger Lambda worker
        handler.add_event_source(SqsEventSource(sqs_queue, batch_size=1))

        # Option 1: create a new S3 bucket; all new objects in this bucket will be transmitted by the worker Lambda
        s3bucket = s3.Bucket(self, "s3_new_migrate")
        s3bucket.grant_read(handler)
        s3bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                        s3n.SqsDestination(sqs_queue))

        # Option 2: allow existing S3 buckets to be read by the Lambda functions.
        # The jobsender Lambda will scan and compare these buckets and trigger worker Lambdas to transmit
        bucket_name = ''
        for b in bucket_para:
            if bucket_name != b['src_bucket']:  # skip consecutive duplicate buckets in the list
                bucket_name = b['src_bucket']
                s3exist_bucket = s3.Bucket.from_bucket_name(
                    self,
                    bucket_name,  # use the bucket name as the construct id
                    bucket_name=bucket_name)
                s3exist_bucket.grant_read(handler_jobsender)
                s3exist_bucket.grant_read(handler)

        # Allow Lambda read ssm parameters
        ssm_bucket_para.grant_read(handler_jobsender)
        ssm_credential_para.grant_read(handler)
        ssm_credential_para.grant_read(handler_jobsender)
        ssm_parameter_ignore_list.grant_read(handler_jobsender)

        # Schedule cron event to trigger Lambda Jobsender per hour:
        event.Rule(self,
                   'cron_trigger_jobsender',
                   schedule=event.Schedule.rate(core.Duration.hours(1)),
                   targets=[target.LambdaFunction(handler_jobsender)])

        # Create Lambda logs filter to create network traffic metric
        handler.log_group.add_metric_filter(
            "Complete-bytes",
            metric_name="Complete-bytes",
            metric_namespace="s3_migrate",
            metric_value="$bytes",
            filter_pattern=logs.FilterPattern.literal(
                '[info, date, sn, p="--->Complete", bytes, key]'))
        handler.log_group.add_metric_filter(
            "Uploading-bytes",
            metric_name="Uploading-bytes",
            metric_namespace="s3_migrate",
            metric_value="$bytes",
            filter_pattern=logs.FilterPattern.literal(
                '[info, date, sn, p="--->Uploading", bytes, key]'))
        handler.log_group.add_metric_filter(
            "Downloading-bytes",
            metric_name="Downloading-bytes",
            metric_namespace="s3_migrate",
            metric_value="$bytes",
            filter_pattern=logs.FilterPattern.literal(
                '[info, date, sn, p="--->Downloading", bytes, key]'))
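        # For reference, a log line matching the space-delimited patterns above
        # would look like this (illustrative values only):
        #   INFO 2021-01-01 0042 --->Complete 1048576 some/key.bin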
        lambda_metric_Complete = cw.Metric(namespace="s3_migrate",
                                           metric_name="Complete-bytes",
                                           statistic="Sum",
                                           period=core.Duration.minutes(1))
        lambda_metric_Upload = cw.Metric(namespace="s3_migrate",
                                         metric_name="Uploading-bytes",
                                         statistic="Sum",
                                         period=core.Duration.minutes(1))
        lambda_metric_Download = cw.Metric(namespace="s3_migrate",
                                           metric_name="Downloading-bytes",
                                           statistic="Sum",
                                           period=core.Duration.minutes(1))
        handler.log_group.add_metric_filter(
            "ERROR",
            metric_name="ERROR-Logs",
            metric_namespace="s3_migrate",
            metric_value="1",
            filter_pattern=logs.FilterPattern.literal('"ERROR"'))
        handler.log_group.add_metric_filter(
            "WARNING",
            metric_name="WARNING-Logs",
            metric_namespace="s3_migrate",
            metric_value="1",
            filter_pattern=logs.FilterPattern.literal('"WARNING"'))
        log_metric_ERROR = cw.Metric(namespace="s3_migrate",
                                     metric_name="ERROR-Logs",
                                     statistic="Sum",
                                     period=core.Duration.minutes(1))
        log_metric_WARNING = cw.Metric(namespace="s3_migrate",
                                       metric_name="WARNING-Logs",
                                       statistic="Sum",
                                       period=core.Duration.minutes(1))

        # Dashboard to monitor SQS and Lambda
        board = cw.Dashboard(self, "s3_migrate_serverless")

        board.add_widgets(
            cw.GraphWidget(title="Lambda-NETWORK",
                           left=[
                               lambda_metric_Download, lambda_metric_Upload,
                               lambda_metric_Complete
                           ]),
            # TODO: this graphs account-wide Lambda concurrency, not just this function's (a CDK limitation).
            # Lambda now supports per-function concurrency metrics; switch once CDK supports it.
            cw.GraphWidget(title="Lambda-all-concurrent",
                           left=[
                               handler.metric_all_concurrent_executions(
                                   period=core.Duration.minutes(1))
                           ]),
            cw.GraphWidget(
                title="Lambda-invocations/errors/throttles",
                left=[
                    handler.metric_invocations(
                        period=core.Duration.minutes(1)),
                    handler.metric_errors(period=core.Duration.minutes(1)),
                    handler.metric_throttles(period=core.Duration.minutes(1))
                ]),
            cw.GraphWidget(
                title="Lambda-duration",
                left=[
                    handler.metric_duration(period=core.Duration.minutes(1))
                ]),
        )

        board.add_widgets(
            cw.GraphWidget(
                title="SQS-Jobs",
                left=[
                    sqs_queue.metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue.metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1))
                ]),
            cw.GraphWidget(
                title="SQS-DeadLetterQueue",
                left=[
                    sqs_queue_DLQ.metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue_DLQ.metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1))
                ]),
            cw.GraphWidget(title="ERROR/WARNING Logs",
                           left=[log_metric_ERROR],
                           right=[log_metric_WARNING]),
            cw.SingleValueWidget(
                title="Running/Waiting and Dead Jobs",
                metrics=[
                    sqs_queue.metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue.metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue_DLQ.metric_approximate_number_of_messages_not_visible(
                        period=core.Duration.minutes(1)),
                    sqs_queue_DLQ.metric_approximate_number_of_messages_visible(
                        period=core.Duration.minutes(1))
                ],
                height=6))
        # Alarm for queue - DLQ
        alarm_DLQ = cw.Alarm(
            self,
            "SQS_DLQ",
            metric=sqs_queue_DLQ.metric_approximate_number_of_messages_visible(),
            threshold=0,
            comparison_operator=cw.ComparisonOperator.GREATER_THAN_THRESHOLD,
            evaluation_periods=1,
            datapoints_to_alarm=1)
        alarm_topic = sns.Topic(self, "SQS queue-DLQ has dead letter")
        alarm_topic.add_subscription(
            subscription=sub.EmailSubscription(alarm_email))
        alarm_DLQ.add_alarm_action(action.SnsAction(alarm_topic))

        core.CfnOutput(self,
                       "Dashboard",
                       value="CloudWatch Dashboard name s3_migrate_serverless")
Code Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ## create lambda function from inline code
        try:
            with open("./lambda_src/custom_lambda.py", "r") as f:
                custom_lambda_code = f.read()
        except OSError as e:
            raise Exception(f"failed to open lambda function code, error {e}")

        custom_lambda_fn = _lambda.Function(
            self,
            "customLambda",
            function_name="custom_lambda",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="index.lambda_handler",
            code=_lambda.InlineCode(custom_lambda_code),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={"LOG_LEVEL": "INFO"},
        )

        # create custom log group
        custom_loggroup = _logs.LogGroup(
            self,
            "customLogGroup",
            log_group_name=f"/aws/lambda/{custom_lambda_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        ## create lambda function from s3
        source_bucket = _s3.Bucket.from_bucket_name(self, "sourceBucket", "cdk-tutorials-resources")

        custom_lambda_s3 = _lambda.Function(
            self,
            "customLambdaS3",
            function_name="custom_lambda_s3",
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler="custom_lambda.lambda_handler",
            code=_lambda.S3Code(bucket=source_bucket, key="custom_lambda.zip"),
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={"Log_Group": "INFO"},
        )

        _logs.LogGroup(
            self,
            "customLogGroupS3",
            log_group_name=f"/aws/lambda/{custom_lambda_s3.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_WEEK,
        )

        ## cloudwatch rules
        #  - every weekday (MON-FRI) at 18:00 UTC
        six_pm_cron = _events.Rule(
            self,
            "sixPmRule",
            schedule=_events.Schedule.cron(
                minute="0", hour="18", month="*", week_day="MON-FRI", year="*"
            ),
        )
        # - every 3 minutes
        run_every_3_mins = _events.Rule(
            self, "every3Mins", schedule=_events.Schedule.rate(core.Duration.minutes(3))
        )

        # add lambda to cloudwatch event rules
        six_pm_cron.add_target(_targets.LambdaFunction(custom_lambda_fn))
        run_every_3_mins.add_target(_targets.LambdaFunction(custom_lambda_s3))
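
Because the stack passes InlineCode with handler="index.lambda_handler", the inlined source is materialized as index.py and must define lambda_handler. A minimal sketch of what ./lambda_src/custom_lambda.py could contain (assumed contents, not the project's actual file):

    import json
    import logging
    import os

    # honor the LOG_LEVEL environment variable set by the stack
    logging.getLogger().setLevel(os.environ.get("LOG_LEVEL", "INFO"))

    def lambda_handler(event, context):
        logging.info("received event: %s", json.dumps(event))
        return {"statusCode": 200}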
Code Example #7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # store
        dynamodb_table = dynamodb.Table(
            self,
            'dynamodb_table',
            table_name=f'{PROJECT}_{STAGE}',
            partition_key=dynamodb.Attribute(
                name='date', type=dynamodb.AttributeType.STRING),
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            point_in_time_recovery=False,
            removal_policy=core.RemovalPolicy.DESTROY,
            server_side_encryption=True,
        )

        # public api
        public_api = appsync.CfnGraphQLApi(
            self,
            'public_api',
            name=f'{PROJECT}_{STAGE}',
            authentication_type='API_KEY',
        )

        now = time.localtime()
        epoch = time.mktime(now)
        public_api_key = appsync.CfnApiKey(
            self,
            'public_api_key',
            api_id=public_api.attr_api_id,
            expires=epoch + core.Duration.days(90).to_seconds(),
        )

        with open('schema.gql', mode='r') as f:
            graphql_schema = f.read()

            appsync.CfnGraphQLSchema(self,
                                     'public_api_schema',
                                     api_id=public_api.attr_api_id,
                                     definition=graphql_schema)

        public_api_role = iam.Role(
            self,
            'public_api_role',
            assumed_by=iam.ServicePrincipal('appsync.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonDynamoDBFullAccess')
            ],
        )

        public_api_datasource = appsync.CfnDataSource(
            self,
            'public_api_datasource',
            api_id=public_api.attr_api_id,
            name=f'{PROJECT}_{STAGE}_dynamodb',
            type='AMAZON_DYNAMODB',
            dynamo_db_config={
                'awsRegion': 'us-east-1',
                'tableName': dynamodb_table.table_name,
            },
            service_role_arn=public_api_role.role_arn,
        )

        with open('mapping_templates/get_holiday.json', mode='r') as f:
            get_holiday_json = f.read()

            appsync.CfnResolver(
                self,
                'public_api_resolver_get_holiday',
                api_id=public_api.attr_api_id,
                type_name='Query',
                field_name='getHoliday',
                data_source_name=public_api_datasource.attr_name,
                kind='UNIT',
                request_mapping_template=get_holiday_json,
                response_mapping_template='$util.toJson($context.result)',
            )

        with open('mapping_templates/list_holidays.json', mode='r') as f:
            list_holidays_json = f.read()

            appsync.CfnResolver(
                self,
                'public_api_resolver_list_holidays',
                api_id=public_api.attr_api_id,
                type_name='Query',
                field_name='listHolidays',
                data_source_name=public_api_datasource.attr_name,
                kind='UNIT',
                request_mapping_template=list_holidays_json,
                response_mapping_template='$util.toJson($context.result)',
            )

        # lambda source code upload to s3
        lambda_assets = s3_assets.Asset(self,
                                        'lambda_assets',
                                        path='./function/.artifact/')

        # update function
        func_api = lambda_.Function(
            self,
            f'{PROJECT}-{STAGE}-func',
            function_name=f'{PROJECT}-{STAGE}-func',
            code=lambda_.Code.from_bucket(bucket=lambda_assets.bucket,
                                          key=lambda_assets.s3_object_key),
            handler='app.handler',
            runtime=lambda_.Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(120),
            log_retention=logs.RetentionDays.SIX_MONTHS,
            memory_size=128,
            tracing=lambda_.Tracing.ACTIVE,
        )
        func_api.add_environment('TABLE_NAME', dynamodb_table.table_name)
        func_api.add_environment('CSV_URL', CSV_URL)
        func_api.add_to_role_policy(
            iam.PolicyStatement(
                actions=[
                    'dynamodb:Get*',
                    'dynamodb:Put*',
                    'dynamodb:Batch*',
                ],
                resources=[dynamodb_table.table_arn],
            ))

        # schedule execute
        events.Rule(
            self,
            f'{PROJECT}-{STAGE}-schedule',
            enabled=True,
            schedule=events.Schedule.rate(core.Duration.days(10)),
            targets=[events_targets.LambdaFunction(func_api)],
        )

        # lambda@edge
        func_lambdaedge = lambda_.Function(
            self,
            f'{PROJECT}-{STAGE}-func-lambdaedge',
            function_name=f'{PROJECT}-{STAGE}-func-lambdaedge',
            code=lambda_.Code.from_inline(
                open('./function/src/lambdaedge.py').read().replace(
                    '__X_API_KEY__', public_api_key.attr_api_key)),
            handler='index.handler',
            runtime=lambda_.Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(30),
            memory_size=128,
            role=iam.Role(
                self,
                f'{PROJECT}-{STAGE}-func-lambdaedge-role',
                assumed_by=iam.CompositePrincipal(
                    iam.ServicePrincipal('edgelambda.amazonaws.com'),
                    iam.ServicePrincipal('lambda.amazonaws.com'),
                ),
                managed_policies=[
                    iam.ManagedPolicy.from_aws_managed_policy_name(
                        'service-role/AWSLambdaBasicExecutionRole'),
                ],
            ),
        )
        lambdaedge_version = func_lambdaedge.add_version(
            hashlib.sha256(
                open('./function/src/lambdaedge.py').read().replace(
                    '__X_API_KEY__',
                    public_api_key.attr_api_key).encode()).hexdigest())

        # ACM
        certificates = acm.Certificate(
            self,
            'certificates',
            domain_name=DOMAIN,
            validation_method=acm.ValidationMethod.DNS,
        )

        # CDN
        cdn = cloudfront.CloudFrontWebDistribution(
            self,
            f'{PROJECT}-{STAGE}-cloudfront',
            origin_configs=[
                cloudfront.SourceConfiguration(
                    behaviors=[
                        # default behavior
                        cloudfront.Behavior(
                            allowed_methods=cloudfront.CloudFrontAllowedMethods.ALL,
                            default_ttl=core.Duration.seconds(0),
                            max_ttl=core.Duration.seconds(0),
                            min_ttl=core.Duration.seconds(0),
                            is_default_behavior=True,
                            lambda_function_associations=[
                                cloudfront.LambdaFunctionAssociation(
                                    event_type=cloudfront.LambdaEdgeEventType.ORIGIN_REQUEST,
                                    lambda_function=lambdaedge_version,
                                ),
                            ])
                    ],
                    # origin is the AppSync GraphQL endpoint host, extracted from its URL
                    custom_origin_source=cloudfront.CustomOriginConfig(
                        domain_name=core.Fn.select(
                            2, core.Fn.split('/', public_api.attr_graph_ql_url))),
                )
            ],
            alias_configuration=cloudfront.AliasConfiguration(
                acm_cert_ref=certificates.certificate_arn,
                names=[DOMAIN],
                security_policy=cloudfront.SecurityPolicyProtocol.TLS_V1_2_2018,
            ),
            price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
        )
        core.CfnOutput(
            self,
            'cloudfront-domain',
            value=cdn.domain_name,
        )
Code Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create dynamo table
        demo_table = aws_dynamodb.Table(
            self, "demo_table",
            partition_key=aws_dynamodb.Attribute(
                name="id",
                type=aws_dynamodb.AttributeType.STRING
            )
        )
        
        # create a Cloudwatch Event rule
        one_day_rule = aws_events.Rule(
            self, "one_day_rule",
            schedule=aws_events.Schedule.rate(core.Duration.days(1)),
        )

        # Lambda stuff
        self.lambda_code_etl = _lambda.Code.from_cfn_parameters()
        lambda_etl = _lambda.Function(self,'LambdaETL',
            handler='lambda-handler-etl.handler',
            timeout=core.Duration.seconds(120),
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=self.lambda_code_etl,
        )

        self.lambda_code_serve = _lambda.Code.from_cfn_parameters()
        lambda_serve = _lambda.Function(self, 'LambdaServe',
            handler='lambda-handler-serve.handler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=self.lambda_code_serve,
        )

        # Add target to Cloudwatch Event
        one_day_rule.add_target(aws_events_targets.LambdaFunction(lambda_etl))

        # grant the lambdas read/write access to the demo table
        lambda_etl.add_environment("TABLE_NAME", demo_table.table_name)
        demo_table.grant_write_data(lambda_etl)
        demo_table.grant_read_data(lambda_etl)
        
        lambda_serve.add_environment("TABLE_NAME", demo_table.table_name)
        demo_table.grant_write_data(lambda_serve)
        demo_table.grant_read_data(lambda_serve)



        # API Gateway stuff        
        base_api = _apigw.RestApi(self, 'ApiGatewayWithCors',
        rest_api_name='ApiGatewayWithCors')

        entity = base_api.root.add_resource('api')
        entity_lambda_integration = _apigw.LambdaIntegration(
            lambda_serve,
            proxy=False,
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])
        entity.add_method(
            'GET', entity_lambda_integration,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])

        self.add_cors_options(entity)  # helper defined outside __init__; see the sketch after this example

        
        # make the SNS resource (called a topic) and give access to the ETL lambda
        sns_topic = aws_sns.Topic(self, 'PipelineTopic')
        lambda_etl.add_environment('SNS_TOPIC_ARN', sns_topic.topic_arn)
        sns_topic.grant_publish(lambda_etl)
        
        # make an email subscription
        subscription = aws_sns_subscriptions.EmailSubscription('*****@*****.**') # fake email because I don't want AWS spam
        sns_topic.add_subscription(subscription)
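
The call to self.add_cors_options(entity) above references a helper that is not part of this excerpt. A hedged sketch of the usual implementation (a mock OPTIONS integration, as seen in common CDK CORS samples; assumed, not taken from this project):

    def add_cors_options(self, apigw_resource):
        # answer preflight OPTIONS with a mock integration; no lambda involved
        apigw_resource.add_method(
            'OPTIONS',
            _apigw.MockIntegration(
                integration_responses=[{
                    'statusCode': '200',
                    'responseParameters': {
                        'method.response.header.Access-Control-Allow-Headers':
                            "'Content-Type,X-Amz-Date,Authorization,X-Api-Key'",
                        'method.response.header.Access-Control-Allow-Origin': "'*'",
                        'method.response.header.Access-Control-Allow-Methods': "'GET,OPTIONS'",
                    }
                }],
                passthrough_behavior=_apigw.PassthroughBehavior.WHEN_NO_MATCH,
                request_templates={'application/json': '{"statusCode": 200}'}),
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Headers': True,
                    'method.response.header.Access-Control-Allow-Methods': True,
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])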
Code Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        eventTargets = []

        policyStatement = _iam.PolicyStatement(
            resources = ['*'],
            actions = [
                "cloudwatch:PutMetricAlarm",
                "cloudwatch:ListMetrics",
                "cloudwatch:DeleteAlarms",
                "ec2:CreateTags",
                "ec2:Describe*",
                "ec2:Attach*",
                "elasticloadbalancing:Describe*",
                "elasticloadbalancing:Create*",
                "elasticloadbalancing:AddTags"
            ],
            effect = _iam.Effect.ALLOW
        )

        glom_layer = _lambda.LayerVersion.from_layer_version_attributes(
            self,
            "glom_api_layer",
            layer_version_arn="arn:aws:lambda:us-east-1:<AWS ACCOUNT>:layer:python-glom-layer:1",
            compatible_runtimes=[
                _lambda.Runtime.PYTHON_3_6,
                _lambda.Runtime.PYTHON_3_7
            ]
        )

        eventHandler = _lambda.Function(
            self,
            'resourceTagger',
            runtime = _lambda.Runtime.PYTHON_3_7,
            code = _lambda.Code.asset('lambda'),
            handler = 'auto_tag.handler',
            layers=[glom_layer]
        )

        eventHandler.add_to_role_policy(policyStatement)

        eventTargets.append(_targets.LambdaFunction(handler = eventHandler))

        pattern = _events.EventPattern(
            source = ['aws.ec2', 'aws.elasticloadbalancing'],
            detail_type = [ "AWS API Call via CloudTrail"],
            detail = {
                "eventSource": [
                  "ec2.amazonaws.com",
                  "elasticloadbalancing.amazonaws.com"
                ],
                "eventName": [
                    "RunInstances",
                    "CreateSnapshot",
                    "CreateVolume",
                    "CreateImage",
                    "CreateLoadBalancer",
                    "AttachNetworkInterface"
                ]
            }
        )

        _events.Rule(
            scope = self,
            id = 'AutoTagsRule',
            description = 'Monitor EC2 and ELB events',
            rule_name = 'AutoTagsRule',
            event_pattern = pattern,
            targets = eventTargets
        )
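
For reference, a Lambda behind this rule receives each CloudTrail API call wrapped in the EventBridge envelope; a trimmed, illustrative payload (field values are made up):

    {
        "source": "aws.ec2",
        "detail-type": "AWS API Call via CloudTrail",
        "detail": {
            "eventName": "RunInstances",
            "userIdentity": {"arn": "arn:aws:iam::123456789012:user/example"},
            "responseElements": {"instancesSet": {"items": [{"instanceId": "i-0123456789abcdef0"}]}}
        }
    }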
Code Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # If left unchecked this pattern could "fan out" on the transform and load
        # lambdas to the point that it consumes all resources on the account. This is
        # why we are limiting concurrency to 2 on all 3 lambdas. Feel free to raise this.
        lambda_throttle_size = 2

        ####
        # DynamoDB Table
        # This is where our transformed data ends up
        ####
        table = dynamo_db.Table(self,
                                "TransformedData",
                                partition_key=dynamo_db.Attribute(
                                    name="id",
                                    type=dynamo_db.AttributeType.STRING))

        ####
        # S3 Landing Bucket
        # This is where the user uploads the file to be transformed
        ####
        bucket = s3.Bucket(self, "LandingBucket")

        ####
        # Queue that listens for S3 Bucket events
        ####
        queue = sqs.Queue(self,
                          'newObjectInLandingBucketEventQueue',
                          visibility_timeout=core.Duration.seconds(300))

        bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                      s3n.SqsDestination(queue))

        # EventBridge Permissions
        event_bridge_put_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=['*'],
            actions=['events:PutEvents'])

        ####
        # Fargate ECS Task Creation to pull data from S3
        #
        # Fargate is used here because a seriously large file could be
        # streamed to fargate for as long as needed before putting the data
        # onto eventbridge, or you could raise the memory/storage to download
        # the whole file. Lambda has limits on runtime and memory/storage.
        ####
        vpc = ec2.Vpc(self, "Vpc", max_azs=2)

        logging = ecs.AwsLogDriver(stream_prefix='TheEventBridgeETL',
                                   log_retention=logs.RetentionDays.ONE_WEEK)

        cluster = ecs.Cluster(self, 'Ec2Cluster', vpc=vpc)

        task_definition = ecs.TaskDefinition(
            self,
            'FargateTaskDefinition',
            memory_mib="512",
            cpu="256",
            compatibility=ecs.Compatibility.FARGATE)

        # We need to give our fargate container permission to put events on our EventBridge
        task_definition.add_to_task_role_policy(event_bridge_put_policy)
        # Grant fargate container access to the object that was uploaded to s3
        bucket.grant_read(task_definition.task_role)

        container = task_definition.add_container(
            'AppContainer',
            image=ecs.ContainerImage.from_asset(
                'container/s3DataExtractionTask'),
            logging=logging,
            environment={
                'S3_BUCKET_NAME': bucket.bucket_name,
                'S3_OBJECT_KEY': ''
            })

        ####
        # Lambdas
        #
        # These are used for 4 phases:
        #
        # Extract    - kicks off the ecs fargate task to download data and splinter it into eventbridge events
        # Transform  - takes the two comma separated strings and produces a json object
        # Load       - inserts the data into dynamodb
        # Observe    - This is a lambda that subscribes to all events and logs them centrally
        ####

        subnet_ids = []
        for subnet in vpc.private_subnets:
            subnet_ids.append(subnet.subnet_id)

        ####
        # Extract
        # defines an AWS Lambda resource to trigger our fargate ecs task
        ####
        extract_lambda = _lambda.Function(
            self,
            "extractLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="s3SqsEventConsumer.handler",
            code=_lambda.Code.from_asset("lambdas/extract"),
            reserved_concurrent_executions=lambda_throttle_size,
            environment={
                "CLUSTER_NAME": cluster.cluster_name,
                "TASK_DEFINITION": task_definition.task_definition_arn,
                "SUBNETS": json.dumps(subnet_ids),
                "CONTAINER_NAME": container.container_name
            })
        queue.grant_consume_messages(extract_lambda)
        extract_lambda.add_event_source(_event.SqsEventSource(queue=queue))
        extract_lambda.add_to_role_policy(event_bridge_put_policy)

        run_task_policy_statement = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[task_definition.task_definition_arn],
            actions=['ecs:RunTask'])
        extract_lambda.add_to_role_policy(run_task_policy_statement)

        task_execution_role_policy_statement = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[
                task_definition.obtain_execution_role().role_arn,
                task_definition.task_role.role_arn
            ],
            actions=['iam:PassRole'])
        extract_lambda.add_to_role_policy(task_execution_role_policy_statement)

        ####
        # Transform
        # defines a lambda to transform the data that was extracted from s3
        ####

        transform_lambda = _lambda.Function(
            self,
            "TransformLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="transform.handler",
            code=_lambda.Code.from_asset("lambdas/transform"),
            reserved_concurrent_executions=lambda_throttle_size,
            timeout=core.Duration.seconds(3))
        transform_lambda.add_to_role_policy(event_bridge_put_policy)

        # Create EventBridge rule to route extraction events
        transform_rule = events.Rule(
            self,
            'transformRule',
            description='Data extracted from S3, needs to be transformed',
            event_pattern=events.EventPattern(
                source=['cdkpatterns.the-eventbridge-etl'],
                detail_type=['s3RecordExtraction'],
                detail={"status": ["extracted"]}))
        transform_rule.add_target(
            targets.LambdaFunction(handler=transform_lambda))

        ####
        # Load
        # load the transformed data in dynamodb
        ####

        load_lambda = _lambda.Function(
            self,
            "LoadLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="load.handler",
            code=_lambda.Code.from_asset("lambdas/load"),
            reserved_concurrent_executions=lambda_throttle_size,
            timeout=core.Duration.seconds(3),
            environment={"TABLE_NAME": table.table_name})
        load_lambda.add_to_role_policy(event_bridge_put_policy)
        table.grant_read_write_data(load_lambda)

        load_rule = events.Rule(
            self,
            'loadRule',
            description='Data transformed, needs to be loaded into DynamoDB',
            event_pattern=events.EventPattern(
                source=['cdkpatterns.the-eventbridge-etl'],
                detail_type=['transform'],
                detail={"status": ["transformed"]}))
        load_rule.add_target(targets.LambdaFunction(handler=load_lambda))

        ####
        # Observe
        # Watch for all cdkpatterns.the-eventbridge-etl events and log them centrally
        ####

        observe_lambda = _lambda.Function(
            self,
            "ObserveLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="observe.handler",
            code=_lambda.Code.from_asset("lambdas/observe"),
            reserved_concurrent_executions=lambda_throttle_size,
            timeout=core.Duration.seconds(3))

        observe_rule = events.Rule(
            self,
            'observeRule',
            description='all events are caught here and logged centrally',
            event_pattern=events.EventPattern(
                source=['cdkpatterns.the-eventbridge-etl']))

        observe_rule.add_target(targets.LambdaFunction(handler=observe_lambda))
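
The extract/transform/load lambdas in this example are Node.js; for consistency with the rest of this page, here is a hedged Python sketch of the PutEvents call the transform phase would make, with the source, detail-type, and status fields taken from the loadRule pattern above (the helper name and payload shape are assumptions):

    import boto3
    import json

    events_client = boto3.client("events")

    def publish_transformed(record: dict) -> None:
        # emit the event shape that loadRule matches
        events_client.put_events(Entries=[{
            "Source": "cdkpatterns.the-eventbridge-etl",
            "DetailType": "transform",
            "Detail": json.dumps({"status": "transformed", "data": record}),
        }])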
Code Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        dynamodb_table = _ddb.Table(
            self,
            id="lab2-cm-ddb",
            table_name="lab2-cm-order-status",
            partition_key=Attribute(name='ID', type=AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY  # NOT for production
        )

        eb = _eb.EventBus(self,
                          id="lab2-cm-eventbus",
                          event_bus_name="lab2-cm-eventbus")

        lambda_role = _iam.Role(
            self,
            id='lab2-cm-role',
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'))

        dynamodb_policy_statement = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW)
        dynamodb_policy_statement.add_actions("dynamodb:*")
        dynamodb_policy_statement.add_resources(dynamodb_table.table_arn)
        lambda_role.add_to_policy(dynamodb_policy_statement)

        eventbridge_policy_statement = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW)
        eventbridge_policy_statement.add_actions("events:*")
        eventbridge_policy_statement.add_resources(eb.event_bus_arn)
        lambda_role.add_to_policy(eventbridge_policy_statement)

        cloudwatch_policy_statement = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW)
        cloudwatch_policy_statement.add_actions("logs:CreateLogGroup")
        cloudwatch_policy_statement.add_actions("logs:CreateLogStream")
        cloudwatch_policy_statement.add_actions("logs:PutLogEvents")
        cloudwatch_policy_statement.add_actions("logs:DescribeLogStreams")
        cloudwatch_policy_statement.add_resources("*")
        lambda_role.add_to_policy(cloudwatch_policy_statement)

        fn_lambda_invoice_service = aws_lambda.Function(
            self,
            "lab2-cm-invoiceService",
            code=aws_lambda.AssetCode("../lambda-functions/invoice-service/"),
            handler="app.lambda_handler",
            tracing=aws_lambda.Tracing.ACTIVE,
            timeout=core.Duration.seconds(30),
            role=lambda_role,
            runtime=aws_lambda.Runtime.PYTHON_3_8)
        fn_lambda_invoice_service.add_environment("TABLE_NAME",
                                                  dynamodb_table.table_name)

        fn_lambda_fulfilment_service = aws_lambda.Function(
            self,
            "lab2-cm-fulfilmentService",
            code=aws_lambda.AssetCode(
                "../lambda-functions/fulfilment-service/"),
            handler="app.lambda_handler",
            tracing=aws_lambda.Tracing.ACTIVE,
            timeout=core.Duration.seconds(30),
            role=lambda_role,
            runtime=aws_lambda.Runtime.PYTHON_3_8)
        fn_lambda_fulfilment_service.add_environment("TABLE_NAME",
                                                     dynamodb_table.table_name)
        fn_lambda_fulfilment_service.add_environment("EVENTBUS_NAME",
                                                     eb.event_bus_name)

        fn_lambda_forecasting_service = aws_lambda.Function(
            self,
            "lab2-cm-forecastingService",
            code=aws_lambda.AssetCode(
                "../lambda-functions/forecasting-service/"),
            handler="app.lambda_handler",
            tracing=aws_lambda.Tracing.ACTIVE,
            timeout=core.Duration.seconds(30),
            role=lambda_role,
            runtime=aws_lambda.Runtime.PYTHON_3_8)
        fn_lambda_forecasting_service.add_environment(
            "TABLE_NAME", dynamodb_table.table_name)

        fn_lambda_order_service = aws_lambda.Function(
            self,
            "lab2-cm-orderService",
            code=aws_lambda.AssetCode("../lambda-functions/order-service/"),
            handler="app.lambda_handler",
            timeout=core.Duration.seconds(30),
            tracing=aws_lambda.Tracing.ACTIVE,
            role=lambda_role,
            runtime=aws_lambda.Runtime.PYTHON_3_8)
        fn_lambda_order_service.add_environment("TABLE_NAME",
                                                dynamodb_table.table_name)
        fn_lambda_order_service.add_environment("EVENTBUS_NAME",
                                                eb.event_bus_name)

        fn_lambda_logistic_service = aws_lambda.Function(
            self,
            "lab2-cm-logisticService",
            code=aws_lambda.AssetCode("../lambda-functions/logistic-service/"),
            handler="app.lambda_handler",
            timeout=core.Duration.seconds(30),
            tracing=aws_lambda.Tracing.ACTIVE,
            role=lambda_role,
            runtime=aws_lambda.Runtime.PYTHON_3_8)
        fn_lambda_logistic_service.add_environment("TABLE_NAME",
                                                   dynamodb_table.table_name)

        eb_order_created_pattern = _eb.EventPattern(
            detail_type=["order_created"], )
        eb_fulfilment_completed_pattern = _eb.EventPattern(
            detail_type=["fulfilment_completed"], )
        eb_order_created_rule = _eb.Rule(
            self,
            id="lab2-cm-eventRule-order-created",
            description="Order created event",
            enabled=True,
            event_bus=eb,
            event_pattern=eb_order_created_pattern,
            rule_name="lab2-OrderCreated",
            targets=[
                _ebt.LambdaFunction(handler=fn_lambda_invoice_service),
                _ebt.LambdaFunction(handler=fn_lambda_fulfilment_service),
                _ebt.LambdaFunction(handler=fn_lambda_forecasting_service)
            ])

        eb_fulfilment_completed_rule = _eb.Rule(
            self,
            id="lab2-cm-eventRule-fulfilment-completed",
            description="Fulfilment completedevent",
            enabled=True,
            event_bus=eb,
            event_pattern=eb_fulfilment_completed_pattern,
            rule_name="lab2-FulfilmentCompleted",
            targets=[_ebt.LambdaFunction(handler=fn_lambda_logistic_service)])
        api = _ag.RestApi(
            self,
            id='lab2-cm-api-gateway',
        )
        api_lambda_integration = _ag.LambdaIntegration(fn_lambda_order_service)
        api.root.add_resource('order').add_method('GET',
                                                  api_lambda_integration)
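For reference, an order_created event published to the custom bus fans out to the invoice, fulfilment, and forecasting functions; the rules match on detail-type only, so the source is free-form. A minimal sketch (the payload is illustrative):

import boto3
import json

events = boto3.client("events")
events.put_events(Entries=[{
    "EventBusName": "lab2-cm-eventbus",
    "Source": "com.lab2.orders",      # illustrative; the rules match detail-type only
    "DetailType": "order_created",
    "Detail": json.dumps({"ID": "order-001", "status": "NEW"}),
}])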
Code example #12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        email_subscription_parameter = core.CfnParameter(
            self,
            "EmailSubscriptionParameter",
            description="Email Address for Notification Subscription")
        email_subscription = email_subscription_parameter.value_as_string

        ar1 = accessanalyzer.CfnAnalyzer.ArchiveRuleProperty(
            rule_name="test",
            filter=[
                accessanalyzer.CfnAnalyzer.FilterProperty(
                    property="principal.AWS", eq=["123456789123"])
            ])
        analyzer = accessanalyzer.CfnAnalyzer(
            self,
            id="accessanalyzer",
            type="ACCOUNT",
            tags=[core.CfnTag(key="AccessAnalyzerType", value="ACCOUNT")],
            archive_rules=[ar1])

        runtime = aws_lambda.Runtime.PYTHON_3_8

        boto3_lambda_layer = aws_lambda.LayerVersion(
            self,
            "Boto3LambdaLayer",
            code=aws_lambda.AssetCode("./layers/boto3"),
            compatible_runtimes=[runtime],
            description="Boto3 Lambda Layer")

        context_enrichment = aws_lambda.Function(
            self,
            "context_enrichment",
            runtime=runtime,
            handler="app.handler",
            code=aws_lambda.AssetCode("./functions/context-enrichment"),
            layers=[boto3_lambda_layer])
        handler_statement = iam.PolicyStatement(actions=[
            "iam:ListRoleTags", "s3:GetBucketTagging", "lambda:ListTags",
            "sqs:ListQueueTags", "kms:ListAliases", "kms:ListResourceTags"
        ],
                                                effect=iam.Effect.ALLOW,
                                                resources=["*"])
        context_enrichment.add_to_role_policy(handler_statement)

        cmk_key = kms.Key(
            self,
            "SNSEncryptionAtRestKey",
            description="SNS Encryption at rest key",
            alias="sns-encryption-at-rest",
            enable_key_rotation=True,
        )

        email_topic = sns.Topic(
            self,
            "AccessAnalyzerNotificationTopic",
            display_name="Access Analyzer Finding Notification Topic",
            master_key=cmk_key)
        email_topic.add_subscription(
            subscriptions.EmailSubscription(email_subscription))

        notification = aws_lambda.Function(
            self,
            "notification",
            runtime=runtime,
            handler="app.handler",
            code=aws_lambda.AssetCode("./functions/notification"),
            layers=[boto3_lambda_layer],
            environment={"SNS_TOPIC_ARN": email_topic.topic_arn})
        notification_statement = iam.PolicyStatement(actions=[
            "sns:Publish",
        ],
                                                     effect=iam.Effect.ALLOW,
                                                     resources=["*"])
        notification.add_to_role_policy(notification_statement)
        cmk_key.grant_encrypt_decrypt(notification)

        archive_access_analyzer_finding = aws_lambda.Function(
            self,
            "archive-access-analyzer-finding",
            runtime=runtime,
            handler="app.handler",
            code=aws_lambda.AssetCode(
                "./functions/archive-access-analyzer-finding"),
            layers=[boto3_lambda_layer])
        archive_statement = iam.PolicyStatement(actions=[
            "access-analyzer:UpdateFindings",
        ],
                                                effect=iam.Effect.ALLOW,
                                                resources=["*"])
        archive_access_analyzer_finding.add_to_role_policy(archive_statement)

        evaluate_access_analyzer_finding = aws_lambda.Function(
            self,
            "evaluate-access-analyzer-finding",
            runtime=runtime,
            handler="app.handler",
            code=aws_lambda.AssetCode(
                "./functions/evaluate-access-analyzer-finding"),
            layers=[boto3_lambda_layer])

        #https://docs.aws.amazon.com/cdk/api/latest/docs/aws-stepfunctions-readme.html
        access_analyzer_handler_task = sfn.Task(
            self,
            "Context Enrichment",
            task=sfn_tasks.InvokeFunction(context_enrichment),
            result_path="$.guid",
        )

        notification_task = sfn.Task(
            self,
            "Send Notification",
            task=sfn_tasks.InvokeFunction(notification),
            result_path="$.guid",
        )

        archive_task = sfn.Task(
            self,
            "Archive Finding",
            task=sfn_tasks.InvokeFunction(archive_access_analyzer_finding),
            result_path="$.guid",
        )

        evaluate_task = sfn.Task(
            self,
            "Evaluate Risk Level",
            task=sfn_tasks.InvokeFunction(evaluate_access_analyzer_finding),
            result_path="$.guid",
        )

        definition=access_analyzer_handler_task. \
          next(evaluate_task). \
          next(sfn.Choice(self, "Archive?"). \
            when(sfn.Condition.string_equals("$.guid.status", "ARCHIVE"), archive_task). \
            when(sfn.Condition.string_equals("$.guid.status", "NOTIFY"), notification_task) \
          )

        state_machine = sfn.StateMachine(
            self,
            "Access-Analyzer-Automatic-Finding-Archive-State-Machine",
            definition=definition,
            timeout=core.Duration.minutes(5),
        )

        #https://docs.aws.amazon.com/step-functions/latest/dg/tutorial-cloudwatch-events-s3.html
        access_analyzer_finding_rule = aws_events.Rule(
            self,
            "AccessAnalzyerFindingActiveEventRule",
            description="Access Analyzer Finding Event Active",
            enabled=True,
            event_pattern=aws_events.EventPattern(
                source=["aws.access-analyzer"],
                detail_type=["Access Analyzer Finding"],
                detail={"status": ["ACTIVE"]}),
            targets=[
                aws_events_targets.SfnStateMachine(state_machine),
                aws_events_targets.LambdaFunction(context_enrichment)
            ])
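Custom events cannot use a source beginning with "aws.", so the rule above cannot be fed with put_events; the state machine is easiest to exercise directly. A hedged sketch (the state machine ARN and finding payload are illustrative):

import boto3
import json

sfn = boto3.client("stepfunctions")
sfn.start_execution(
    stateMachineArn="arn:aws:states:...",  # from the deployed stack
    input=json.dumps({"detail": {"status": "ACTIVE",
                                 "resource": "arn:aws:s3:::example-bucket"}}))

Code example #13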
    def __init__(self, scope: core.Construct, construct_id: str,
                 stack_log_level: str, orders_bus, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below

        # Read Lambda Code
        try:
            with open(
                    "stacks/back_end/serverless_eventbridge_consumer_stack/lambda_src/eventbridge_data_consumer.py",
                    encoding="utf-8",
                    mode="r") as f:
                msg_consumer_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise
        msg_consumer_fn = _lambda.Function(
            self,
            "msgConsumerFn",
            function_name="events_consumer_fn",
            description="Process events received from the EventBridge event bus",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(msg_consumer_fn_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": stack_log_level,
                "APP_ENV": "Production"
            })

        msg_consumer_fn_version = msg_consumer_fn.latest_version
        msg_consumer_fn_version_alias = _lambda.Alias(
            self,
            "msgConsumerFnAlias",
            alias_name="MystiqueAutomation",
            version=msg_consumer_fn_version)

        # Create a custom log group for the consumer function
        msg_consumer_fn_lg = _logs.LogGroup(
            self,
            "msgConsumerFnLogGroup",
            log_group_name=f"/aws/lambda/{msg_consumer_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)

        # Restrict the consumer Lambda to be invoked only from the stack owner account
        msg_consumer_fn.add_permission("restrictLambdaInvocationToOwnAccount",
                                       principal=_iam.AccountRootPrincipal(),
                                       action="lambda:InvokeFunction",
                                       source_account=core.Aws.ACCOUNT_ID,
                                       source_arn=orders_bus.event_bus_arn)

        # Event Pattern
        self.orders_pattern = _evnts.EventPattern(detail_type=["sales-events"])

        # EventBridge Routing Rule
        self.orders_routing = _evnts.Rule(
            self,
            f"ordersEventRoutingRule01",
            description="A simple events routing rule",
            enabled=True,
            event_bus=orders_bus,
            event_pattern=self.orders_pattern,
            rule_name="orders_routing_to_consumer",
            targets=[_evnts_tgt.LambdaFunction(handler=msg_consumer_fn)])

        self.orders_routing.apply_removal_policy(core.RemovalPolicy.DESTROY)

        # NOTE: `data_producer_fn` is defined in the companion producer stack;
        # the permission below belongs there and would raise a NameError here.
        # data_producer_fn.add_permission(
        #     "restrictLambdaInvocationToFhInOwnAccount",
        #     principal=_iam.AccountRootPrincipal(),
        #     action="lambda:InvokeFunction",
        #     source_account=core.Aws.ACCOUNT_ID)

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_2 = core.CfnOutput(
            self,
            "msgConsumer",
            value=
            f"https://console.aws.amazon.com/lambda/home?region={core.Aws.REGION}#/functions/{msg_consumer_fn.function_name}",
            description="Process events received from eventbridge event bus")
Code example #14
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        load_dotenv(override=True)

        org_list = getenv("GITHUB_ORGS").split(",")

        # S3 bucket
        bucket_name = getenv("S3_ROOT_BUCKET")
        bucket = s3.Bucket(
            self,
            bucket_name,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            bucket_name=bucket_name,
            encryption=s3.BucketEncryption.S3_MANAGED,
            versioned=False,
        )

        # SQS queue
        sqs_queue = sqs.Queue(
            self,
            "GitHubDatastoreQueue",
            encryption=sqs.QueueEncryption.KMS_MANAGED,
            queue_name="GitHubDatastoreQueue",
            visibility_timeout=core.Duration.minutes(15),
        )

        # SSM config
        config_manager = ssm.StringListParameter(
            self,
            "GitHubDatastoreOrgList",
            string_list_value=org_list,
            description="List of orgs to get data",
            parameter_name="GitHubDatastoreOrgList",
        )

        # Lambda function role
        lambda_role = iam.Role(
            self,
            "GitHubDataLambdaRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name("AmazonS3FullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSQSFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name("AWSLambdaFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "SecretsManagerReadWrite"
                ),
            ],
        )

        # Lambda function(s)
        oss_datastore_repo_lambda = _lambda.Function(
            self,
            "GitHubRepoAggregate",
            runtime=_lambda.Runtime.PYTHON_3_6,
            code=_lambda.Code.from_asset("lambda/package.zip"),
            handler="github-data-pull.github_repo_handler",
            role=lambda_role,
            timeout=core.Duration.minutes(15),
        )

        # TODO: rate limit this as I am running out of tokens
        oss_datastore_pull_lambda = _lambda.Function(
            self,
            "GitHubDataHandler",
            runtime=_lambda.Runtime.PYTHON_3_6,
            code=_lambda.Code.from_asset("lambda/package.zip"),
            events=[lambda_events.SqsEventSource(sqs_queue)],
            handler="github-data-pull.github_data_handler",
            reserved_concurrent_executions=2,  # To slow down the token drain
            role=lambda_role,
            timeout=core.Duration.minutes(15),
        )

        # lambda scheduler
        lambda_rule = events.Rule(
            self,
            "Cron Rule",
            description="Setup cron schedule to get org data",
            schedule=events.Schedule.cron(
                # timezone is GMT and unconfigurable
                # adjust accordingly for your desired timezone
                minute="0",
                hour="7",
                month="*",
                year="*",
                week_day="*",
            ),
            targets=[targets.LambdaFunction(oss_datastore_repo_lambda)],
        )
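The repo-aggregate function presumably feeds GitHubDatastoreQueue, which the data handler consumes through its SQS event source. A hedged sketch of that producing side (the message shape is an assumption, since the handler code is not shown):

import boto3
import json

sqs = boto3.client("sqs")
queue_url = sqs.get_queue_url(QueueName="GitHubDatastoreQueue")["QueueUrl"]
sqs.send_message(
    QueueUrl=queue_url,
    MessageBody=json.dumps({"org": "example-org", "repo": "example-repo"}))  # assumed payload shape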
Code example #15
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #
        # Producer Lambda
        #
        event_producer_lambda = _lambda.Function(
            self,
            "eventProducerLambda",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="event_producer_lambda.lambda_handler",
            code=_lambda.Code.from_asset("lambda"))

        event_policy = iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                           resources=['*'],
                                           actions=['events:PutEvents'])

        event_producer_lambda.add_to_role_policy(event_policy)

        #
        # Approved Consumer1
        #
        event_consumer1_lambda = _lambda.Function(
            self,
            "eventConsumer1Lambda",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="event_consumer_lambda.lambda_handler",
            code=_lambda.Code.from_asset("lambda"))

        event_consumer1_rule = events.Rule(
            self,
            'eventConsumer1LambdaRule',
            description='Approved Transactions',
            event_pattern=events.EventPattern(source=['com.mycompany.myapp']))

        event_consumer1_rule.add_target(
            targets.LambdaFunction(handler=event_consumer1_lambda))

        #
        # Approved Consumer2
        #
        event_consumer2_lambda = _lambda.Function(
            self,
            "eventConsumer2Lambda",
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler="event_consumer_lambda.lambda_handler",
            code=_lambda.Code.from_asset("lambda"))

        event_consumer2_rule = events.Rule(
            self,
            'eventConsumer2LambdaRule',
            description='Approved Transactions',
            event_pattern=events.EventPattern(source=['com.mycompany.myapp']))
        event_consumer2_rule.add_target(
            targets.LambdaFunction(handler=event_consumer2_lambda))

        #
        # Approved Consumer3
        #

        # Create S3 bucket for KinesisFirehose destination
        ingest_bucket = s3.Bucket(self, 'test-ingest-bucket')

        # Create a Role for KinesisFirehose
        firehose_role = iam.Role(
            self,
            'myRole',
            assumed_by=iam.ServicePrincipal('firehose.amazonaws.com'))

        # Create and attach policy that gives permissions to write in to the S3 bucket.
        iam.Policy(
            self,
            's3_attr',
            policy_name='s3kinesis',
            statements=[
                iam.PolicyStatement(actions=['s3:*'],
                                    resources=[
                                        'arn:aws:s3:::' +
                                        ingest_bucket.bucket_name + '/*'
                                    ])
            ],
            # resources=['*'])],
            roles=[firehose_role],
        )

        event_consumer3_kinesisfirehose = _firehose.CfnDeliveryStream(
            self,
            "consumer3-firehose",
            s3_destination_configuration=_firehose.CfnDeliveryStream.
            S3DestinationConfigurationProperty(
                bucket_arn=ingest_bucket.bucket_arn,
                buffering_hints=_firehose.CfnDeliveryStream.
                BufferingHintsProperty(interval_in_seconds=60),
                compression_format="UNCOMPRESSED",
                role_arn=firehose_role.role_arn))

        event_consumer3_rule = events.Rule(
            self,
            'eventConsumer3KinesisRule',
            description='Approved Transactions',
            event_pattern=events.EventPattern(source=['com.mycompany.myapp']))
        event_consumer3_rule.add_target(
            targets.KinesisFirehoseStream(
                stream=event_consumer3_kinesisfirehose))

        # defines an API Gateway REST API resource backed by our "event_producer_lambda" function.
        api = api_gw.LambdaRestApi(self,
                                   'SampleAPI-EventBridge-Multi-Consumer',
                                   handler=event_producer_lambda,
                                   proxy=False)
        items = api.root.add_resource("items")
        items.add_method("POST")  # POST /items
Code example #16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Kinesis to lambda
        self.stream_lambda = kinesis_lambda.KinesisStreamsToLambda(
            self,
            'clickstream',
            lambda_function_props=_lambda.FunctionProps(
                runtime=_lambda.Runtime.PYTHON_3_7,
                handler='index.lambda_handler',
                code=_lambda.Code.inline(
                    get_code('send_data_to_firehose.py'))),
            kinesis_stream_props=kinesis.StreamProps(
                stream_name='clickstream',
                retention_period=core.Duration.days(1),
                shard_count=4),
            kinesis_event_source_props=lambda_sources.KinesisEventSourceProps(
                starting_position=_lambda.StartingPosition.TRIM_HORIZON,
                batch_size=1))

        # Lambda to produce data
        self.produce_fake_data = _lambda.Function(
            self,
            'produce_data',
            runtime=_lambda.Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(90),
            handler='index.lambda_handler',
            code=_lambda.Code.inline(get_code('produce_data.py')),
            environment={
                'STREAM_NAME': self.stream_lambda.kinesis_stream.stream_name
            })
        self.stream_lambda.kinesis_stream.grant_read_write(
            self.produce_fake_data)

        # EventBridge to activate my function above
        self.event_rule = events.Rule(
            self,
            'scheduledRule',
            schedule=events.Schedule.expression('rate(1 minute)'))
        self.event_rule.add_target(
            targets.LambdaFunction(self.produce_fake_data))

        # S3 Bucket
        self.bucket = s3.Bucket(self,
                                'data-clicks-lake',
                                removal_policy=core.RemovalPolicy.DESTROY,
                                auto_delete_objects=True)

        # Glue
        self.glue_db_analytical = glue.Database(
            self,
            'analytic_clickstream',
            database_name='clickstream_db',
            location_uri=None,
        )

        self.glue_table_analytical = glue.Table(
            self,
            'analytical-table',
            table_name='analytical-table',
            columns=[
                glue_column('custid', 'int'),
                glue_column('trafficfrom', 'string'),
                glue_column('url', 'string'),
                glue_column('device', 'string'),
                glue_column('touchproduct', 'int'),
                glue_column('trans_timestamp', 'string')
            ],
            database=self.glue_db_analytical,
            data_format=glue.DataFormat.PARQUET,
            bucket=self.bucket,
            s3_prefix='kinesis/',
        )

        # Firehose
        iam_role_firehose_analytical = self.create_firehose_role()
        self.bucket.grant_read_write(iam_role_firehose_analytical)

        firehose_props = FirehoseProps(
            bucket=self.bucket,
            role=iam_role_firehose_analytical,
            stream=self.stream_lambda.kinesis_stream,
            glue_db=self.glue_db_analytical,
            glue_table=self.glue_table_analytical)

        self.firehose = FirehoseLib(self, 'firehose_clickstream',
                                    firehose_props)

        # Elasticsearch
        self.es_domain = ElasticsearchLib(self,
                                          'ES-clickstream-domain').es_domain

        # Lambda to send data to Elasticsearch
        self.send_data_to_elasticsearch = lambda_python.PythonFunction(
            self,
            'clickstream_to_es',
            entry='./analytics_ml_flow/lambda/lambda_with_requirements/',
            handler='handler',
            timeout=core.Duration.seconds(180),
            index='Kinesis_ES.py',
            environment={
                'ES_HOST_HTTP': self.es_domain.domain_endpoint,
                'ES_INDEX': 'clickstream',
                'ES_IND_TYPE': 'transactions',
                'ES_REGION': 'us-west-2',
            })
        self.es_domain.grant_index_read_write('clickstream',
                                              self.send_data_to_elasticsearch)
        self.es_domain.grant_read_write(self.send_data_to_elasticsearch)

        stream_source = lambda_sources.KinesisEventSource(
            self.stream_lambda.kinesis_stream,
            starting_position=_lambda.StartingPosition.TRIM_HORIZON,
            batch_size=1)

        self.stream_lambda.kinesis_stream.grant_read(
            self.send_data_to_elasticsearch)
        self.send_data_to_elasticsearch.add_event_source(stream_source)

        # Glue Crawler
        crawler_role = self.create_crawler_permissions()
        glue_props = GlueCrawlerProps(bucket=self.bucket, role=crawler_role)
        self.glue_crawler = GlueCrawlerLib(self, 'glueCrawler', glue_props)
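This stack leans on project helpers (get_code, glue_column, FirehoseProps, FirehoseLib, ElasticsearchLib, GlueCrawlerLib) that are not shown. A minimal sketch of the two simplest ones, consistent with the call sites above (the actual project code may differ):

from pathlib import Path
from aws_cdk import aws_glue as glue

def get_code(file_name: str) -> str:
    # read an inline-Lambda source file from the project tree (assumed layout)
    return Path("lambdas", file_name).read_text()

def glue_column(name: str, col_type: str) -> glue.Column:
    # shorthand for a Glue table column definition
    return glue.Column(name=name,
                       type=glue.Type(input_string=col_type, is_primitive=True))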
Code example #17
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Parameters
        notification_email_address = CfnParameter(
            self,
            "notification_email_address",
            type="String",
            min_length=7,
            description=
            "The E-mail address subscribed to notifications when an S3 bucket is detected as open to the public."
        )
        profiling = CfnParameter(
            self,
            "profiling",
            type="String",
            allowed_values=["TRUE", "FALSE"],
            default="FALSE",
            description=
            "Enable Profiling on Lambda functions: TRUE or FALSE. Default: FALSE"
        )
        tracing = CfnParameter(
            self,
            "tracing",
            type="String",
            allowed_values=["TRUE", "FALSE"],
            default="FALSE",
            description=
            "Enable tracing on Lambda functions: TRUE or FALSE. Default: FALSE"
        )
        trusted_advisor_refresh_minutes = CfnParameter(
            self,
            "trusted_advisor_refresh_minutes",
            type="Number",
            default=6,
            min_value=5,
            max_value=1440,
            description=
            "Number of minutes to schedule a trusted advisor refresh. Default: 6"
        )
        # Note: CfnParameter values resolve only at deploy time, so these
        # synth-time comparisons see an unresolved token; as written, the
        # profiling/tracing flags effectively stay at their FALSE defaults.
        enable_profiling = profiling.value_as_string == 'TRUE'
        enable_tracing = aws_lambda.Tracing.ACTIVE
        if tracing.value_as_string != 'TRUE':
            enable_tracing = aws_lambda.Tracing.DISABLED

        # Layers
        dependencies_layer = aws_lambda.LayerVersion(
            self,
            "dependenciesLayer",
            code=aws_lambda.Code.from_asset(
                "lambda_functions/dependencies_layer/"),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
        )
        # create SNS target
        email_notification_topic = sns.Topic(
            self,
            'taEmailNotificationTopic',
            display_name='taEmailNotificationTopic',
            topic_name='taEmailNotificationTopic')
        # add subscription
        sns.Subscription(self,
                         'emailSubscription',
                         protocol=sns.SubscriptionProtocol.EMAIL,
                         endpoint=notification_email_address.value_as_string,
                         topic=email_notification_topic)

        default_event_bus = events.EventBus.from_event_bus_name(
            self, 'default', 'default')
        ta_event_pattern = events.EventPattern(
            source=['aws.trustedadvisor'],
            detail_type=['Trusted Advisor Check Item Refresh Notification'],
            detail={
                'check-name': ['Amazon S3 Bucket Permissions'],
                'status': ['WARN', 'ERROR']
            })
        # Lambda function to trigger when TA check flagged
        ta_check_s3_open_lambda_function_code = aws_lambda.AssetCode(
            'lambda_functions/s3openbucket')
        ta_check_s3_open_lambda_function = aws_lambda.Function(
            self,
            'ta_s3_open_bucket',
            code=ta_check_s3_open_lambda_function_code,
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='s3openbucket.lambda_handler',
            description='Function Triggered from Trusted Advisor '
            'to Block public access to an S3 Bucket',
            function_name='ta-check-s3-open-lambda-function',
            memory_size=128,
            profiling=enable_profiling,
            tracing=enable_tracing,
            log_retention=aws_logs.RetentionDays.ONE_WEEK,
            timeout=Duration.seconds(10),
            environment={'topic_arn': email_notification_topic.topic_arn},
            initial_policy=[
                aws_iam.PolicyStatement(actions=[
                    's3:GetBucketPolicy', 's3:DeleteBucketPolicy',
                    's3:PutBucketPolicy', 's3:GetAccountPublicAccessBlock',
                    's3:GetBucketPublicAccessBlock',
                    's3:PutAccountPublicAccessBlock',
                    's3:PutBucketPublicAccessBlock', 's3:GetBucketAcl',
                    's3:GetObjectAcl', 's3:PutBucketAcl', 's3:PutObjectAcl'
                ],
                                        effect=aws_iam.Effect.ALLOW,
                                        resources=['*']),
                aws_iam.PolicyStatement(
                    actions=['SNS:Publish'],
                    effect=aws_iam.Effect.ALLOW,
                    resources=[email_notification_topic.topic_arn])
            ])
        events.Rule(
            self,
            's3PublicBucketRule',
            description=
            'Blocks Public access on an S3 bucket once detected by Trusted Advisor',
            event_pattern=ta_event_pattern,
            event_bus=default_event_bus,
            targets=[targets.LambdaFunction(ta_check_s3_open_lambda_function)])
        # Refresh TA check every X minutes
        # Lambda function to trigger when TA check flagged
        ta_refresh_lambda_function_code = aws_lambda.AssetCode(
            'lambda_functions/refreshTrustedAdvisorCheck')
        ta_refresh_lambda_function = aws_lambda.Function(
            self,
            'refresh_ta_check',
            code=ta_refresh_lambda_function_code,
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='refreshTrustedAdvisorCheck.lambda_handler',
            description='Refreshes Trusted Advisor checks',
            function_name='ta-refresh-ta-check-lambda-function',
            memory_size=128,
            profiling=enable_profiling,
            tracing=enable_tracing,
            log_retention=aws_logs.RetentionDays.ONE_WEEK,
            timeout=Duration.seconds(5),
            initial_policy=[
                aws_iam.PolicyStatement(actions=[
                    'support:DescribeTrustedAdvisorChecks',
                    'support:RefreshTrustedAdvisorCheck',
                    'support:DescribeTrustedAdvisorCheckResult'
                ],
                                        effect=aws_iam.Effect.ALLOW,
                                        resources=['*'])
            ])
        ta_refresh_lambda_function.add_layers(dependencies_layer)
        events.Rule(
            self,
            'refreshTAS3BucketPermissionsRule',
            schedule=events.Schedule.rate(
                Duration.minutes(
                    trusted_advisor_refresh_minutes.value_as_number)),
            rule_name='refreshTAS3BucketPermissionsRule',
            targets=[targets.LambdaFunction(ta_refresh_lambda_function)])
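The refresh function presumably calls the Support API, which requires a Business or Enterprise support plan and is served only from us-east-1. A hedged sketch of such a handler:

import boto3

# the Support API is only available via the us-east-1 endpoint
support = boto3.client("support", region_name="us-east-1")

def lambda_handler(event, context):
    checks = support.describe_trusted_advisor_checks(language="en")["checks"]
    for check in checks:
        if check["name"] == "Amazon S3 Bucket Permissions":
            support.refresh_trusted_advisor_check(checkId=check["id"])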
Code example #18
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        replicator_role = iam.Role(
            scope=self,
            role_name='SecretsManagerRegionReplicatorRole',
            id='region-replicator-role',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={
                'ReplicatorPermissions':
                iam.PolicyDocument(statements=[
                    iam.PolicyStatement(resources=['*'],
                                        actions=[
                                            'kms:Decrypt',
                                            'kms:Encrypt',
                                            'kms:GenerateDataKey',
                                        ]),
                    iam.PolicyStatement(resources=[
                        'arn:aws:secretsmanager:{region}:{account}:secret:*'.
                        format(region=constants.ACCOUNT_REGION,
                               account=constants.ACCOUNT_ID)
                    ],
                                        actions=[
                                            'secretsmanager:DescribeSecret',
                                            'secretsmanager:GetSecretValue'
                                        ]),
                    iam.PolicyStatement(
                        resources=[
                            'arn:aws:secretsmanager:{region}:{account}:secret:*'
                            .format(region=constants.TARGET_REGION_1,
                                    account=constants.ACCOUNT_ID)
                        ],
                        actions=[
                            'secretsmanager:CreateSecret',
                            'secretsmanager:UpdateSecretVersionStage',
                            'secretsmanager:PutSecretValue',
                            'secretsmanager:DescribeSecret'
                        ]),
                    iam.PolicyStatement(
                        resources=[
                            'arn:aws:secretsmanager:{region}:{account}:secret:*'
                            .format(region=constants.TARGET_REGION_2,
                                    account=constants.ACCOUNT_ID)
                        ],
                        actions=[
                            'secretsmanager:CreateSecret',
                            'secretsmanager:UpdateSecretVersionStage',
                            'secretsmanager:PutSecretValue',
                            'secretsmanager:DescribeSecret'
                        ]),
                ])
            },
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    managed_policy_name=
                    'service-role/AWSLambdaBasicExecutionRole'),
            ])

        fn = _lambda.Function(
            scope=self,
            id='replicator-lambda',
            runtime=_lambda.Runtime.PYTHON_3_8,
            role=replicator_role,
            handler='index.handler',
            code=_lambda.Code.from_asset(path=path.join('lambda')),
            log_retention=logs.RetentionDays.ONE_WEEK,
            retry_attempts=0,
            environment={
                'TargetRegions':
                constants.TARGET_REGION_1 + ";" + constants.TARGET_REGION_2,
            },
        )

        rule = events.Rule(
            scope=self,
            id='event-rule',
        )

        rule.add_target(target=targets.LambdaFunction(handler=fn))
        rule.add_event_pattern(source=['aws.secretsmanager'],
                               detail_type=['AWS API Call via CloudTrail'],
                               region=[constants.ACCOUNT_REGION],
                               detail={
                                   "eventSource":
                                   ['secretsmanager.amazonaws.com'],
                                   "eventName":
                                   ['CreateSecret', 'PutSecretValue']
                               })
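A hedged sketch of the replicator handler itself, matching the IAM permissions and TargetRegions environment variable above (CloudTrail payload handling is simplified; a real handler would also cover PutSecretValue events, whose detail carries secretId rather than name):

import os
import boto3

def handler(event, context):
    # copy the secret named in the CloudTrail event to each target region
    secret_id = event["detail"]["requestParameters"]["name"]
    source = boto3.client("secretsmanager")
    value = source.get_secret_value(SecretId=secret_id)["SecretString"]
    for region in os.environ["TargetRegions"].split(";"):
        target = boto3.client("secretsmanager", region_name=region)
        try:
            target.create_secret(Name=secret_id, SecretString=value)
        except target.exceptions.ResourceExistsException:
            target.put_secret_value(SecretId=secret_id, SecretString=value)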
Code example #19
File: __init__.py  Project: shaftoe/api-l3x-in
    def __init__(self, scope: core.Construct, id: str,  # pylint: disable=redefined-builtin
                 lambda_notifications: aws_lambda.IFunction, **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        # CloudWatch LogGroup and Stream to store 'since' timestamp value
        since_log_group = aws_logs.LogGroup(
            self,
            f"{id}-log-group",
            log_group_name=f"{id}-timestamps",
            retention=DEFAULT_LOG_RETENTION,
            removal_policy=core.RemovalPolicy.DESTROY,
        )
        since_log_group.add_stream(
            f"{id}-log-stream",
            log_stream_name=since_log_group.log_group_name,
        )

        # Lambda shared code
        lambda_code = code_from_path(path=f"lib/stacks/{id}/lambdas")

        # Lambda create_doc (and layers): build document file and store to S3 bucket
        bucket = get_bucket(self, f"{id}-bucket")

        lambda_create_doc = get_lambda(
            self,
            id + "-create-document",
            code=lambda_code,
            handler="create_doc.handler",
            environment={
                "DOCUMENT_BUCKET": bucket.bucket_name,
            },
            layers=[get_layer(self, layer_name=layer, prefix=id)
                    for layer in ("readability", "requests_oauthlib")],
            timeout=core.Duration.minutes(5),  # pylint: disable=no-value-for-parameter
        )
        bucket.grant_write(lambda_create_doc)

        # Lambda send_to_kindle: invoked when new documents dropped into S3 bucket,
        # deliver document as email attachment via lambda_notifications
        lambda_send_to_kindle = get_lambda(
            self,
            id + "-send-to-kindle",
            code=lambda_code,
            handler="send_to_kindle.handler",
            environment={
                "KINDLE_EMAIL": env["KINDLE_EMAIL"],
                "LAMBDA_NOTIFICATIONS": lambda_notifications.function_name,
                "DOCUMENT_SRC_BUCKET": bucket.bucket_name,
                "POCKET_CONSUMER_KEY": env["POCKET_CONSUMER_KEY"],
                "POCKET_SECRET_TOKEN": env["POCKET_SECRET_TOKEN"],
            }
        )
        bucket.add_event_notification(
            event=aws_s3.EventType.OBJECT_CREATED_PUT,
            dest=aws_s3_notifications.LambdaDestination(lambda_send_to_kindle),
        )
        lambda_notifications.grant_invoke(lambda_send_to_kindle)
        aws_iam.Policy(
            self,
            f"{id}-mail-attachment-policy",
            roles=[lambda_notifications.role],
            statements=[
                aws_iam.PolicyStatement(
                    actions=["s3:GetObject"],
                    resources=[f"{bucket.bucket_arn}/*"]
                )
            ],
        )

        # Lambda reader: fetch new articles from Pocket and fan-out trigger create_doc Lambda
        lambda_reader = get_lambda(
            self,
            id + "-reader",
            code=lambda_code,
            handler="reader.handler",
            environment={
                "LAMBDA_PUBLISHER": lambda_create_doc.function_name,
                "POCKET_CONSUMER_KEY": env["POCKET_CONSUMER_KEY"],
                "POCKET_SECRET_TOKEN": env["POCKET_SECRET_TOKEN"],
                "SINCE_LOG_GROUP": since_log_group.log_group_name,
            },
        )
        since_log_group.grant(
            lambda_reader,
            "logs:GetLogEvents",
            "logs:PutLogEvents",
        )
        lambda_create_doc.grant_invoke(lambda_reader)

        # Cloudwatch cronjob event to check for new articles every hour
        cronjob = aws_events.Rule(
            self,
            f"{id}-scheduled-event",
            enabled=True,
            schedule=aws_events.Schedule.cron(minute="0"),  # pylint: disable=no-value-for-parameter
        )
        cronjob.add_target(aws_events_targets.LambdaFunction(handler=lambda_reader))
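This stack relies on repo helpers (get_lambda, get_layer, get_bucket, code_from_path, env). A rough sketch of get_lambda consistent with the call sites above (the real helper in shaftoe/api-l3x-in may differ):

from aws_cdk import aws_lambda, core

def get_lambda(scope, id, *, code, handler, environment=None, layers=None,
               timeout=core.Duration.seconds(30)):
    # thin wrapper applying project-wide Lambda defaults (assumed shape)
    return aws_lambda.Function(
        scope, id,
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        code=code,
        handler=handler,
        environment=environment,
        layers=layers,
        timeout=timeout)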
Code example #20
    def __init__(self, scope: core.Construct, id: str, *, email: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        
        # Code Asset 
        lambda_code = lambda_.AssetCode("./assets/")
        
        # DynamoDB
        dynamo_store_db = dynamo.Table(self,"products_to_check_db",
                                        partition_key=dynamo.Attribute(name="ProductTs",type=dynamo.AttributeType.NUMBER))

        # SNS Topics
        sns_input_topic = sns.Topic(self,"checker_url_topic")
        sns_output_topic = sns.Topic(self,"email_topic")

        # Lambda function that scrapes the pages & emails
        lambda_checker = lambda_.Function(
            self, "lambda_checker",
            code=lambda_code,
            handler="checker.handler",
            timeout=core.Duration.seconds(60),
            runtime=lambda_.Runtime.NODEJS_12_X,
            environment= {
                "TOPIC_ARN": sns_output_topic.topic_arn,
                "DYNAMO_TABLE": dynamo_store_db.table_name
            }
        )
        # Subscribe to SNS
        sns_input_topic.add_subscription(subs.LambdaSubscription(lambda_checker))
        sns_output_topic.add_subscription(subs.EmailSubscription(email))

        # Lambda function that populates SNS
        lambda_invoker = lambda_.Function(
            self, "lambda_invoker",
            code=lambda_code,
            handler="invoker.handler",
            timeout=core.Duration.seconds(300),
            runtime=lambda_.Runtime.NODEJS_12_X,
            environment= {
                "TOPIC_ARN": sns_input_topic.topic_arn,
                "DYNAMO_TABLE": dynamo_store_db.table_name
            }
        )

        # Grant access to publish on SNS topics
        sns_input_topic.grant_publish(lambda_invoker)
        sns_output_topic.grant_publish(lambda_checker)

        # Grant access to Dynamo for lambdas
        dynamo_store_db.grant_read_data(lambda_invoker)
        dynamo_store_db.grant_read_write_data(lambda_checker)
    
        # Run every day at 05:00 UTC
        # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
        rule = events.Rule(
            self, "runEveryDayAt5AM",
            schedule=events.Schedule.cron(
                minute='0',
                hour='5',
                month='*',
                week_day='*',
                year='*'),
        )
        rule.add_target(targets.LambdaFunction(lambda_invoker))
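To exercise the checker outside the daily schedule, a message can be published straight to the input topic. A minimal sketch (the topic ARN and message shape are assumptions, since the Node.js handlers are not shown):

import boto3
import json

sns = boto3.client("sns")
sns.publish(
    TopicArn="arn:aws:sns:...",  # checker_url_topic ARN from the deployed stack
    Message=json.dumps({"ProductTs": 1700000000,
                        "url": "https://example.com/product"}))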
Code example #21
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        
        lambda_policies = [iam.PolicyStatement(
                actions=[ "logs:CreateLogStream", "logs:PutLogEvents", "logs:CreateLogGroup"],
                effect=iam.Effect.ALLOW,
                resources=["arn:aws:logs:" + core.Aws.REGION + ":" + core.Aws.ACCOUNT_ID + ":*"]
            ), iam.PolicyStatement(
                actions=[ "dynamodb:*"],
                effect=iam.Effect.ALLOW,
                resources=["arn:aws:dynamodb:" + core.Aws.REGION + ":" + core.Aws.ACCOUNT_ID + ":*"]
            )]
            
        base_api = _apigw.RestApi(self, 'PetclinicApiGatewayWithCors',
            rest_api_name='PetclinicApiGatewayWithCors')
            
        api_resource = base_api.root.add_resource('api')
        
        website_bucket = _s3.Bucket(self, 'PetclinicWebsite',
            website_index_document='index.html',
            public_read_access=True,
            removal_policy=core.RemovalPolicy.DESTROY
        )
        
        deployment = _s3deploy.BucketDeployment(self, 'PetclinicDeployWebsite',
          sources=[_s3deploy.Source.asset('./spring-petclinic-static')],
          destination_bucket=website_bucket,
          retain_on_delete=False
          #destination_key_prefix='web/static'
        )
        
        # Modify the config.js with a CloudFormation custom resource
        modify_policy = [iam.PolicyStatement(
                actions=[ "s3:PutObject","s3:PutObjectAcl","s3:PutObjectVersionAcl","s3:GetObject"],
                effect=iam.Effect.ALLOW,
                resources=[website_bucket.bucket_arn + "/*"]
            ),iam.PolicyStatement(
                actions=[ "s3:ListBucket"],
                effect=iam.Effect.ALLOW,
                resources=[website_bucket.bucket_arn]
            ),iam.PolicyStatement(
                actions=[ "dynamodb:*"],
                effect=iam.Effect.ALLOW,
                resources=["arn:aws:dynamodb:" + core.Aws.REGION + ":" + core.Aws.ACCOUNT_ID + ":*"]
            )]
            
        with open("custom-resource-code/init.py", encoding="utf-8") as fp:
            code_body = fp.read()
        
        dynamodb_tables = []
        
        # Warm Lambda function Event rule
        event_rule = _event.Rule(self, 'PetclinicLambdaWarmRule',
            schedule=_event.Schedule.rate(core.Duration.minutes(3))
        )
        
        for service in ['customer', 'vet', 'visit']:
            table = _dynamodb.Table(self, service.capitalize() + 'Table',
              partition_key={ 'name': 'id', 'type': _dynamodb.AttributeType.STRING },
              removal_policy=core.RemovalPolicy.DESTROY,
              read_capacity=5,
              write_capacity=5,
            )
            
            dynamodb_tables.append(table.table_name)
                
            base_lambda = _lambda.Function(self,'ApiPetclinic' + service.capitalize() + 'Lambda',
                handler='org.springframework.samples.petclinic.' + service + 's.StreamLambdaHandler::handleRequest',
                runtime=_lambda.Runtime.JAVA_8,
                code=_lambda.Code.asset('./spring-petclinic-serverless/spring-petclinic-' + service +'s-serverless/target/spring-petclinic-' + service +'s-serverless-2.0.7.jar'),
                memory_size=1024,
                timeout=core.Duration.seconds(300),
                initial_policy=lambda_policies,
                environment={"DYNAMODB_TABLE_NAME":table.table_name, "SERVER_SERVLET_CONTEXT_PATH":"/api/" + service}
            )
        
            entity = api_resource.add_resource(service)
            entity.add_proxy(default_integration=_apigw.LambdaIntegration(base_lambda))
            self.add_cors_options(entity)
            event_rule.add_target(_target.LambdaFunction(handler=base_lambda))
            
        resource = _cfn.CustomResource(self, "S3ModifyCustomResource",
            provider=_cfn.CustomResourceProvider.lambda_(
                _lambda.SingletonFunction(
                    self, "CustomResourceSingleton",
                    uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                    code=_lambda.InlineCode(code_body),
                    handler="index.handler",
                    timeout=core.Duration.seconds(300),
                    runtime=_lambda.Runtime.PYTHON_3_7,
                    initial_policy=modify_policy
                )
            ),
            properties={"Bucket": website_bucket.bucket_name, 
                        "InvokeUrl":base_api.url,
                        "DynamoDBTables": dynamodb_tables
            }
        )
        
        core.CfnOutput(self,"PetclinicWebsiteUrl",export_name="PetclinicWebsiteUrl",value=website_bucket.bucket_website_url)
Code example #22
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #
        # Producer Lambda
        #
        atm_producer_lambda = _lambda.Function(
            self,
            "atmProducerLambda",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="handler.lambdaHandler",
            code=_lambda.Code.from_asset("lambda_fns/atmProducer"))

        event_policy = iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                           resources=['*'],
                                           actions=['events:PutEvents'])

        atm_producer_lambda.add_to_role_policy(event_policy)

        #
        # Approved Transaction Consumer
        #
        atm_consumer1_lambda = _lambda.Function(
            self,
            "atmConsumer1Lambda",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="handler.case1Handler",
            code=_lambda.Code.from_asset("lambda_fns/atmConsumer"))

        atm_consumer1_rule = events.Rule(self,
                                         'atmConsumer1LambdaRule',
                                         description='Approved Transactions',
                                         event_pattern=events.EventPattern(
                                             source=['custom.myATMapp'],
                                             detail_type=['transaction'],
                                             detail={"result": ["approved"]}))

        atm_consumer1_rule.add_target(
            targets.LambdaFunction(handler=atm_consumer1_lambda))

        #
        # NY Prefix Consumer
        #
        atm_consumer2_lambda = _lambda.Function(
            self,
            "atmConsumer2Lambda",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="handler.case2Handler",
            code=_lambda.Code.from_asset("lambda_fns/atmConsumer"))

        atm_consumer2_rule = events.Rule(
            self,
            'atmConsumer2LambdaRule',
            event_pattern=events.EventPattern(
                source=['custom.myATMapp'],
                detail_type=['transaction'],
                detail={"location": [{
                    "prefix": "NY-"
                }]}))

        atm_consumer2_rule.add_target(
            targets.LambdaFunction(handler=atm_consumer2_lambda))

        #
        # Not Approved Consumer
        #
        atm_consumer3_lambda = _lambda.Function(
            self,
            "atmConsumer3Lambda",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="handler.case3Handler",
            code=_lambda.Code.from_asset("lambda_fns/atmConsumer"))

        atm_consumer3_rule = events.Rule(
            self,
            'atmConsumer3LambdaRule',
            event_pattern=events.EventPattern(
                source=['custom.myATMapp'],
                detail_type=['transaction'],
                detail={"result": [{
                    "anything-but": "approved"
                }]}))

        atm_consumer3_rule.add_target(
            targets.LambdaFunction(handler=atm_consumer3_lambda))

        # defines an API Gateway REST API resource backed by our "atm_producer_lambda" function.
        api_gw.LambdaRestApi(self, 'Endpoint', handler=atm_producer_lambda)
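The three rules above demonstrate exact-match, prefix, and anything-but content filtering. A hedged sketch of events that would exercise each path (payloads are illustrative):

import boto3
import json

events = boto3.client("events")
events.put_events(Entries=[
    {   # routed to consumer 1 (result approved) and consumer 2 (NY- prefix)
        "Source": "custom.myATMapp", "DetailType": "transaction",
        "Detail": json.dumps({"result": "approved", "location": "NY-001"}),
    },
    {   # routed to consumer 3 only (anything-but approved)
        "Source": "custom.myATMapp", "DetailType": "transaction",
        "Detail": json.dumps({"result": "denied", "location": "MA-002"}),
    },
])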
Code example #23
    def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)


        codeLocation = 'lambdas'
        layerLocation = self.installRequirements(codeLocation)
        self.ip = self.getIp()
        self.vpc = self.createVpc()
        self.lambdaRole = self.createLambdaRole()
        self.lambdaCode = lambda_.Code.from_asset(codeLocation)
        self.lambdaLayer = lambda_.LayerVersion(self, 'lambdaLayer', 
            code=lambda_.Code.from_asset(layerLocation),
            compatible_runtimes=[
                lambda_.Runtime.PYTHON_3_8
            ]
        )
        self.statesRole = iam.Role(self, 'statesExecutionRole',
            assumed_by=iam.ServicePrincipal('states.amazonaws.com'),
            inline_policies={
                'StatesExecutionPolicy': iam.PolicyDocument(
                    statements=[
                        iam.PolicyStatement(
                            effect=iam.Effect.ALLOW,
                            actions=['lambda:InvokeFunction'],
                            resources=['*']
                        )
                    ]
                )
            }
        )

        api = agw.RestApi(self, 'lntipbot',
            endpoint_types=[agw.EndpointType.REGIONAL],
            deploy_options=agw.StageOptions(
                metrics_enabled=True
            )
        )

        api.root.add_resource('info', default_integration=agw.MockIntegration(
            integration_responses=[
                agw.IntegrationResponse(
                    status_code='301',
                    response_parameters={
                        'method.response.header.Location': '\'https://www.reddit.com/r/LNTipBot2/wiki/index\'',
                        'method.response.header.Cache-Control': '\'max-age=300\''
                    }
                )
            ],
            request_templates={
                'application/json': '{"statusCode": 301}'
            }
        )).add_method('GET',
            method_responses=[{
                'statusCode': '301',
                'responseParameters': {
                    'method.response.header.Location': True,
                    'method.response.header.Cache-Control': True
                }
            }]
        )

        api.root.add_resource('uri', default_integration=agw.LambdaIntegration(
            self.createLambda('invoiceUriFunction', 'getURI.getURI')
        )).add_method('GET')

        api.root.add_resource('qr', default_integration=agw.LambdaIntegration(
            self.createLambda('qrFunction', 'qrEncoder.qrEncoder')
        )).add_method('GET')

        events.Rule(self, 'oauthRefreshEvent',
            schedule=events.Schedule.rate(cdk.Duration.minutes(28)),
            targets=[eventsTargets.LambdaFunction(
                self.createLambda('oauthFunction', 'redditOAuthRequester.redditOAuthRequester')
            )]
        )

        self.settledInvoiceHandler = self.createLambda('settledInvoiceHandler', 'settledInvoiceHandler.settledInvoiceHandler')

        self.createLambda('apiTest', 'lambda_function.lambda_handler')
        
        withdrawWorkflow = self.createWithdrawWorkflow()
        tipWorkflow = self.createTipWorkflow()
        
        events.Rule(self, 'redditCommentScannerEvent',
            schedule=events.Schedule.rate(cdk.Duration.minutes(1)),
            targets=[eventsTargets.LambdaFunction(
                lambda_.Function(self, 'redditCommentScanner', 
                    code=self.lambdaCode,
                    runtime=lambda_.Runtime.PYTHON_3_8,
                    handler='scanComments.scannerLoop',
                    role=self.lambdaRole,
                    layers=[self.lambdaLayer],
                    timeout=cdk.Duration.seconds(55),
                    reserved_concurrent_executions=1
                ),
                event=events.RuleTargetInput.from_object({
                    'tipWorkflowArn': tipWorkflow.state_machine_arn,
                    'withdrawWorkflowArn': withdrawWorkflow.state_machine_arn
                })
            )]
        )

        self.backupBucket = s3.Bucket(self, 'bitcoindBackups',
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            bucket_name='bitcoind-pruned-backups-lntipbot',
        )

        self.serverRole = self.createServerRole()
        self.securityGroup = self.createSecurityGroup()
        self.createServer()

        self.createOps()
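Several helpers used above (createLambda, createVpc, createServerRole, the workflow builders) live elsewhere in the stack class. A hedged sketch of createLambda, consistent with the explicit Function construction in the comment-scanner rule (the real method may differ):

    def createLambda(self, name: str, handler: str) -> lambda_.Function:
        # assumed shape of the helper used throughout this stack
        return lambda_.Function(self, name,
            code=self.lambdaCode,
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler=handler,
            role=self.lambdaRole,
            layers=[self.lambdaLayer],
            timeout=cdk.Duration.seconds(30))

Code example #24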
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Let us create the DENY ALL policy
        deny_iam_policy_statement = _iam.PolicyStatement(
            resources=['*'],
            actions=['iam:*'],
            effect=_iam.Effect.DENY,
            # sid='DenyIAMPermissions'
        )
        deny_iam_policy_statement.sid = "DenyIAMPermissions"

        deny_iam_policy = _iam.ManagedPolicy(
            self,
            "deny_iam_policy",
            description="A policy to deny IAM permissions",
            managed_policy_name="deny_iam_privileges",
            statements=[deny_iam_policy_statement])

        # Lambda Function that will quarantine the user
        try:
            with open("./lambda_src/revoke_iam_privileges.py",
                      mode='r') as file:
                revoke_iam_privileges_fn_handler_code = file.read()
        except OSError as e:
            # fail fast: without the handler code the stack cannot be synthesized
            print(f'Unable to read file. ERROR:{str(e)}')
            raise

        revoke_iam_privileges_fn = _lambda.Function(
            self,
            id='revokeIamPrivilegesFnId',
            function_name="revoke_iam_privileges_fn",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(revoke_iam_privileges_fn_handler_code),
            handler='index.lambda_handler',
            timeout=core.Duration.seconds(5),
            environment={
                "ADMIN_GROUP_NAME": global_args.ADMIN_GROUP_NAME,
                "DENY_IAM_POLICY_ARN": f"{deny_iam_policy.managed_policy_arn}"
            })

        revoke_iam_privileges_fn_perms = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                "*",
            ],
            actions=[
                "iam:AttachRolePolicy",
                "iam:AttachUserPolicy",
                "iam:ListAttachedUserPolicies",
                "iam:ListGroupsForUser",
                "iam:PutUserPolicy",
            ])
        revoke_iam_privileges_fn_perms.sid = "AllowLambdaToQuarantineUser"
        revoke_iam_privileges_fn.add_to_role_policy(
            revoke_iam_privileges_fn_perms)

        # Cloudwatch IAM Events Rule
        iam_event_validator_targets = []
        iam_event_validator_targets.append(
            _targets.LambdaFunction(handler=revoke_iam_privileges_fn))

        iam_event_pattern = _events.EventPattern(
            source=["aws.iam"],
            detail_type=["AWS API Call via CloudTrail"],
            detail={
                "eventSource": ["iam.amazonaws.com"],
                "userIdentity": {
                    "type": ["IAMUser"]
                }
            })
        """
        # Dedicated Event Bus for Security
        sec_event_bus = _events.EventBus(
            self,
            "securityEventBusId",
            event_bus_name=f"{global_args.OWNER}_security_event_bus"
        )
        """

        # Event Rule to trigger Lambda
        iam_event_rule = _events.Rule(
            self,
            "iamEventRuleId",
            event_pattern=iam_event_pattern,
            rule_name=f"iam_event_pattern_{global_args.OWNER}",
            # event_bus=sec_event_bus,
            enabled=True,
            description="Trigger an event for IAM Events",
            targets=iam_event_validator_targets)

        # Lets create a cloudtrail to track API events
        _event_trail = _cloudtrail.Trail(self,
                                         "cloudEventTrailId",
                                         is_multi_region_trail=False,
                                         include_global_service_events=True,
                                         enable_file_validation=False,
                                         send_to_cloud_watch_logs=False)

        ###########################################
        ################# OUTPUTS #################
        ###########################################

        output0 = core.CfnOutput(
            self,
            "SecuirtyAutomationBy",
            value=f"{global_args.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output1 = core.CfnOutput(
            self,
            "LambdaFunction",
            value=(f"https://console.aws.amazon.com/lambda/home?region="
                   f"{core.Aws.REGION}"
                   f"#functions/"
                   f"{revoke_iam_privileges_fn.function_name}"),
            description="The Quarantine Lambda Function")

        output2 = core.CfnOutput(
            self,
            "CreateUser",
            value=(f"aws iam create-user --user-name Mystique$RANDOM"),
            description="command to create users")

        output3 = core.CfnOutput(
            self,
            "DeleteUser",
            value=(f"aws iam delete-user --user-name Mystique*"),
            description="command to delete users")
コード例 #25
0
ファイル: __init__.py プロジェクト: sierrezinal/api-l3x-in
    def __init__(
            self,
            scope: core.Construct,
            id: str,  # pylint: disable=redefined-builtin
            lambda_notifications: aws_lambda.IFunction,
            **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        # CloudWatch LogGroup and Stream to store 'since' timestamp value
        since_log_group = aws_logs.LogGroup(
            self,
            f"{id}-log-group",
            log_group_name=f"{id}-timestamps",
            retention=DEFAULT_LOG_RETENTION,
            removal_policy=core.RemovalPolicy.DESTROY,
        )
        since_log_group.add_stream(
            f"{id}-log-stream",
            log_stream_name=since_log_group.log_group_name,
        )

        # Lambda shared code
        lambda_code = code_from_path(path=f"lib/stacks/{id}/lambdas")

        # Lambda create_epub (and layers): build epub file and store to S3 bucket
        epub_bucket = get_bucket(self, f"{id}-epub-bucket")

        lambda_create_epub = get_lambda(
            self,
            id + "-create-epub",
            code=lambda_code,
            handler="create_epub.handler",
            environment={
                "EPUB_BUCKET": epub_bucket.bucket_name,
            },
            layers=[
                get_layer(self, layer_name=layer, prefix=id)
                for layer in ("pandoc", "html2text", "requests_oauthlib")
            ],
            timeout=core.Duration.minutes(5),  # pylint: disable=no-value-for-parameter
        )
        epub_bucket.grant_write(lambda_create_epub)

        # Lambda send_to_kindle: invoked when new MOBI dropped into S3 bucket, deliver MOBI as
        # email attachment via lambda_notifications
        mobi_bucket = get_bucket(self, f"{id}-mobi-bucket")

        lambda_send_to_kindle = get_lambda(
            self,
            id + "-send-to-kindle",
            code=lambda_code,
            handler="send_to_kindle.handler",
            environment={
                "KINDLE_EMAIL": env["KINDLE_EMAIL"],
                "LAMBDA_NOTIFICATIONS": lambda_notifications.function_name,
                "MOBI_SRC_BUCKET": mobi_bucket.bucket_name,
                "POCKET_CONSUMER_KEY": env["POCKET_CONSUMER_KEY"],
                "POCKET_SECRET_TOKEN": env["POCKET_SECRET_TOKEN"],
            })
        mobi_bucket.add_event_notification(
            event=aws_s3.EventType.OBJECT_CREATED_PUT,
            dest=aws_s3_notifications.LambdaDestination(lambda_send_to_kindle),
        )
        lambda_notifications.grant_invoke(lambda_send_to_kindle)
        aws_iam.Policy(
            self,
            f"{id}-mail-attachment-policy",
            roles=[lambda_notifications.role],
            statements=[
                aws_iam.PolicyStatement(
                    actions=["s3:GetObject"],
                    resources=[f"{mobi_bucket.bucket_arn}/*"])
            ],
        )

        # Lambda reader: fetch new articles from Pocket and fan-out trigger create_epub Lambda
        lambda_reader = get_lambda(
            self,
            id + "-reader",
            code=lambda_code,
            handler="reader.handler",
            environment={
                "LAMBDA_PUBLISHER": lambda_create_epub.function_name,
                "POCKET_CONSUMER_KEY": env["POCKET_CONSUMER_KEY"],
                "POCKET_SECRET_TOKEN": env["POCKET_SECRET_TOKEN"],
                "SINCE_LOG_GROUP": since_log_group.log_group_name,
            },
        )
        since_log_group.grant(
            lambda_reader,
            "logs:GetLogEvents",
            "logs:PutLogEvents",
        )
        lambda_create_epub.grant_invoke(lambda_reader)

        # Fargate task: run dockerized `kindlegen` to parse EPUB to MOBI,
        # triggered by trigger_ecs_task Lambda
        # https://medium.com/@piyalikamra/s3-event-based-trigger-mechanism-to-start-ecs-far-gate-tasks-without-lambda-32f57ed10b0d
        cluster, vpc = get_fargate_cluster(self, id)

        mem_limit = "512"
        task = get_fargate_task(self, id, mem_limit)
        aws_iam.Policy(
            self,
            f"{id}-bucket-policy",
            roles=[task.task_role],
            statements=[
                aws_iam.PolicyStatement(
                    actions=["s3:GetObject"],
                    resources=[f"{epub_bucket.bucket_arn}/*"]),
                aws_iam.PolicyStatement(
                    actions=["s3:PutObject"],
                    resources=[f"{mobi_bucket.bucket_arn}/*"]),
            ],
        )

        container = get_fargate_container(self, id, task, mem_limit)

        # Lambda trigger_ecs_task: trigger Fargate task when new EPUB file is dropped into epub_bucket
        lambda_trigger_ecs_task = get_lambda(
            self,
            f"{id}-trigger-ecs-task",
            code=lambda_code,
            handler="trigger_ecs_task.handler",
            environment={
                "ECS_CLUSTER": cluster.cluster_arn,
                "ECS_CLUSTER_SECURITY_GROUP": vpc.vpc_default_security_group,
                "ECS_CLUSTER_SUBNET": vpc.public_subnets[0].subnet_id,
                "ECS_CONTAINER": container.container_name,
                "ECS_TASK": task.task_definition_arn,
                "MOBI_DEST_BUCKET": mobi_bucket.bucket_name,
            },
        )
        epub_bucket.add_event_notification(
            event=aws_s3.EventType.OBJECT_CREATED_PUT,
            dest=aws_s3_notifications.LambdaDestination(
                lambda_trigger_ecs_task),
        )
        aws_iam.Policy(
            self,
            f"{id}-lambda-trigger-policy",
            roles=[lambda_trigger_ecs_task.role],
            statements=[
                aws_iam.PolicyStatement(
                    actions=["ecs:RunTask"],
                    resources=[task.task_definition_arn],
                ),
                aws_iam.PolicyStatement(
                    actions=["iam:PassRole"],
                    resources=[
                        task.execution_role.role_arn,
                        task.task_role.role_arn,
                    ],
                )
            ],
        )

        # Cloudwatch cronjob event to check for new articles every hour
        cronjob = aws_events.Rule(
            self,
            f"{id}-scheduled-event",
            enabled=True,
            schedule=aws_events.Schedule.cron(minute="0"),  # pylint: disable=no-value-for-parameter
        )
        cronjob.add_target(
            aws_events_targets.LambdaFunction(handler=lambda_reader))
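
The trigger_ecs_task handler lives under lib/stacks/{id}/lambdas and is not shown here. A sketch of the RunTask call it presumably makes, wired from the environment variables defined above (reading the EPUB key from the S3 event and passing it to the container is an assumption):

import os
import boto3

ecs = boto3.client('ecs')

def handler(event, context):
    # key of the EPUB object that triggered the S3 notification
    key = event['Records'][0]['s3']['object']['key']
    ecs.run_task(
        cluster=os.environ['ECS_CLUSTER'],
        taskDefinition=os.environ['ECS_TASK'],
        launchType='FARGATE',
        networkConfiguration={
            'awsvpcConfiguration': {
                'subnets': [os.environ['ECS_CLUSTER_SUBNET']],
                'securityGroups': [os.environ['ECS_CLUSTER_SECURITY_GROUP']],
                'assignPublicIp': 'ENABLED',
            }
        },
        overrides={
            'containerOverrides': [{
                'name': os.environ['ECS_CONTAINER'],
                # hypothetical variables consumed by the kindlegen container
                'environment': [
                    {'name': 'EPUB_KEY', 'value': key},
                    {'name': 'MOBI_DEST_BUCKET',
                     'value': os.environ['MOBI_DEST_BUCKET']},
                ],
            }]
        })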
コード例 #26
0
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        log_bucket_name = cdk.Fn.import_value('sime-log-bucket-name')
        service_role_kdf_to_s3 = cdk.Fn.import_value(
            'siem-kdf-to-s3-role-name')

        cwe_frequency = cdk.CfnParameter(
            self,
            'cweRulesFrequency',
            type='Number',
            description=(
                'How often to fetch the WorkSpaces inventory (interval in minutes)'),
            default=720)
        kdf_workspaces_name = cdk.CfnParameter(
            self,
            'KdfWorkSpacesName',
            description=(
                'Kinesis Data Firehose name to deliver WorkSpaces events'),
            default='siem-workspaces-event-to-s3',
        )
        kdf_buffer_size = cdk.CfnParameter(
            self,
            'KdfBufferSize',
            type='Number',
            description='Enter a buffer size between 1 - 128 (MiB)',
            default=1,
            min_value=1,
            max_value=128)
        kdf_buffer_interval = cdk.CfnParameter(
            self,
            'KdfBufferInterval',
            type='Number',
            description='Enter a buffer interval between 60 - 900 (seconds)',
            default=60,
            min_value=60,
            max_value=900)

        role_get_workspaces_inventory = aws_iam.Role(
            self,
            'getWorkspacesInventoryRole',
            role_name='siem-get-workspaces-inventory-role',
            inline_policies={
                'describe-workspaces':
                aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(
                        actions=['workspaces:Describe*'],
                        resources=['*'],
                        sid='DescribeWorkSpacesPolicyGeneratedBySiemCfn')
                ]),
                'firehose-to-s3':
                aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(
                        actions=['s3:PutObject'],
                        resources=[f'arn:aws:s3:::{log_bucket_name}/*'],
                        sid='FirehoseToS3PolicyGeneratedBySiemCfn')
                ])
            },
            managed_policies=[
                aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
            ],
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com'))

        # Lambda Functions to get workspaces inventory
        lambda_func = aws_lambda.Function(
            self,
            'lambdaGetWorkspacesInventory',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.InlineCode(LAMBDA_GET_WORKSPACES_INVENTORY),
            function_name='siem-get-workspaces-inventory',
            description='SIEM: get workspaces inventory',
            handler='index.lambda_handler',
            timeout=cdk.Duration.seconds(300),
            role=role_get_workspaces_inventory,
            environment={'log_bucket_name': log_bucket_name})
        rule = aws_events.Rule(self,
                               'eventBridgeRuleWorkSpaceInventory',
                               rule_name='siem-workspaces-inventory-to-lambda',
                               schedule=aws_events.Schedule.rate(
                                   cdk.Duration.minutes(
                                       cwe_frequency.value_as_number)))
        rule.add_target(aws_events_targets.LambdaFunction(lambda_func))

        kdf_to_s3 = aws_kinesisfirehose.CfnDeliveryStream(
            self,
            "KDFForWorkSpacesEvent",
            delivery_stream_name=kdf_workspaces_name.value_as_string,
            s3_destination_configuration=CDS.
            S3DestinationConfigurationProperty(
                bucket_arn=f'arn:aws:s3:::{log_bucket_name}',
                prefix=f'AWSLogs/{cdk.Aws.ACCOUNT_ID}/WorkSpaces/Event/',
                compression_format='GZIP',
                buffering_hints=CDS.BufferingHintsProperty(
                    interval_in_seconds=kdf_buffer_interval.value_as_number,
                    size_in_m_bs=kdf_buffer_size.value_as_number),
                role_arn=(f'arn:aws:iam::{cdk.Aws.ACCOUNT_ID}:role/'
                          f'service-role/{service_role_kdf_to_s3}')))

        pattern = aws_events.EventPattern(detail_type=["WorkSpaces Access"],
                                          source=['aws.workspaces'])

        aws_events.Rule(
            self,
            'eventBridgeRuleWorkSpacesEvent',
            event_pattern=pattern,
            rule_name='siem-workspaces-event-to-kdf',
            targets=[aws_events_targets.KinesisFirehoseStream(kdf_to_s3)])
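
LAMBDA_GET_WORKSPACES_INVENTORY is an inline code string defined elsewhere in this module, so its body does not appear in the listing. A sketch of what the collector could look like, assuming it pages through DescribeWorkspaces and drops one JSON inventory file into the log bucket (the object key prefix is an assumption):

import json
import os
from datetime import datetime, timezone

import boto3

def lambda_handler(event, context):
    ws = boto3.client('workspaces')
    s3 = boto3.client('s3')
    inventory = []
    # DescribeWorkspaces is paginated; collect every page
    for page in ws.get_paginator('describe_workspaces').paginate():
        inventory.extend(page['Workspaces'])
    now = datetime.now(timezone.utc).strftime('%Y%m%dT%H%M%SZ')
    s3.put_object(
        Bucket=os.environ['log_bucket_name'],
        Key=f'AWSLogs/WorkSpaces/Inventory/{now}.json',
        Body=json.dumps(inventory, default=str))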
コード例 #27
0
ファイル: app.py プロジェクト: hassantahhan/iamcanary
    def __init__(self, app: cdk.App, id: str) -> None:
        super().__init__(app, id)

        with open("lambda-handler.py", encoding="utf8") as fp:
            handler_code = fp.read()

        principals_actions_json = cdk.CfnParameter(
            self,
            "PrincipalsActionsJSON",
            type="String",
            default="{}",
        )

        alert_notification_email = cdk.CfnParameter(
            self,
            "AlertNotificationEmail",
            type="String",
            default="*****@*****.**",
        )

        role = iam.Role(
            self,
            "CheckPrincipalsActionsLambdaRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole")
            ])
        role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=["iam:SimulatePrincipalPolicy"],
                                resources=["*"]))

        lambdaFn = lambda_.Function(
            self,
            "CheckPrincipalsActions",
            code=lambda_.InlineCode(handler_code),
            handler="index.lambda_handler",
            memory_size=128,
            timeout=cdk.Duration.seconds(10),
            runtime=lambda_.Runtime.PYTHON_3_8,
            environment={
                "principals_actions_json":
                principals_actions_json.value_as_string
            },
            description=
            "Check actions assigned to IAM users, roles, or groups",
            role=role,
        )

        rule = events.Rule(
            self,
            "CheckPrincipalsActionsEventScheduler",
            schedule=events.Schedule.rate(cdk.Duration.minutes(1)),
        )
        rule.add_target(targets.LambdaFunction(lambdaFn))

        topic = sns.Topic(self, "CheckPrincipalsActionsLambdaErrorTopic")
        topic.add_subscription(
            subscriptions.EmailSubscription(
                alert_notification_email.value_as_string))

        metric = lambdaFn.metric("Errors").with_(
            period=cdk.Duration.seconds(60), statistic="Sum")

        alarm = metric.create_alarm(
            self,
            "CheckPrincipalsActionsAlarm",
            threshold=1,
            evaluation_periods=1,
            datapoints_to_alarm=1,
        )
        alarm.add_alarm_action(actions.SnsAction(topic))
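
lambda-handler.py is read from disk and not reproduced here. The role's iam:SimulatePrincipalPolicy permission and the principals_actions_json parameter suggest a canary like the sketch below (the JSON shape is an assumption): if any expected action is no longer allowed, the handler raises, the function's Errors metric trips the alarm, and the SNS topic notifies the email subscriber.

import json
import os

import boto3

iam = boto3.client('iam')

def lambda_handler(event, context):
    # assumed shape: {"principal-arn": ["action1", "action2", ...], ...}
    principals_actions = json.loads(os.environ['principals_actions_json'])
    for principal_arn, actions in principals_actions.items():
        results = iam.simulate_principal_policy(
            PolicySourceArn=principal_arn,
            ActionNames=actions)['EvaluationResults']
        denied = [r['EvalActionName'] for r in results
                  if r['EvalDecision'] != 'allowed']
        if denied:
            # raising marks the invocation as an error and trips the alarm
            raise RuntimeError(f'{principal_arn} lost access to: {denied}')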
コード例 #28
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        env = kwargs['env']

        work_dir = pathlib.Path(__file__).parents[1]

        # The steps below reuse the ECS cluster already created by the shared stack

        # Get cluster name from ssm parameter
        cluster_name = ssm.StringParameter.from_string_parameter_name(
            self,
            "GetClusterName",
            string_parameter_name="/dev/compute/container/ecs-cluster-name"
        ).string_value

        vpc_az = ssm.StringListParameter.from_string_list_parameter_name(
            self,
            "GetVpcAz",
            string_list_parameter_name="/dev/network/vpc/vpc-az"
        ).string_list_value

        # use StringParameter instead of StringListParameter because of a subnet-list parsing issue
        vpc_public_subnets_1 = ssm.StringParameter.from_string_parameter_name(
            self,
            "GetVpcPublicSubnets1",
            string_parameter_name="/dev/network/vpc/vpc-public-subnets-1"
        ).string_value

        vpc_public_subnets_2 = ssm.StringParameter.from_string_parameter_name(
            self,
            "GetVpcPublicSubnets2",
            string_parameter_name="/dev/network/vpc/vpc-public-subnets-2"
        ).string_value

        vpc_id = ssm.StringParameter.from_string_parameter_name(
            self, "GetVpcId",
            string_parameter_name="/dev/network/vpc/vpc-id").string_value

        ec2_vpc = ec2.Vpc.from_vpc_attributes(
            self,
            "GetVpc",
            availability_zones=vpc_az,
            vpc_id=vpc_id,
            public_subnet_ids=[vpc_public_subnets_1, vpc_public_subnets_2])

        # Get security group id from ssm parameter
        security_group_id = ssm.StringParameter.from_string_parameter_name(
            self,
            "GetSgId",
            string_parameter_name="/dev/network/vpc/security-group-id"
        ).string_value

        # Get security group from lookup
        ec2_sgp = ec2.SecurityGroup.from_security_group_id(
            self, "GetSgp", security_group_id=security_group_id)

        # myDateTimeFunction lambda function
        my_datetime_lambda = _lambda.Function(
            self,
            "my-datetime",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="myDateTimeFunction.handler",
            code=_lambda.Code.asset("./lambda"),
            current_version_options=_lambda.VersionOptions(
                removal_policy=core.RemovalPolicy.RETAIN, retry_attempts=1))

        my_datetime_lambda.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=["lambda:InvokeFunction"],
                                resources=["*"]))

        # beforeAllowTraffic lambda function
        pre_traffic_lambda = _lambda.Function(
            self,
            "pre-traffic",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="beforeAllowTraffic.handler",
            code=_lambda.Code.asset("./lambda"),
            environment=dict(
                NewVersion=my_datetime_lambda.current_version.function_arn))

        pre_traffic_lambda.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["codedeploy:PutLifecycleEventHookExecutionStatus"],
                resources=["*"]))

        pre_traffic_lambda.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=["lambda:InvokeFunction"],
                                resources=["*"]))

        # afterAllowTraffic lambda function
        post_traffic_lambda = _lambda.Function(
            self,
            "post-traffic",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="afterAllowTraffic.handler",
            code=_lambda.Code.asset("./lambda"),
            environment=dict(
                NewVersion=my_datetime_lambda.current_version.function_arn))

        post_traffic_lambda.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["codedeploy:PutLifecycleEventHookExecutionStatus"],
                resources=["*"]))

        post_traffic_lambda.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                actions=["lambda:InvokeFunction"],
                                resources=["*"]))

        # create a cloudwatch event rule
        rule = events.Rule(
            self,
            "CanaryRule",
            schedule=events.Schedule.expression("rate(10 minutes)"),
            targets=[
                events_targets.LambdaFunction(
                    my_datetime_lambda.current_version)
            ],
        )

        # create a cloudwatch alarm based on the lambda errors metric
        alarm = cloudwatch.Alarm(
            self,
            "CanaryAlarm",
            metric=my_datetime_lambda.current_version.metric_errors(),
            threshold=1,
            evaluation_periods=2,
            datapoints_to_alarm=2,
            treat_missing_data=cloudwatch.TreatMissingData.IGNORE,
            period=core.Duration.minutes(5),
            alarm_name="CanaryAlarm")

        lambda_deployment_group = codedeploy.LambdaDeploymentGroup(
            self,
            "datetime-lambda-deployment",
            alias=my_datetime_lambda.current_version.add_alias("live"),
            deployment_config=codedeploy.LambdaDeploymentConfig.ALL_AT_ONCE,
            alarms=[alarm],
            auto_rollback=codedeploy.AutoRollbackConfig(
                deployment_in_alarm=True),
            pre_hook=pre_traffic_lambda,
            post_hook=post_traffic_lambda)

        # Pass vpc, sgp and ecs cluster name to get ecs cluster info
        ecs_cluster = ecs.Cluster.from_cluster_attributes(
            self,
            "GetEcsCluster",
            cluster_name=cluster_name,
            vpc=ec2_vpc,
            security_groups=[ec2_sgp])

        # Fargate Service
        task_definition = ecs.FargateTaskDefinition(
            self,
            "TaskDef",
            memory_limit_mib=512,
            cpu=256,
        )

        container = task_definition.add_container(
            "web",
            image=ecs.ContainerImage.from_asset(
                os.path.join(work_dir, "container")),
            # Build a custom health check specific to your application
            # and add it here. Ex: ping check, database check, etc.
            health_check=ecs.HealthCheck(command=["CMD-SHELL", "echo"]),
            # environment=dict(name="latest")
        )

        port_mapping = ecs.PortMapping(container_port=8000,
                                       protocol=ecs.Protocol.TCP)

        container.add_port_mappings(port_mapping)

        # Create Fargate Service
        # Current limitation: Blue/Green deployment
        # https://github.com/aws/aws-cdk/issues/1559
        service = ecs.FargateService(
            self,
            "Service",
            cluster=ecs_cluster,
            task_definition=task_definition,
            assign_public_ip=True,
            deployment_controller=ecs.DeploymentController(
                type=ecs.DeploymentControllerType.ECS),
            desired_count=2,
            min_healthy_percent=50)

        # Create Application LoadBalancer
        lb = elbv2.ApplicationLoadBalancer(self,
                                           "LB",
                                           vpc=ec2_vpc,
                                           internet_facing=True)

        # Add listener to the LB
        listener = lb.add_listener("Listener", port=80, open=True)

        # Default to Lambda
        listener.add_targets(
            "Lambda", targets=[elb_targets.LambdaTarget(my_datetime_lambda)])

        # Additionally route to container
        listener.add_targets("Fargate",
                             port=8000,
                             path_pattern="/container",
                             priority=10,
                             targets=[service])

        # add an output with a well-known name to read it from the integ tests
        self.load_balancer_dns_name = lb.load_balancer_dns_name
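
The pre- and post-traffic hooks here are Node.js files in ./lambda and are not shown. For illustration in this document's Python, a hook following the standard CodeDeploy contract might look like this (the smoke test is an assumption; DeploymentId and LifecycleEventHookExecutionId are the fields CodeDeploy passes to hook functions):

import os

import boto3

codedeploy = boto3.client('codedeploy')
lambda_client = boto3.client('lambda')

def handler(event, context):
    status = 'Succeeded'
    try:
        # smoke-test the candidate version published by CodeDeploy
        lambda_client.invoke(FunctionName=os.environ['NewVersion'])
    except Exception:
        status = 'Failed'
    # report back so CodeDeploy continues shifting traffic or rolls back
    codedeploy.put_lifecycle_event_hook_execution_status(
        deploymentId=event['DeploymentId'],
        lifecycleEventHookExecutionId=event['LifecycleEventHookExecutionId'],
        status=status)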
コード例 #29
0
    def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        """Default values if not specified via context variables from CLI
        logging_level = 'INFO'
        slack_webhook_secret_name = 'aws-to-slack/dev/webhooks'
        """
        if self.node.try_get_context('logging_level') is None:
            LOGGING_LEVEL = 'INFO'
        else:
            LOGGING_LEVEL = self.node.try_get_context('logging_level')
        if self.node.try_get_context('slack_webhook_secret_name') is None:
            WEBHOOK_SECRET_NAME = 'aws-to-slack/dev/webhooks'
        else:
            WEBHOOK_SECRET_NAME = self.node.try_get_context(
                'slack_webhook_secret_name')

        """Create CloudFormation parameters so we can easily use the
        template this CDK app generates and convert it to a SAM
        application.
        """
        webhook_secret_name_param = cdk.CfnParameter(
            self, 'WebhookSecretName',
            description=('The name of the Secrets Manager secret '
                         'which stores the Slack webhook URL'),
            type='String',
            default=WEBHOOK_SECRET_NAME,
            allowed_pattern='[a-zA-Z0-9/_+=.@-]+',
            min_length=1,
            max_length=512
        ).value_as_string
        whats_new_rss_feed = cdk.CfnParameter(
            self, 'WhatsNewRSSFeed',
            description='The RSS feed of all AWS new releases',
            type='String',
            default=self.node.try_get_context(
                'whats_new_rss_feed')
        ).value_as_string
        whats_new_search_api = cdk.CfnParameter(
            self, 'WhatsNewSearchAPI',
            description='The search API url of new releases',
            type='String',
            default=self.node.try_get_context(
                'whats_new_search_api')
        ).value_as_string
        logging_level = cdk.CfnParameter(
            self, 'LoggingLevel',
            description='The verbosity of the logs in the Lambda function',
            type='String',
            allowed_values=['INFO', 'ERROR', 'DEBUG', 'WARN'],
            default=LOGGING_LEVEL,
        ).value_as_string

        """DynamoDB table which stores a history of messages sent"""
        ddb_table = dynamodb.Table(
            self, 'SlackMessageHistory',
            partition_key=dynamodb.Attribute(
                name='url', type=dynamodb.AttributeType.STRING),
            read_capacity=1,
            write_capacity=1
        )

        """Lambda function that queries the AWS What's New RSS feed
        and sends each release to Slack if it has not already been sent.
        """
        new_release_function = lambda_python.PythonFunction(
            self, 'AWSReleasesFunction',
            entry='lambda',
            handler='main',
            index='new_releases.py',
            runtime=lambda_.Runtime.PYTHON_3_8,
            description='Queries https://aws.amazon.com/new/ and sends new release info to a Slack channel via AWS Chatbot',
            environment=dict(
                WHATS_NEW_RSS_FEED=whats_new_rss_feed,
                WHATS_NEW_SEARCH_API=whats_new_search_api,
                WEBHOOK_SECRET_NAME=webhook_secret_name_param,
                DDB_TABLE=ddb_table.table_name,
                LOG_LEVEL=logging_level,
                POWERTOOLS_SERVICE_NAME='aws-to-slack'
            ),
            memory_size=512,
            tracing=lambda_.Tracing.ACTIVE,
            timeout=cdk.Duration.seconds(30),
            log_retention=logs.RetentionDays.SIX_MONTHS
        )
        """Imports the SecretsManager secret which contains the Slack webhook url(s)
        and adds read access to the Lambda execution role
        """
        slack_webhook_urls = secretsmanager.Secret.from_secret_name_v2(
            self, "SlackWebhookURLSecrets",
            secret_name=webhook_secret_name_param
        )
        slack_webhook_urls.grant_read(new_release_function.role)

        """Invoke this function every X minutes"""
        rule = events.Rule(
            self, 'AWSReleaseToSlackRule',
            description='Schedule to invoke Lambda function that sends new AWS releases to Slack',
            schedule=events.Schedule.rate(cdk.Duration.minutes(5))
        )
        rule.add_target(events_targets.LambdaFunction(new_release_function))

        """Grant the Lambda function Query and PutItem access to the DDB table"""
        ddb_table.grant(
            new_release_function,
            'dynamodb:Query',
            'dynamodb:PutItem'
        )
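
new_releases.py is packaged from the lambda directory and not shown. The table's url partition key and the granted Query/PutItem actions imply a send-once pattern; a minimal sketch under those assumptions (feed parsing and the Slack POST are simplified):

import json
import os
import urllib.request

import boto3
from boto3.dynamodb.conditions import Key

table = boto3.resource('dynamodb').Table(os.environ['DDB_TABLE'])

def notify_if_new(release_url, title, webhook_url):
    # any hit on the url key means this release was already announced
    if table.query(KeyConditionExpression=Key('url').eq(release_url))['Count']:
        return False
    req = urllib.request.Request(
        webhook_url,
        data=json.dumps({'text': title}).encode(),
        headers={'Content-Type': 'application/json'})
    urllib.request.urlopen(req)
    table.put_item(Item={'url': release_url, 'title': title})
    return True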
コード例 #30
0
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        aws_region = os.environ.get("CDK_DEPLOY_REGION", os.environ["CDK_DEFAULT_REGION"])
        account_id = os.environ.get("CDK_DEPLOY_ACCOUNT", os.environ["CDK_DEFAULT_ACCOUNT"])

        ssm_client = boto3.client('ssm', aws_region)
        # Prepare pipeline config details in SSM parameters
        if prefix == 'us':
            self.qs_reports_env_config = {"Permissions":
                                              [{"Group_Name": "critical",
                                                "Reports": ["Sales Results - Critical"],
                                                "ns_name": "default"},
                                               {"Group_Name": "highlyconfidential",
                                                "Reports": ["Field Operations Dashboard",
                                                            "Sales Results - Highly Confidential"
                                                            ],
                                                "ns_name": "default"},
                                               {"Group_Name": "bi-developer",
                                                "Reports": ["all"],
                                                "ns_name": "default"},
                                               {"Group_Name": "bi-admin",
                                                "Reports": ["all"],
                                                "ns_name": "default"},
                                               {"Group_Name": "power-reader",
                                                "Reports": ["read-all"],
                                                "ns_name": "default"},
                                               {"Group_Name": "3rd-party",
                                                "Reports": ["Marketing KPIs"],
                                                "ns_name": "3rd-party"},
                                               {"Group_Name": "3rd-party-reader",
                                                "Reports": ["Marketing KPIs"],
                                                "ns_name": "3rd-party"}
                                               ]
                                          }
        if prefix == 'eu':
            self.qs_reports_env_config = {"Permissions":
                                              [{"Group_Name": "eu-critical",
                                                "Reports": ["EUResults - Critical"]},
                                               {"Group_Name": "bi-developer",
                                                "Reports": ["all"]},
                                               {"Group_Name": "bi-admin",
                                                "Reports": ["all"]},
                                               {"Group_Name": "eu-highlyconfidential",
                                                "Reports": ["EUField Operations Dashboard",
                                                            "EUResults - Highly Confidential"]},
                                               {"Group_Name": "power-reader",
                                                "Reports": ["read-all"]}]}

        self.qs_reports_env_config_ssm = ssm.StringParameter(
            self, '/qs/config/access',
            string_value=json.dumps(self.qs_reports_env_config),
            parameter_name='/qs/config/access'
        )

        # group-user mapping information is stored in an s3 bucket; an ssm parameter stores the bucket name.
        self.qs_user_group_config = {'bucket-name':f'qs-granular-access-demo-{account_id}'}

        bucket = s3.Bucket(self, f'qs-granular-access-demo-{account_id}',
                           bucket_name=f'qs-granular-access-demo-{account_id}',
                           versioned=True,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           auto_delete_objects=True)

        s3deploy.BucketDeployment(self, "DeployMembership",
                                  sources=[s3deploy.Source.asset('membership.zip')],
                                  destination_bucket=bucket,
                                  destination_key_prefix='membership',
                                  prune=False)

        self.qs_user_group_config_ssm = ssm.StringParameter(
            self, '/qs/config/groups',
            string_value=json.dumps(self.qs_user_group_config),
            parameter_name='/qs/config/groups'
        )

        # group-role mapping information is stored in an ssm parameter.
        self.qs_role_config = {'default_bi-developer': 'AUTHOR',
                               'default_bi-admin': 'ADMIN',
                               'default_power-reader': 'AUTHOR',
                               'default_critical': 'READER',
                               'default_highlyconfidential': 'READER',
                               'default_marketing': 'AUTHOR',
                               '3rd-party_3rd-party': 'AUTHOR',
                               '3rd-party_3rd-party-reader': 'READER'
                               }

        self.qs_role_config_ssm = ssm.StringParameter(
            self, '/qs/config/roles',
            string_value=json.dumps(self.qs_role_config),
            parameter_name='/qs/config/roles'
        )

        # group-namespace mapping information is stored in an ssm parameter.
        self.qs_ns_config = {"ns":['default',
                             '3rd-party']}

        self.qs_ns_config_ssm = ssm.StringParameter(
            self, '/qs/config/ns',
            string_value=json.dumps(self.qs_ns_config),
            parameter_name='/qs/config/ns'
        )

        lambda_role = iam.Role(
            self,
            id='lambda-role',
            description='Role for the quicksight lambda',
            role_name=f'{aws_region}-role-quicksight-lambda',
            max_session_duration=core.Duration.seconds(3600),
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={
                'AllowS3Access': iam.PolicyDocument(
                    statements=[
                        iam.PolicyStatement(
                            effect=iam.Effect.ALLOW,
                            actions=["kms:GetParametersForImport",
                                     "kms:GetPublicKey",
                                     "kms:ListKeyPolicies",
                                     "kms:ListRetirableGrants",
                                     "kms:GetKeyPolicy",
                                     "kms:ListResourceTags",
                                     "kms:ListGrants",
                                     "kms:GetParametersForImport",
                                     "kms:GetKeyRotationStatus",
                                     "kms:DescribeKey",
                                     "kms:CreateGrant",
                                     "kms:ListAliases",
                                     "kms:ListKeys",
                                     "kms:DescribeCustomKeyStores",
                                     "ssm:GetParameters",
                                     "ssm:GetParameter",
                                     "ssm:GetParametersByPath"
                                     ],
                            resources=['*']
                        ),
                        iam.PolicyStatement(
                            effect=iam.Effect.ALLOW,
                            actions=["lambda:InvokeFunction",
                                     "logs:CreateLogStream",
                                     "logs:CreateLogGroup",
                                     "logs:PutLogEvents",
                                     "quicksight:*",
                                     "s3:HeadBucket",
                                     "s3:ListAllMyBuckets",
                                     "s3:PutObject",
                                     "s3:GetObject",
                                     "s3:ListBucket",
                                     "s3:GetObjectVersionForReplication",
                                     "s3:GetBucketPolicy",
                                     "s3:GetObjectVersion",
                                     "cloudwatch:PutMetricData",
                                     "sts:GetCallerIdentity"],
                            resources=['*']
                        )
                    ]
                )
            }
        )

        user_init = _lambda.Function(self, 'user_init',
                                           handler='user_init.lambda_handler',
                                           runtime=_lambda.Runtime.PYTHON_3_7,
                                           code=_lambda.Code.from_asset(os.path.join(current_dir,
                                                                                     '../lambda_functions/user_init/')),
                                           function_name='user_init',
                                           role=lambda_role,
                                           timeout=core.Duration.minutes(15),
                                           memory_size=512
                                           )

        check_team_members = _lambda.Function(self, 'check_team_members',
                                                    handler='check_team_members.lambda_handler',
                                                    runtime=_lambda.Runtime.PYTHON_3_7,
                                                    code=_lambda.Code.from_asset(os.path.join(current_dir,
                                                                                              '../lambda_functions/check_team_members/')),
                                                    function_name='check_team_members',
                                                    role=lambda_role,
                                                    timeout=core.Duration.minutes(15),
                                                    memory_size=512,
                                                    environment={'aws_region': f'{core.Aws.REGION}'}
                                                    )

        downgrade_user = _lambda.Function(self, 'downgrade_user',
                                                handler='downgrade_user.lambda_handler',
                                                runtime=_lambda.Runtime.PYTHON_3_8,
                                                code=_lambda.Code.from_asset(os.path.join(current_dir,
                                                                                          '../lambda_functions/downgrade_user/')),
                                                function_name='downgrade_user',
                                                role=lambda_role,
                                                timeout=core.Duration.minutes(15),
                                                memory_size=2048,
                                                environment={'aws_region': f'{core.Aws.REGION}'}
                                                )

        granular_user_govenance = _lambda.Function(self, 'granular_user_govenance',
                                                 handler='granular_user_govenance.lambda_handler',
                                                 runtime=_lambda.Runtime.PYTHON_3_7,
                                                 code=_lambda.Code.from_asset(os.path.join(current_dir,
                                                                                           '../lambda_functions/granular_user_govenance')),
                                                 function_name='granular_user_govenance',
                                                 role=lambda_role,
                                                 timeout=core.Duration.minutes(15),
                                                 memory_size=2048,
                                                 environment={'aws_region': f'{core.Aws.REGION}'}
                                                )

        granular_access_assets_govenance = _lambda.Function(self, 'granular_access_assets_govenance',
                                                   handler='granular_access_assets_govenance.lambda_handler',
                                                   runtime=_lambda.Runtime.PYTHON_3_7,
                                                   code=_lambda.Code.from_asset(os.path.join(current_dir,
                                                                                             '../lambda_functions/granular_access_assets_govenance')),
                                                   function_name='granular_access_assets_govenance',
                                                   role=lambda_role,
                                                   timeout=core.Duration.minutes(15),
                                                   memory_size=2048,
                                                   environment={'aws_region': f'{core.Aws.REGION}'}
                                                   )

        quicksight_event_rule = events.Rule(self, 'QuickSightCWEventRule',
                                             description='CloudWatch rule to detect new QuickSight user creation',
                                             rule_name='qs-gc-user-creation',
                                             targets=[targets.LambdaFunction(user_init)],
                                             event_pattern=events.EventPattern(source=['aws.quicksight'],
                                                                               detail_type=[
                                                                                   'AWS Service Event via CloudTrail'],
                                                                               detail={
                                                                                   "eventSource": [
                                                                                       "quicksight.amazonaws.com"],
                                                                                   "eventName": ["CreateUser"]
                                                                               }
                                                                               )
                                             )

        quicksight_schedule_rule = events.Rule(self, 'quicksight_schedule_rule',
                                               description='CloudWatch rule to run QS objects/groups assignment every hour',
                                               rule_name='qs-gc-every-hour',
                                               schedule=events.Schedule.cron(minute="0"),
                                               targets=[targets.LambdaFunction(granular_user_govenance)]
                                               )

        quicksight_assume_condition_object = {"StringEquals": {
            "SAML:aud": "https://signin.aws.amazon.com/saml"}}

        quicksight_federated_principal_with_condition_obj = iam.FederatedPrincipal(
            f'arn:aws:iam::{core.Aws.ACCOUNT_ID}:saml-provider/saml', quicksight_assume_condition_object,
            'sts:AssumeRoleWithSAML')

        quicksight_resource_scope = '${aws:userid}'
        quicksight_reader_saml_inline_policies = {
            'AllowQuicksightAccessSAML': iam.PolicyDocument(
                statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=['quicksight:CreateReader'],
                        resources=[
                            f'arn:aws:quicksight::{core.Aws.ACCOUNT_ID}:user/{quicksight_resource_scope}']
                    )
                ]
            )
        }

        quicksight_users = iam.Role(
            self,
            id=f"quicksight-fed-{prefix}-users",  # this is the default group with no access
            description='Role for the quicksight reader SAML',
            role_name=f"quicksight-fed-{prefix}-users",
            max_session_duration=core.Duration.seconds(3600),
            assumed_by=quicksight_federated_principal_with_condition_obj,
            inline_policies=quicksight_reader_saml_inline_policies
        )
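
The user_init and governance handlers are loaded from ../lambda_functions/ and are not part of this listing. Given the '<namespace>_<group>' keys stored in /qs/config/roles above, a small sketch of the mapping lookup such a handler could perform (the READER fallback is an assumption):

import json

import boto3

ssm = boto3.client('ssm')

def role_for(namespace, group_name):
    # read the group-role mapping the stack stored in SSM
    mapping = json.loads(
        ssm.get_parameter(Name='/qs/config/roles')['Parameter']['Value'])
    # keys follow the '<namespace>_<group>' convention, e.g. 'default_bi-admin'
    return mapping.get(f'{namespace}_{group_name}', 'READER')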