Example #1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        email_subscription_parameter = core.CfnParameter(
            self,
            "EmailSubscriptionParameter",
            description="Email Address for Notification Subscription")
        email_subscription = email_subscription_parameter.value_as_string

        ar1 = accessanalyzer.CfnAnalyzer.ArchiveRuleProperty(
            rule_name="test",
            filter=[
                accessanalyzer.CfnAnalyzer.FilterProperty(
                    property="principal.AWS", eq=["123456789123"])
            ])
        analyzer = accessanalyzer.CfnAnalyzer(
            self,
            id="accessanalyzer",
            type="ACCOUNT",
            tags=[core.CfnTag(key="AccessAnalyzerType", value="ACCOUNT")],
            archive_rules=[ar1])

        runtime = aws_lambda.Runtime.PYTHON_3_8

        boto3_lambda_layer = aws_lambda.LayerVersion(
            self,
            "Boto3LambdaLayer",
            code=aws_lambda.AssetCode("./layers/boto3"),
            compatible_runtimes=[runtime],
            description="Boto3 Lambda Layer")

        context_enrichment = aws_lambda.Function(
            self,
            "context_enrichment",
            runtime=runtime,
            handler="app.handler",
            code=aws_lambda.AssetCode("./functions/context-enrichment"),
            layers=[boto3_lambda_layer])
        handler_statement = iam.PolicyStatement(actions=[
            "iam:ListRoleTags", "s3:GetBucketTagging", "lambda:ListTags",
            "sqs:ListQueueTags", "kms:ListAliases", "kms:ListResourceTags"
        ],
                                                effect=iam.Effect.ALLOW,
                                                resources=["*"])
        context_enrichment.add_to_role_policy(handler_statement)

        cmk_key = kms.Key(
            self,
            "SNSEncryptionAtRestKey",
            description="SNS Encryption at rest key",
            alias="sns-encryption-at-rest",
            enable_key_rotation=True,
        )

        email_topic = sns.Topic(
            self,
            "AccessAnalyzerNotificationTopic",
            display_name="Access Analyzer Finding Notification Topic",
            master_key=cmk_key)
        email_topic.add_subscription(
            subscriptions.EmailSubscription(email_subscription))

        notification = aws_lambda.Function(
            self,
            "notification",
            runtime=runtime,
            handler="app.handler",
            code=aws_lambda.AssetCode("./functions/notification"),
            layers=[boto3_lambda_layer],
            environment={"SNS_TOPIC_ARN": email_topic.topic_arn})
        notification_statement = iam.PolicyStatement(actions=[
            "sns:Publish",
        ],
                                                     effect=iam.Effect.ALLOW,
                                                     resources=["*"])
        notification.add_to_role_policy(notification_statement)
        cmk_key.grant_encrypt_decrypt(notification)

        archive_access_analyzer_finding = aws_lambda.Function(
            self,
            "archive-access-analyzer-finding",
            runtime=runtime,
            handler="app.handler",
            code=aws_lambda.AssetCode(
                "./functions/archive-access-analyzer-finding"),
            layers=[boto3_lambda_layer])
        archive_statement = iam.PolicyStatement(actions=[
            "access-analyzer:UpdateFindings",
        ],
                                                effect=iam.Effect.ALLOW,
                                                resources=["*"])
        archive_access_analyzer_finding.add_to_role_policy(archive_statement)

        evaluate_access_analyzer_finding = aws_lambda.Function(
            self,
            "evaluate-access-analyzer-finding",
            runtime=runtime,
            handler="app.handler",
            code=aws_lambda.AssetCode(
                "./functions/evaluate-access-analyzer-finding"),
            layers=[boto3_lambda_layer])

        #https://docs.aws.amazon.com/cdk/api/latest/docs/aws-stepfunctions-readme.html
        access_analyzer_handler_task = sfn.Task(
            self,
            "Context Enrichment",
            task=sfn_tasks.InvokeFunction(context_enrichment),
            result_path="$.guid",
        )

        notification_task = sfn.Task(
            self,
            "Send Notification",
            task=sfn_tasks.InvokeFunction(notification),
            result_path="$.guid",
        )

        archive_task = sfn.Task(
            self,
            "Archive Finding",
            task=sfn_tasks.InvokeFunction(archive_access_analyzer_finding),
            result_path="$.guid",
        )

        evaluate_task = sfn.Task(
            self,
            "Evaluate Risk Level",
            task=sfn_tasks.InvokeFunction(evaluate_access_analyzer_finding),
            result_path="$.guid",
        )

        definition = (
            access_analyzer_handler_task
            .next(evaluate_task)
            .next(sfn.Choice(self, "Archive?")
                  .when(sfn.Condition.string_equals("$.guid.status", "ARCHIVE"), archive_task)
                  .when(sfn.Condition.string_equals("$.guid.status", "NOTIFY"), notification_task)))

        state_machine = sfn.StateMachine(
            self,
            "Access-Analyzer-Automatic-Finding-Archive-State-Machine",
            definition=definition,
            timeout=core.Duration.minutes(5),
        )

        #https://docs.aws.amazon.com/step-functions/latest/dg/tutorial-cloudwatch-events-s3.html
        access_analyzer_finding_rule = aws_events.Rule(
            self,
            "AccessAnalzyerFindingActiveEventRule",
            description="Access Analyzer Finding Event Active",
            enabled=True,
            event_pattern=aws_events.EventPattern(
                source=["aws.access-analyzer"],
                detail_type=["Access Analyzer Finding"],
                detail={"status": ["ACTIVE"]}),
            targets=[
                aws_events_targets.SfnStateMachine(state_machine),
                aws_events_targets.LambdaFunction(context_enrichment)
            ])
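
A minimal entrypoint sketch for synthesizing the stack above; the class and module names here are assumptions, not taken from the original source:

    # app.py - hypothetical entrypoint for Example #1; names are assumed
    from aws_cdk import core

    from access_analyzer_stack import AccessAnalyzerFindingStack

    app = core.App()
    AccessAnalyzerFindingStack(app, "access-analyzer-finding-automation")
    app.synth()

Because EmailSubscriptionParameter is a CloudFormation parameter, its value is supplied at deploy time, e.g. cdk deploy --parameters EmailSubscriptionParameter=ops@example.com.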
Example #2
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # create s3 bucket for artifacts
        artifacts_bucket = aws_s3.Bucket(
            self,
            "artifacts-bucket",
            block_public_access=aws_s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=cdk.RemovalPolicy.DESTROY,
            encryption=aws_s3.BucketEncryption.S3_MANAGED)

        # create codebuild project that executes a long-running script
        build_project = aws_codebuild.Project(
            self,
            "long-running-script-build-project",
            environment_variables={
                "S3_ARTIFACTS_BUCKET": {
                    "value": artifacts_bucket.bucket_name
                },
                "S3_ARTIFACTS_OBJECT": {
                    "value": "script.py"
                }
            },
            environment=aws_codebuild.BuildEnvironment(
                build_image=aws_codebuild.LinuxBuildImage.STANDARD_3_0, ),
            timeout=cdk.Duration.hours(1),
            build_spec=aws_codebuild.BuildSpec.from_object({
                "version": "0.2",
                "phases": {
                    "install": {
                        "runtime-versions": {
                            "python": 3.8
                        }
                    },
                    "build": {
                        "commands": [
                            "aws s3 cp s3://$S3_ARTIFACTS_BUCKET/$S3_ARTIFACTS_OBJECT $S3_ARTIFACTS_OBJECT",
                            "python $S3_ARTIFACTS_OBJECT"
                        ]
                    }
                }
            }))
        # grant read access of the artifacts bucket to the codebuild role
        artifacts_bucket.grant_read(build_project.role)

        # create eventbridge rule to trigger codebuild project
        long_running_script_rule = aws_events.Rule(
            self,
            "long-running-script-build-trigger",
            schedule=aws_events.Schedule.rate(cdk.Duration.hours(1)))
        long_running_script_rule.add_target(
            targets.CodeBuildProject(build_project))

        # create sns topic as part of downstream services after codebuild project completes
        sns_topic = aws_sns.Topic(self, "script-completes-topic")

        # create eventbridge rule to publish to sns topic once codebuild project finishes (either succeeded, failed or stopped)
        codebuild_completes_rule = aws_events.Rule(
            self,
            "codebuild-scripts-complete-rule",
            event_pattern=aws_events.EventPattern(
                source=["aws.codebuild"],
                detail_type=["CodeBuild Build State Change"],
                detail={
                    "build-status": ["SUCCEEDED", "FAILED", "STOPPED"],
                    "project-name": [build_project.project_name]
                }))
        codebuild_completes_rule.add_target(targets.SnsTopic(sns_topic))

        cdk.CfnOutput(self,
                      "artifacts-bucket-output",
                      value=artifacts_bucket.bucket_name)
        cdk.CfnOutput(self,
                      "script-complete-topic-output",
                      value=sns_topic.topic_arn)
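
The hourly CodeBuild project copies script.py from the artifacts bucket before running it, so the script must be uploaded first. A minimal sketch, assuming the bucket name was taken from the artifacts-bucket-output stack output:

    # Hypothetical upload of the long-running script; the bucket name below
    # is a placeholder for the value of the "artifacts-bucket-output" output.
    import boto3

    s3 = boto3.client("s3")
    s3.upload_file("script.py", "<artifacts-bucket-name>", "script.py")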
Example #3
    def __init__(self, scope: core.Construct, id: str, prefix: str, **kwargs) -> None:  # prefix ('us' or 'eu') selects the config below
        super().__init__(scope, id, **kwargs)

        # resolve the directory containing this file so the Lambda assets
        # below can be located (module-level setup is omitted from the snippet)
        current_dir = os.path.dirname(os.path.realpath(__file__))

        aws_region = os.environ.get("CDK_DEPLOY_REGION", os.environ["CDK_DEFAULT_REGION"])
        account_id = os.environ.get("CDK_DEPLOY_ACCOUNT", os.environ["CDK_DEFAULT_ACCOUNT"])

        ssm_client = boto3.client('ssm', aws_region)
        # Prepare pipeline config details in SSM parameters
        if prefix == 'us':
            self.qs_reports_env_config = {"Permissions":
                                              [{"Group_Name": "critical",
                                                "Reports": ["Sales Results - Critical"],
                                                "ns_name": "default"},
                                               {"Group_Name": "highlyconfidential",
                                                "Reports": ["Field Operations Dashboard",
                                                            "Sales Results - Highly Confidential"
                                                            ],
                                                "ns_name": "default"},
                                               {"Group_Name": "bi-developer",
                                                "Reports": ["all"],
                                                "ns_name": "default"},
                                               {"Group_Name": "bi-admin",
                                                "Reports": ["all"],
                                                "ns_name": "default"},
                                               {"Group_Name": "power-reader",
                                                "Reports": ["read-all"],
                                                "ns_name": "default"},
                                               {"Group_Name": "3rd-party",
                                                "Reports": ["Marketing KPIs"],
                                                "ns_name": "3rd-party"},
                                               {"Group_Name": "3rd-party-reader",
                                                "Reports": ["Marketing KPIs"],
                                                "ns_name": "3rd-party"}
                                               ]
                                          }
        if prefix == 'eu':
            self.qs_reports_env_config = {"Permissions":
                                              [{"Group_Name": "eu-critical",
                                                "Reports": ["EUResults - Critical"]},
                                               {"Group_Name": "bi-developer",
                                                "Reports": ["all"]},
                                               {"Group_Name": "bi-admin",
                                                "Reports": ["all"]},
                                               {"Group_Name": "eu-highlyconfidential",
                                                "Reports": ["EUField Operations Dashboard",
                                                            "EUResults - Highly Confidential"]},
                                               {"Group_Name": "power-reader",
                                                "Reports": ["read-all"]}]}

        self.qs_reports_env_config_ssm = ssm.StringParameter(
            self, '/qs/config/access',
            string_value=json.dumps(self.qs_reports_env_config),
            parameter_name='/qs/config/access'
        )

        #group-user mapping information is stored in s3 bucket. A ssm parameter stores the bucket name.
        self.qs_user_group_config = {'bucket-name':f'qs-granular-access-demo-{account_id}'}

        bucket = s3.Bucket(self, f'qs-granular-access-demo-{account_id}',
                           bucket_name=f'qs-granular-access-demo-{account_id}',
                           versioned=True,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           auto_delete_objects=True)

        s3deploy.BucketDeployment(self, "DeployMembership",
                                  sources=[s3deploy.Source.asset('membership.zip')],
                                destination_bucket=bucket,
                                destination_key_prefix='membership',
                                                  prune=False)

        self.qs_user_group_config_ssm = ssm.StringParameter(
            self, '/qs/config/groups',
            string_value=json.dumps(self.qs_user_group_config),
            parameter_name='/qs/config/groups'
        )

        # group-role mapping information is stored in a ssm parameter.
        self.qs_role_config = {'default_bi-developer': 'AUTHOR',
                               'default_bi-admin': 'ADMIN',
                               'default_power-reader': 'AUTHOR',
                               'default_critical': 'READER',
                               'default_highlyconfidential': 'READER',
                               'default_marketing': 'AUTHOR',
                               '3rd-party_3rd-party': 'AUTHOR',
                               '3rd-party_3rd-party-reader': 'READER'
                               }

        self.qs_role_config_ssm = ssm.StringParameter(
            self, '/qs/config/roles',
            string_value=json.dumps(self.qs_role_config),
            parameter_name='/qs/config/roles'
        )

        # group-namespace mapping information is stored in a ssm parameter.
        self.qs_ns_config = {"ns":['default',
                             '3rd-party']}

        self.qs_ns_config_ssm = ssm.StringParameter(
            self, '/qs/config/ns',
            string_value=json.dumps(self.qs_ns_config),
            parameter_name='/qs/config/ns'
        )

        lambda_role = iam.Role(
            self,
            id='lambda-role',
            description='Role for the quicksight lambda',
            role_name=f'{aws_region}-role-quicksight-lambda',
            max_session_duration=core.Duration.seconds(3600),
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            inline_policies={
                'AllowS3Access': iam.PolicyDocument(
                    statements=[
                        iam.PolicyStatement(
                            effect=iam.Effect.ALLOW,
                            actions=["kms:GetParametersForImport",
                                     "kms:GetPublicKey",
                                     "kms:ListKeyPolicies",
                                     "kms:ListRetirableGrants",
                                     "kms:GetKeyPolicy",
                                     "kms:ListResourceTags",
                                     "kms:ListGrants",
                                     "kms:GetParametersForImport",
                                     "kms:GetKeyRotationStatus",
                                     "kms:DescribeKey",
                                     "kms:CreateGrant",
                                     "kms:ListAliases",
                                     "kms:ListKeys",
                                     "kms:DescribeCustomKeyStores",
                                     "ssm:GetParameters",
                                     "ssm:GetParameter",
                                     "ssm:GetParametersByPath"
                                     ],
                            resources=['*']
                        ),
                        iam.PolicyStatement(
                            effect=iam.Effect.ALLOW,
                            actions=["lambda:InvokeFunction",
                                     "logs:CreateLogStream",
                                     "logs:CreateLogGroup",
                                     "logs:PutLogEvents",
                                     "quicksight:*",
                                     "s3:HeadBucket",
                                     "s3:ListAllMyBuckets",
                                     "s3:PutObject",
                                     "s3:GetObject",
                                     "s3:ListBucket",
                                     "s3:GetObjectVersionForReplication",
                                     "s3:GetBucketPolicy",
                                     "s3:GetObjectVersion",
                                     "cloudwatch:PutMetricData",
                                     "sts:GetCallerIdentity"],
                            resources=['*']
                        )
                    ]
                )
            }
        )

        user_init = _lambda.Function(self, 'user_init',
                                           handler='user_init.lambda_handler',
                                           runtime=_lambda.Runtime.PYTHON_3_7,
                                           code=_lambda.Code.from_asset(os.path.join(current_dir,
                                                                                     '../lambda_functions/user_init/')),
                                           function_name='user_init',
                                           role=lambda_role,
                                           timeout=core.Duration.minutes(15),
                                           memory_size=512
                                           )

        check_team_members = _lambda.Function(self, 'check_team_members',
                                                    handler='check_team_members.lambda_handler',
                                                    runtime=_lambda.Runtime.PYTHON_3_7,
                                                    code=_lambda.Code.from_asset(os.path.join(current_dir,
                                                                                              '../lambda_functions/check_team_members/')),
                                                    function_name='check_team_members',
                                                    role=lambda_role,
                                                    timeout=core.Duration.minutes(15),
                                                    memory_size=512,
                                                    environment={'aws_region': f'{core.Aws.REGION}'}
                                                    )

        downgrade_user = _lambda.Function(self, 'downgrade_user',
                                                handler='downgrade_user.lambda_handler',
                                                runtime=_lambda.Runtime.PYTHON_3_8,
                                                code=_lambda.Code.from_asset(os.path.join(current_dir,
                                                                                          '../lambda_functions/downgrade_user/')),
                                                function_name='downgrade_user',
                                                role=lambda_role,
                                                timeout=core.Duration.minutes(15),
                                                memory_size=2048,
                                                environment={'aws_region': f'{core.Aws.REGION}'}
                                                )

        granular_user_govenance = _lambda.Function(self, 'granular_user_govenance',
                                                 handler='granular_user_govenance.lambda_handler',
                                                 runtime=_lambda.Runtime.PYTHON_3_7,
                                                 code=_lambda.Code.from_asset(os.path.join(current_dir,
                                                                                           '../lambda_functions/granular_user_govenance')),
                                                 function_name='granular_user_govenance',
                                                 role=lambda_role,
                                                 timeout=core.Duration.minutes(15),
                                                 memory_size=2048,
                                                 environment={'aws_region': f'{core.Aws.REGION}'}
                                                )

        granular_access_assets_govenance = _lambda.Function(self, 'granular_access_assets_govenance',
                                                   handler='granular_access_assets_govenance.lambda_handler',
                                                   runtime=_lambda.Runtime.PYTHON_3_7,
                                                   code=_lambda.Code.from_asset(os.path.join(current_dir,
                                                                                             '../lambda_functions/granular_access_assets_govenance')),
                                                   function_name='granular_access_assets_govenance',
                                                   role=lambda_role,
                                                   timeout=core.Duration.minutes(15),
                                                   memory_size=2048,
                                                   environment={'aws_region': f'{core.Aws.REGION}'}
                                                   )

        quicksight_event_rule = events.Rule(self, 'QuickSightCWEventRule',
                                             description='CloudWatch rule to detect new QuickSight user creation',
                                             rule_name='qs-gc-user-creation',
                                             targets=[targets.LambdaFunction(user_init)],
                                             event_pattern=events.EventPattern(source=['aws.quicksight'],
                                                                               detail_type=[
                                                                                   'AWS Service Event via CloudTrail'],
                                                                               detail={
                                                                                   "eventSource": [
                                                                                       "quicksight.amazonaws.com"],
                                                                                   "eventName": ["CreateUser"]
                                                                               }
                                                                               )
                                             )

        quicksight_schedule_rule = events.Rule(self, 'quicksight_schedule_rule',
                                               description='CloudWatch rule to run QS objects/groups assignment every hour',
                                               rule_name='qs-gc-every-hour',
                                               schedule=events.Schedule.cron(minute="0"),
                                               targets=[targets.LambdaFunction(granular_user_govenance)]
                                               )

        quicksight_assume_condition_object = {"StringEquals": {
            "SAML:aud": "https://signin.aws.amazon.com/saml"}}

        quicksight_federated_prin_with_condition_obj = iam.FederatedPrincipal(
            f'arn:aws:iam::{core.Aws.ACCOUNT_ID}:saml-provider/saml', quicksight_assume_condition_object,
            'sts:AssumeRoleWithSAML')

        quicksight_resource_scope = '${aws:userid}'
        quicksight_reader_saml_inline_policies = {
            'AllowQuicksightAccessSAML': iam.PolicyDocument(
                statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=['quicksight:CreateReader'],
                        resources=[
                            f'arn:aws:quicksight::{core.Aws.ACCOUNT_ID}:user/{quicksight_resource_scope}']
                    )
                ]
            )
        }

        quicksight_users = iam.Role(
            self,
            id=f"quicksight-fed-{prefix}-users",  # this is the default group with no access
            description='Role for the quicksight reader SAML',
            role_name=f"quicksight-fed-{prefix}-users",
            max_session_duration=core.Duration.seconds(3600),
            assumed_by=quicksight_federated_prin_with_condition_obj,
            inline_policies=quicksight_reader_saml_inline_policies
        )
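
Since the report configuration branches on prefix, the stack is instantiated with the desired region prefix. A minimal sketch; the class name QsGranularAccessStack is an assumption:

    # Hypothetical entrypoint; the class name is assumed.
    app = core.App()
    QsGranularAccessStack(app, "qs-granular-access", prefix="us")
    app.synth()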
Example #4
    def __init__(self, scope: core.Construct, construct_id: str,
                 stack_log_level: str, orders_bus, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Add your stack resources below

        # Read Lambda Code
        try:
            with open(
                    "stacks/back_end/serverless_eventbridge_consumer_stack/lambda_src/eventbridge_data_consumer.py",
                    encoding="utf-8",
                    mode="r") as f:
                msg_consumer_fn_code = f.read()
        except OSError:
            print("Unable to read Lambda Function Code")
            raise
        msg_consumer_fn = _lambda.Function(
            self,
            "msgConsumerFn",
            function_name=f"events_consumer_fn",
            description="Process messages in EventBridge queue",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(msg_consumer_fn_code),
            handler="index.lambda_handler",
            timeout=core.Duration.seconds(3),
            reserved_concurrent_executions=1,
            environment={
                "LOG_LEVEL": f"{stack_log_level}",
                "APP_ENV": "Production"
            })

        msg_consumer_fn_version = msg_consumer_fn.latest_version
        msg_consumer_fn_version_alias = _lambda.Alias(
            self,
            "msgConsumerFnAlias",
            alias_name="MystiqueAutomation",
            version=msg_consumer_fn_version)

        # Create Custom Loggroup for the Consumer
        msg_consumer_fn_lg = _logs.LogGroup(
            self,
            "msgConsumerFnLogGroup",
            log_group_name=f"/aws/lambda/{msg_consumer_fn.function_name}",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=_logs.RetentionDays.ONE_DAY)

        # Restrict the Consumer Lambda to be invoked only from the stack owner account
        msg_consumer_fn.add_permission("restrictLambdaInvocationToOwnAccount",
                                       principal=_iam.AccountRootPrincipal(),
                                       action="lambda:InvokeFunction",
                                       source_account=core.Aws.ACCOUNT_ID,
                                       source_arn=orders_bus.event_bus_arn)

        # Event Pattern
        self.orders_pattern = _evnts.EventPattern(detail_type=["sales-events"])

        # EventBridge Routing Rule
        self.orders_routing = _evnts.Rule(
            self,
            f"ordersEventRoutingRule01",
            description="A simple events routing rule",
            enabled=True,
            event_bus=orders_bus,
            event_pattern=self.orders_pattern,
            rule_name="orders_routing_to_consumer",
            targets=[_evnts_tgt.LambdaFunction(handler=msg_consumer_fn)])

        self.orders_routing.apply_removal_policy(core.RemovalPolicy.DESTROY)

        # Restrict the Producer Lambda to be invoked only from the stack owner
        # account (data_producer_fn is assumed to be defined earlier in the
        # original stack; its definition is not part of this snippet)
        data_producer_fn.add_permission(
            "restrictLambdaInvocationToFhInOwnAccount",
            principal=_iam.AccountRootPrincipal(),
            action="lambda:InvokeFunction",
            source_account=core.Aws.ACCOUNT_ID)

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output_2 = core.CfnOutput(
            self,
            "msgConsumer",
            value=
            f"https://console.aws.amazon.com/lambda/home?region={core.Aws.REGION}#/functions/{msg_consumer_fn.function_name}",
            description="Process events received from eventbridge event bus")
Example #5
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        eventTargets = []

        policyStatement = _iam.PolicyStatement(
            resources = ['*'],
            actions = [
                "cloudwatch:PutMetricAlarm",
                "cloudwatch:ListMetrics",
                "cloudwatch:DeleteAlarms",
                "ec2:CreateTags",
                "ec2:Describe*",
                "ec2:Attach*",
                "elasticloadbalancing:Describe*",
                "elasticloadbalancing:Create*",
                "elasticloadbalancing:AddTags"
            ],
            effect = _iam.Effect.ALLOW
        )

        glom_layer = _lambda.LayerVersion.from_layer_version_attributes(
            self,
            "glom_api_layer",
            layer_version_arn="arn:aws:lambda:us-east-1:<AWS ACCOUNT>:layer:python-glom-layer:1",
            compatible_runtimes=[
                _lambda.Runtime.PYTHON_3_6,
                _lambda.Runtime.PYTHON_3_7
            ]
        )

        eventHandler = _lambda.Function(
            self,
            'resourceTagger',
            runtime = _lambda.Runtime.PYTHON_3_7,
            code = _lambda.Code.asset('lambda'),
            handler = 'auto_tag.handler',
            layers=[glom_layer]
        )

        eventHandler.add_to_role_policy(policyStatement)

        eventTargets.append(_targets.LambdaFunction(handler = eventHandler))

        pattern = _events.EventPattern(
            source = ['aws.ec2', 'aws.elasticloadbalancing'],
            detail_type = [ "AWS API Call via CloudTrail"],
            detail = {
                "eventSource": [
                  "ec2.amazonaws.com",
                  "elasticloadbalancing.amazonaws.com"
                ],
                "eventName": [
                    "RunInstances",
                    "CreateSnapshot",
                    "CreateVolume",
                    "CreateImage",
                    "CreateLoadBalancer",
                    "AttachNetworkInterface"
                ]
            }
        )

        _events.Rule(
            scope = self,
            id = 'AutoTagsRule',
            description = 'Monitor EC2 and ELB events',
            rule_name = 'AutoTagsRule',
            event_pattern = pattern,
            targets = eventTargets
        )
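
The layer ARN above still carries an <AWS ACCOUNT> placeholder. A hedged alternative is to build the ARN from the CDK account token, assuming the layer exists in the deployment account:

    # Sketch: substitute the account id at synth time instead of hard-coding it.
    layer_version_arn = (
        f"arn:aws:lambda:us-east-1:{core.Aws.ACCOUNT_ID}"
        ":layer:python-glom-layer:1")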
Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # DynamoDB Table
        # This will store our error records
        # TTL Docs - https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/time-to-live-ttl-how-to.html
        table = dynamo_db.Table(self,
                                "CircuitBreaker",
                                partition_key=dynamo_db.Attribute(
                                    name="RequestID",
                                    type=dynamo_db.AttributeType.STRING),
                                sort_key=dynamo_db.Attribute(
                                    name="ExpirationTime",
                                    type=dynamo_db.AttributeType.NUMBER),
                                time_to_live_attribute='ExpirationTime')

        # Add an index that lets us query on site url and Expiration Time
        table.add_global_secondary_index(
            index_name='UrlIndex',
            partition_key=dynamo_db.Attribute(
                name="SiteUrl", type=dynamo_db.AttributeType.STRING),
            sort_key=dynamo_db.Attribute(name="ExpirationTime",
                                         type=dynamo_db.AttributeType.NUMBER))

        # defines an Integration Lambda to call our failing web service
        integration_lambda = _lambda.Function(
            self,
            "WebserviceIntegrationLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="lambda.handler",
            code=_lambda.Code.from_asset("lambda_fns/webservice"),
            timeout=core.Duration.seconds(20),
            environment=dict(TABLE_NAME=table.table_name))

        # grant the lambda role read permissions to our table
        table.grant_read_data(integration_lambda)

        # We need to give your lambda permission to put events on our EventBridge
        event_policy = iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                           resources=['*'],
                                           actions=['events:PutEvents'])
        integration_lambda.add_to_role_policy(event_policy)

        # defines a lambda to insert errors into dynamoDB
        error_lambda = _lambda.Function(
            self,
            "ErrorLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="lambda.handler",
            code=_lambda.Code.from_asset("lambda_fns/error"),
            timeout=core.Duration.seconds(3),
            environment=dict(TABLE_NAME=table.table_name))

        table.grant_write_data(error_lambda)

        # Create EventBridge rule to route failures
        error_rule = events.Rule(
            self,
            'webserviceErrorRule',
            description='Failed Webservice Call',
            event_pattern=events.EventPattern(
                source=['cdkpatterns.eventbridge.circuitbreaker'],
                detail_type=['httpcall'],
                detail={"status": ["fail"]}))

        error_rule.add_target(targets.LambdaFunction(handler=error_lambda))

        # defines an API Gateway REST API resource backed by our "integration_lambda" function
        api_gw.LambdaRestApi(self,
                             'CircuitBreakerGateway',
                             handler=integration_lambda)
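
The error rule only matches events with source cdkpatterns.eventbridge.circuitbreaker, detail-type httpcall, and a "fail" status, published to the default bus. A matching test-event sketch; the siteUrl field name is an assumption based on the table's SiteUrl index:

    import json

    import boto3

    boto3.client("events").put_events(Entries=[{
        "Source": "cdkpatterns.eventbridge.circuitbreaker",
        "DetailType": "httpcall",
        "Detail": json.dumps({"status": "fail",
                              "siteUrl": "https://example.com"}),  # field name assumed
    }])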
Example #7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        table = ddb.Table(self,
                          'StripeSampleCustomers',
                          partition_key={
                              'name': 'CustomerID',
                              'type': ddb.AttributeType.STRING
                          })

        bus = events.EventBus(self,
                              'stripeAppEventBus',
                              event_bus_name='stripeAppEventBus')

        lambda_role_for_go = iam.Role(
            self,
            "Role",
            role_name='stripeAppRole',
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonEventBridgeFullAccess"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "SecretsManagerReadWrite")
            ])

        customer_created_handler = _lambda.Function(
            self,
            "createStripeCustomerHandler",
            runtime=_lambda.Runtime.GO_1_X,
            code=_lambda.Code.asset('lambda/stripe-create-customer'),
            handler='createCustomerHandler',
            timeout=core.Duration.seconds(8),
            role=lambda_role_for_go,
            environment={
                'CUSTOMER_TABLE_NAME': table.table_name,
            })
        table.grant_read_write_data(customer_created_handler)

        go_lambda = _lambda.Function(
            self,
            "stripeWebhookEventHandler",
            runtime=_lambda.Runtime.GO_1_X,
            code=_lambda.Code.asset('lambda/stripe-webhook-handler'),
            handler='stripeWebhookHandler',
            timeout=core.Duration.seconds(8),
            role=lambda_role_for_go,
        )

        _apigw.LambdaRestApi(self, "stripeWebhookAPI", handler=go_lambda)

        customer_created_handler.add_permission(
            "createStripeCustomerHandlerPermission",
            principal=iam.ServicePrincipal("events.amazonaws.com"),
            action='lambda:InvokeFunction',
            source_arn=go_lambda.function_arn)

        go_lambda.add_permission(
            "stripeWebhookHandlerPermission",
            principal=iam.ServicePrincipal("lambda.amazonaws.com"),
            action='lambda:InvokeFunction',
            source_arn=customer_created_handler.function_arn)

        event = events.Rule(
            self,
            'stripeWebhookEventRule',
            rule_name='stripeWebhookEventRule',
            enabled=True,
            event_bus=bus,
            description=
            'all success events are caught here and logged centrally',
            event_pattern=events.EventPattern(
                detail={"stripeEvent": ["customer.subscription.created"]},
                source=["stripeWebHookHandler.lambda"]))

        event.add_target(targets.LambdaFunction(customer_created_handler))
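
The rule fires only for events on stripeAppEventBus whose source is stripeWebHookHandler.lambda and whose detail carries stripeEvent = customer.subscription.created. A matching test-event sketch; the detail-type value is an assumption, since the rule does not match on it:

    import json

    import boto3

    boto3.client("events").put_events(Entries=[{
        "EventBusName": "stripeAppEventBus",
        "Source": "stripeWebHookHandler.lambda",
        "DetailType": "stripe-webhook",  # not matched by the rule; value assumed
        "Detail": json.dumps({"stripeEvent": "customer.subscription.created"}),
    }])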
Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        pvt_bkt = _s3.Bucket(self, "s3bucket")
        core.Tag.add(pvt_bkt, key="isMonitoredBucket", value="True")

        # Lets create a cloudtrail to track s3 data events
        s3_data_event_trail = _cloudtrail.Trail(
            self,
            "s3DataEventTrailId",
            is_multi_region_trail=False,
            include_global_service_events=False,
            enable_file_validation=True)

        # Lets capture S3 Data Events only for our bucket- TO REDUCE COST
        s3_data_event_trail.add_s3_event_selector(
            prefixes=[f"{pvt_bkt.bucket_arn}/"],
            include_management_events=True,
            read_write_type=_cloudtrail.ReadWriteType.ALL)

        # Defines an AWS Lambda resource

        with open("lambda_src/is_object_private.py", encoding="utf8") as fp:
            is_object_private_fn_handler_code = fp.read()

        is_object_private_fn = _lambda.Function(
            self,
            id='isObjPrivateFn',
            function_name="is_object_private_fn",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(is_object_private_fn_handler_code),
            handler='index.lambda_handler',
            timeout=core.Duration.seconds(3))

        # Lets add the necessary permission for the lambda function
        is_object_private_fn_perms = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                "arn:aws:s3:::*",
            ],
            actions=["s3:GetObjectAcl"])
        is_object_private_fn.sid = "CheckObjectAcl"
        is_object_private_fn.add_to_role_policy(is_object_private_fn_perms)

        with open("lambda_src/make_object_private.py", encoding="utf8") as fp:
            make_object_private_fn_handler_code = fp.read()

        remediate_object_acl_fn = _lambda.Function(
            self,
            id='remediateObjAclFn',
            function_name="remediate_object_acl_fn",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.InlineCode(make_object_private_fn_handler_code),
            handler='index.lambda_handler',
            timeout=core.Duration.seconds(10))

        # Lets add the necessary permission for the lambda function
        remediate_object_acl_fn_perms = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            resources=[
                "arn:aws:s3:::*",
            ],
            actions=["s3:PutObjectAcl"])
        remediate_object_acl_fn_perms.sid = "PutObjectAcl"
        remediate_object_acl_fn.add_to_role_policy(
            remediate_object_acl_fn_perms)

        info_sec_ops_topic = _sns.Topic(self,
                                        "infoSecOpsTopicId",
                                        display_name="InfoSecTopic",
                                        topic_name="InfoSecOpsTopic")

        # Subscribe InfoSecOps Email to topic
        info_sec_ops_topic.add_subscription(
            _subs.EmailSubscription(global_args.INFO_SEC_OPS_EMAIL))

        # Grant Lambda permission to publish to topic
        # info_sec_ops_topic.grant_publish(lambda_notifier)

        # State Machine for notifying failed ACLs
        # Ref: https://docs.aws.amazon.com/cdk/api/latest/docs/aws-stepfunctions-readme.html
        ###############################################################################
        ################# STEP FUNCTIONS EXPERIMENTAL CODE - UNSTABLE #################
        ###############################################################################

        is_object_private_task = _sfn.Task(
            self,
            "isObjectPrivate?",
            task=_tasks.InvokeFunction(is_object_private_fn),
            result_path="$",
            output_path="$")

        remediate_object_acl_task = _sfn.Task(
            self,
            "RemediateObjectAcl",
            task=_tasks.InvokeFunction(remediate_object_acl_fn),
            result_path="$",
            output_path="$")

        notify_secops_task = _sfn.Task(
            self,
            "Notify InfoSecOps",
            task=_tasks.PublishToTopic(
                info_sec_ops_topic,
                integration_pattern=_sfn.ServiceIntegrationPattern.
                FIRE_AND_FORGET,
                message=_sfn.TaskInput.from_data_at("$.sns_message"),
                subject="Object Acl Remediation"))

        acl_remediation_failed_task = _sfn.Fail(self,
                                                "Acl Remediation Failed",
                                                cause="Acl Remediation Failed",
                                                error="Check Logs")

        acl_compliant_task = _sfn.Succeed(self,
                                          "Object Acl Compliant",
                                          comment="Object Acl is Compliant")

        remediate_object_acl_sfn_definition = is_object_private_task\
            .next(_sfn.Choice(self, "Is Object Private?")\
                .when(_sfn.Condition.boolean_equals("$.is_private", True), acl_compliant_task)\
                .when(_sfn.Condition.boolean_equals("$.is_private", False), remediate_object_acl_task\
                    .next(_sfn.Choice(self, "Object Remediation Complete?")\
                        .when(_sfn.Condition.boolean_equals("$.status", True),acl_compliant_task)\
                        .when(_sfn.Condition.boolean_equals("$.status", False), notify_secops_task.next(acl_remediation_failed_task))\
                        .otherwise(acl_remediation_failed_task)\
                        )
                    )
                .otherwise(acl_remediation_failed_task)
            )

        remediate_object_acl_statemachine = _sfn.StateMachine(
            self,
            "stateMachineId",
            definition=remediate_object_acl_sfn_definition,
            timeout=core.Duration.minutes(3))

        # Cloudwatch Event Triggers
        put_object_acl_event_targets = []
        """
        put_object_acl_event_targets.append(
            _targets.LambdaFunction( 
                handler=remediate_object_acl_fn
                )
            )
        """
        put_object_acl_event_targets.append(
            _targets.SfnStateMachine(
                machine=remediate_object_acl_statemachine))

        put_object_acl_event_pattern = _events.EventPattern(
            source=["aws.s3"],
            detail_type=["AWS API Call via CloudTrail"],
            detail={
                "eventSource": ["s3.amazonaws.com"],
                "eventName": ["PutObjectAcl", "PutObject"],
                "requestParameters": {
                    "bucketName": [f"{pvt_bkt.bucket_name}"]
                }
            })

        put_object_acl_event_pattern_rule = _events.Rule(
            self,
            "putObjectAclEventId",
            event_pattern=put_object_acl_event_pattern,
            rule_name=f"put_s3_policy_event_{global_args.OWNER}",
            enabled=True,
            description="Trigger an event for S3 PutObjectAcl or PutObject",
            targets=put_object_acl_event_targets)

        ###########################################
        ################# OUTPUTS #################
        ###########################################

        output0 = core.CfnOutput(
            self,
            "SecuirtyAutomationFrom",
            value=f"{global_args.SOURCE_INFO}",
            description=
            "To know more about this automation stack, check out our github page."
        )

        output1 = core.CfnOutput(
            self,
            "MonitoredS3Bucket",
            value=(f"https://console.aws.amazon.com/s3/buckets/"
                   f"{pvt_bkt.bucket_name}"),
            description=f"S3 Bucket for testing purposes")

        output2 = core.CfnOutput(
            self,
            "Helpercommands",
            value=
            (f"aws s3api get-object-acl --bucket {pvt_bkt.bucket_name} --key OBJECT-KEY-NAME"
             ),
            description=
            "Command to check an object's ACL. Update OBJECT-KEY-NAME to your needs"
        )
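
Both PutObject and PutObjectAcl calls against the monitored bucket reach the state machine through CloudTrail data events. A test sketch, with the bucket name taken from the MonitoredS3Bucket output and a hypothetical object key:

    import boto3

    s3 = boto3.client("s3")
    bucket = "<monitored-bucket-name>"  # value of the MonitoredS3Bucket output
    s3.put_object(Bucket=bucket, Key="test-object", Body=b"hello")
    s3.put_object_acl(Bucket=bucket, Key="test-object", ACL="public-read")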
Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        notification_email = create_notification_email_parameter(self)
        template_zip_name = create_template_zip_name_parameter(self)
        template_file_name = create_template_file_name_parameter(self)
        dev_params_file_name = create_stage_params_file_name_parameter(self, "DEV_PARAMS_NAME", "development")
        staging_params_file_name = create_stage_params_file_name_parameter(self, "STAGING_PARAMS_NAME", "staging")
        prod_params_file_name = create_stage_params_file_name_parameter(self, "PROD_PARAMS_NAME", "production")
        # create development parameters
        account_type = "development"
        dev_account_id = create_account_id_parameter(self, "DEV_ACCOUNT_ID", account_type)
        dev_org_id = create_org_id_parameter(self, "DEV_ORG_ID", account_type)
        # create staging parameters
        account_type = "staging"
        staging_account_id = create_account_id_parameter(self, "STAGING_ACCOUNT_ID", account_type)
        staging_org_id = create_org_id_parameter(self, "STAGING_ORG_ID", account_type)
        # create production parameters
        account_type = "production"
        prod_account_id = create_account_id_parameter(self, "PROD_ACCOUNT_ID", account_type)
        prod_org_id = create_org_id_parameter(self, "PROD_ORG_ID", account_type)
        # assets parameters
        blueprint_bucket_name = create_blueprint_bucket_name_parameter(self)
        assets_bucket_name = create_assets_bucket_name_parameter(self)
        stack_name = create_stack_name_parameter(self)

        # Resources #
        assets_bucket = s3.Bucket.from_bucket_name(self, "AssetsBucket", assets_bucket_name.value_as_string)

        # getting blueprint bucket object from its name - will be used later in the stack
        blueprint_bucket = s3.Bucket.from_bucket_name(self, "BlueprintBucket", blueprint_bucket_name.value_as_string)

        # create sns topic and subscription
        pipeline_notification_topic = sns.Topic(
            self,
            "PipelineNotification",
        )
        pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns()
        pipeline_notification_topic.add_subscription(
            subscriptions.EmailSubscription(email_address=notification_email.value_as_string)
        )

        # Defining pipeline stages
        # source stage
        source_output, source_action_definition = source_action_template(template_zip_name, assets_bucket)

        # DeployDev stage
        dev_deploy_lambda_arn, dev_stackset_action = create_stackset_action(
            self,
            "DeployDevStackSet",
            blueprint_bucket,
            source_output,
            "Artifact_Source_S3Source",
            template_file_name.value_as_string,
            dev_params_file_name.value_as_string,
            [dev_account_id.value_as_string],
            [dev_org_id.value_as_string],
            [core.Aws.REGION],
            assets_bucket,
            f"{stack_name.value_as_string}-dev",
        )

        # DeployStaging manual approval
        deploy_staging_approval = approval_action(
            "DeployStaging",
            pipeline_notification_topic,
            [notification_email.value_as_string],
            "Please approve to deploy to staging account",
        )

        # DeployStaging stage
        staging_deploy_lambda_arn, staging_stackset_action = create_stackset_action(
            self,
            "DeployStagingStackSet",
            blueprint_bucket,
            source_output,
            "Artifact_Source_S3Source",
            template_file_name.value_as_string,
            staging_params_file_name.value_as_string,
            [staging_account_id.value_as_string],
            [staging_org_id.value_as_string],
            [core.Aws.REGION],
            assets_bucket,
            f"{stack_name.value_as_string}-staging",
        )

        # DeployProd manual approval
        deploy_prod_approval = approval_action(
            "DeployProd",
            pipeline_notification_topic,
            [notification_email.value_as_string],
            "Please approve to deploy to production account",
        )

        # DeployProd stage
        prod_deploy_lambda_arn, prod_stackset_action = create_stackset_action(
            self,
            "DeployProdStackSet",
            blueprint_bucket,
            source_output,
            "Artifact_Source_S3Source",
            template_file_name.value_as_string,
            prod_params_file_name.value_as_string,
            [prod_account_id.value_as_string],
            [prod_org_id.value_as_string],
            [core.Aws.REGION],
            assets_bucket,
            f"{stack_name.value_as_string}-prod",
        )

        # create invoking lambda policy
        invoke_lambdas_policy = iam.PolicyStatement(
            actions=[
                "lambda:InvokeFunction",
            ],
            resources=[dev_deploy_lambda_arn, staging_deploy_lambda_arn, prod_deploy_lambda_arn],
        )

        # creating pipeline stages
        source_stage = codepipeline.StageProps(stage_name="Source", actions=[source_action_definition])

        deploy_dev_stage = codepipeline.StageProps(
            stage_name="DeployDev",
            actions=[dev_stackset_action, deploy_staging_approval],
        )

        deploy_staging_stage = codepipeline.StageProps(
            stage_name="DeployStaging",
            actions=[staging_stackset_action, deploy_prod_approval],
        )

        deploy_prod_stage = codepipeline.StageProps(
            stage_name="DeployProd",
            actions=[prod_stackset_action],
        )

        # constructing multi-account pipeline
        multi_account_pipeline = codepipeline.Pipeline(
            self,
            "MultiAccountPipeline",
            stages=[source_stage, deploy_dev_stage, deploy_staging_stage, deploy_prod_stage],
            cross_account_keys=False,
        )
        # add notification to the development stackset action
        dev_stackset_action.on_state_change(
            "NotifyUserDevDeployment",
            description="Notify user of the outcome of the DeployDev action",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    (
                        f"DeployDev action {events.EventField.from_path('$.detail.action')} in the Pipeline "
                        f"{events.EventField.from_path('$.detail.pipeline')} finished executing. "
                        f"Action execution result is {events.EventField.from_path('$.detail.state')}"
                    )
                ),
            ),
            event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
        )

        # add notification to the staging stackset action
        staging_stackset_action.on_state_change(
            "NotifyUserStagingDeployment",
            description="Notify user of the outcome of the DeployStaging action",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    (
                        f"DeployStaging action {events.EventField.from_path('$.detail.action')} in the Pipeline "
                        f"{events.EventField.from_path('$.detail.pipeline')} finished executing. "
                        f"Action execution result is {events.EventField.from_path('$.detail.state')}"
                    )
                ),
            ),
            event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
        )

        # add notification to the production stackset action
        prod_stackset_action.on_state_change(
            "NotifyUserProdDeployment",
            description="Notify user of the outcome of the DeployProd action",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    (
                        f"DeployProd action {events.EventField.from_path('$.detail.action')} in the Pipeline "
                        f"{events.EventField.from_path('$.detail.pipeline')} finished executing. "
                        f"Action execution result is {events.EventField.from_path('$.detail.state')}"
                    )
                ),
            ),
            event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
        )

        # add notification to the multi-account pipeline
        multi_account_pipeline.on_state_change(
            "NotifyUser",
            description="Notify user of the outcome of the pipeline",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text(
                    (
                        f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                        f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                    )
                ),
            ),
            event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
        )
        multi_account_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["events:PutEvents"],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            )
        )

        # add lambda permissions
        multi_account_pipeline.add_to_role_policy(invoke_lambdas_policy)

        # add cfn suppressions

        pipeline_child_nodes = multi_account_pipeline.node.find_all()
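        # Note: these hard-coded child indices depend on the order in which
        # constructs were added to the tree, so they are brittle across CDK
        # upgrades and refactors of this stack.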
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket()
        pipeline_child_nodes[6].node.default_child.cfn_options.metadata = suppress_iam_complex()
        pipeline_child_nodes[19].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[32].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        pipeline_child_nodes[45].node.default_child.cfn_options.metadata = suppress_list_function_policy()
        # attaching iam permissions to the pipelines
        pipeline_permissions(multi_account_pipeline, assets_bucket)

        # Outputs #
        core.CfnOutput(
            self,
            id="Pipelines",
            value=(
                f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
                f"{multi_account_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
            ),
        )
Example #10
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        with open("stack/config.yml", 'r') as stream:
            configs = yaml.safe_load(stream)

        ### S3 core
        images_S3_bucket = _s3.Bucket(self, "ICS_IMAGES")

        images_S3_bucket.add_cors_rule(
            allowed_methods=[_s3.HttpMethods.POST],
            allowed_origins=["*"]  # add API gateway web resource URL
        )

        ### SQS core
        image_deadletter_queue = _sqs.Queue(self,
                                            "ICS_IMAGES_DEADLETTER_QUEUE")
        image_queue = _sqs.Queue(
            self,
            "ICS_IMAGES_QUEUE",
            dead_letter_queue={
                "max_receive_count":
                configs["DeadLetterQueue"]["MaxReceiveCount"],
                "queue": image_deadletter_queue
            })
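        # Once a message has been received max_receive_count times without
        # being deleted, SQS moves it to the dead-letter queue above.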

        ### api gateway core
        api_gateway = RestApi(self,
                              'ICS_API_GATEWAY',
                              rest_api_name='ImageContentSearchApiGateway')
        api_gateway_resource = api_gateway.root.add_resource(
            configs["ProjectName"])
        api_gateway_landing_page_resource = api_gateway_resource.add_resource(
            'web')
        api_gateway_get_signedurl_resource = api_gateway_resource.add_resource(
            'signedUrl')
        api_gateway_image_search_resource = api_gateway_resource.add_resource(
            'search')

        ### landing page function
        get_landing_page_function = Function(
            self,
            "ICS_GET_LANDING_PAGE",
            function_name="ICS_GET_LANDING_PAGE",
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.from_asset("./src/landingPage"))

        get_landing_page_integration = LambdaIntegration(
            get_landing_page_function,
            proxy=True,
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin':
                    "'*'",
                }
            }])

        api_gateway_landing_page_resource.add_method(
            'GET',
            get_landing_page_integration,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])

        ### cognito
        required_attribute = _cognito.StandardAttribute(required=True)

        users_pool = _cognito.UserPool(
            self,
            "ICS_USERS_POOL",
            auto_verify=_cognito.AutoVerifiedAttrs(
                email=True),  #required for self sign-up
            standard_attributes=_cognito.StandardAttributes(
                email=required_attribute),  #required for self sign-up
            self_sign_up_enabled=configs["Cognito"]["SelfSignUp"])

        user_pool_app_client = _cognito.CfnUserPoolClient(
            self,
            "ICS_USERS_POOL_APP_CLIENT",
            supported_identity_providers=["COGNITO"],
            allowed_o_auth_flows=["implicit"],
            allowed_o_auth_scopes=configs["Cognito"]["AllowedOAuthScopes"],
            user_pool_id=users_pool.user_pool_id,
            callback_ur_ls=[api_gateway.url_for_path('/web')],
            allowed_o_auth_flows_user_pool_client=True,
            explicit_auth_flows=["ALLOW_REFRESH_TOKEN_AUTH"])

        user_pool_domain = _cognito.UserPoolDomain(
            self,
            "ICS_USERS_POOL_DOMAIN",
            user_pool=users_pool,
            cognito_domain=_cognito.CognitoDomainOptions(
                domain_prefix=configs["Cognito"]["DomainPrefix"]))

        ### get signed URL function
        get_signedurl_function = Function(
            self,
            "ICS_GET_SIGNED_URL",
            function_name="ICS_GET_SIGNED_URL",
            environment={
                "ICS_IMAGES_BUCKET":
                images_S3_bucket.bucket_name,
                "DEFAULT_SIGNEDURL_EXPIRY_SECONDS":
                configs["Functions"]["DefaultSignedUrlExpirySeconds"]
            },
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.from_asset("./src/getSignedUrl"))

        get_signedurl_integration = LambdaIntegration(
            get_signedurl_function,
            proxy=True,
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin':
                    "'*'",
                }
            }])

        api_gateway_get_signedurl_authorizer = CfnAuthorizer(
            self,
            "ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            rest_api_id=api_gateway_get_signedurl_resource.api.rest_api_id,
            name="ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            type="COGNITO_USER_POOLS",
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_get_signedurl_resource.add_method(
            'GET',
            get_signedurl_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]).node.find_child('Resource').add_property_override(
                'AuthorizerId', api_gateway_get_signedurl_authorizer.ref)
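        # CfnAuthorizer is an L1 construct, so add_method cannot reference it
        # directly; the authorizer id is instead patched onto the underlying
        # CfnMethod with a property override (the search endpoint below uses
        # the same pattern).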

        images_S3_bucket.grant_put(get_signedurl_function,
                                   objects_key_pattern="new/*")

        ### image massage function
        image_massage_function = Function(
            self,
            "ICS_IMAGE_MASSAGE",
            function_name="ICS_IMAGE_MASSAGE",
            timeout=Duration.seconds(6),
            runtime=Runtime.PYTHON_3_7,
            environment={"ICS_IMAGE_MASSAGE": image_queue.queue_name},
            handler="main.handler",
            code=Code.from_asset("./src/imageMassage"))

        images_S3_bucket.grant_write(image_massage_function, "processed/*")
        images_S3_bucket.grant_delete(image_massage_function, "new/*")
        images_S3_bucket.grant_read(image_massage_function, "new/*")

        new_image_added_notification = _s3notification.LambdaDestination(
            image_massage_function)

        images_S3_bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED, new_image_added_notification,
            _s3.NotificationKeyFilter(prefix="new/"))

        image_queue.grant_send_messages(image_massage_function)

        ### image analyzer function
        image_analyzer_function = Function(
            self,
            "ICS_IMAGE_ANALYSIS",
            function_name="ICS_IMAGE_ANALYSIS",
            runtime=Runtime.PYTHON_3_7,
            timeout=Duration.seconds(10),
            environment={
                "ICS_IMAGES_BUCKET":
                images_S3_bucket.bucket_name,
                "DEFAULT_MAX_CALL_ATTEMPTS":
                configs["Functions"]["DefaultMaxApiCallAttempts"],
                "REGION":
                Aws.REGION,
            },
            handler="main.handler",
            code=Code.from_asset("./src/imageAnalysis"))

        image_analyzer_function.add_event_source(
            _lambda_event_source.SqsEventSource(queue=image_queue,
                                                batch_size=10))
        # the analyzer consumes from the queue (the SqsEventSource above also
        # grants this implicitly); the massage function only sends
        image_queue.grant_consume_messages(image_analyzer_function)

        lambda_rekognition_access = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                "rekognition:DetectLabels",
                "rekognition:DetectModerationLabels"
            ],
            resources=["*"])

        image_analyzer_function.add_to_role_policy(lambda_rekognition_access)
        images_S3_bucket.grant_read(image_analyzer_function, "processed/*")

        ### API gateway finalizing
        self.add_cors_options(api_gateway_get_signedurl_resource)
        self.add_cors_options(api_gateway_landing_page_resource)
        self.add_cors_options(api_gateway_image_search_resource)

        ### database
        database_secret = _secrets_manager.Secret(
            self,
            "ICS_DATABASE_SECRET",
            secret_name="rds-db-credentials/image-content-search-rds-secret",
            generate_secret_string=_secrets_manager.SecretStringGenerator(
                generate_string_key='password',
                secret_string_template='{"username": "******"}',
                exclude_punctuation=True,
                exclude_characters='/@\" \\\'',
                require_each_included_type=True))

        database = _rds.CfnDBCluster(
            self,
            "ICS_DATABASE",
            engine=_rds.DatabaseClusterEngine.aurora_mysql(
                version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
            engine_mode="serverless",
            database_name=configs["Database"]["Name"],
            enable_http_endpoint=True,
            deletion_protection=configs["Database"]["DeletionProtection"],
            master_username=database_secret.secret_value_from_json(
                "username").to_string(),
            master_user_password=database_secret.secret_value_from_json(
                "password").to_string(),
            scaling_configuration=_rds.CfnDBCluster.
            ScalingConfigurationProperty(
                auto_pause=configs["Database"]["Scaling"]["AutoPause"],
                min_capacity=configs["Database"]["Scaling"]["Min"],
                max_capacity=configs["Database"]["Scaling"]["Max"],
                seconds_until_auto_pause=configs["Database"]["Scaling"]
                ["SecondsToAutoPause"]),
        )

        database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(
            Aws.REGION, Aws.ACCOUNT_ID, database.ref)

        secret_target = _secrets_manager.CfnSecretTargetAttachment(
            self,
            "ICS_DATABASE_SECRET_TARGET",
            target_type="AWS::RDS::DBCluster",
            target_id=database.ref,
            secret_id=database_secret.secret_arn)

        secret_target.node.add_dependency(database)

        ### database function
        image_data_function_role = _iam.Role(
            self,
            "ICS_IMAGE_DATA_FUNCTION_ROLE",
            role_name="ICS_IMAGE_DATA_FUNCTION_ROLE",
            assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaVPCAccessExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonRDSDataFullAccess")
            ])

        image_data_function = Function(
            self,
            "ICS_IMAGE_DATA",
            function_name="ICS_IMAGE_DATA",
            runtime=Runtime.PYTHON_3_7,
            timeout=Duration.seconds(5),
            role=image_data_function_role,
            environment={
                "DEFAULT_MAX_CALL_ATTEMPTS":
                configs["Functions"]["DefaultMaxApiCallAttempts"],
                "CLUSTER_ARN":
                database_cluster_arn,
                "CREDENTIALS_ARN":
                database_secret.secret_arn,
                "DB_NAME":
                database.database_name,
                "REGION":
                Aws.REGION
            },
            handler="main.handler",
            code=Code.from_asset("./src/imageData"))

        image_search_integration = LambdaIntegration(
            image_data_function,
            proxy=True,
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin':
                    "'*'",
                }
            }])

        api_gateway_image_search_authorizer = CfnAuthorizer(
            self,
            "ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            rest_api_id=api_gateway_image_search_resource.api.rest_api_id,
            name="ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            type="COGNITO_USER_POOLS",
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_image_search_resource.add_method(
            'POST',
            image_search_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]).node.find_child('Resource').add_property_override(
                'AuthorizerId', api_gateway_image_search_authorizer.ref)

        lambda_access_search = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=["translate:TranslateText"],
            resources=["*"])

        image_data_function.add_to_role_policy(lambda_access_search)

        ### custom resource
        lambda_provider = Provider(self,
                                   'ICS_IMAGE_DATA_PROVIDER',
                                   on_event_handler=image_data_function)

        CustomResource(self,
                       'ICS_IMAGE_DATA_RESOURCE',
                       service_token=lambda_provider.service_token,
                       pascal_case_properties=False,
                       resource_type="Custom::SchemaCreation",
                       properties={"source": "Cloudformation"})

        ### event bridge
        event_bus = _events.EventBus(self,
                                     "ICS_IMAGE_CONTENT_BUS",
                                     event_bus_name="ImageContentBus")

        event_rule = _events.Rule(
            self,
            "ICS_IMAGE_CONTENT_RULE",
            rule_name="ICS_IMAGE_CONTENT_RULE",
            description="The event from image analyzer to store the data",
            event_bus=event_bus,
            event_pattern=_events.EventPattern(
                resources=[image_analyzer_function.function_arn]),
        )

        event_rule.add_target(
            _event_targets.LambdaFunction(image_data_function))

        event_bus.grant_all_put_events(image_analyzer_function)
        image_analyzer_function.add_environment("EVENT_BUS",
                                                event_bus.event_bus_name)

        ### outputs
        CfnOutput(
            self,
            'CognitoHostedUILogin',
            value=
            'https://{}.auth.{}.amazoncognito.com/login?client_id={}&response_type=token&scope={}&redirect_uri={}'
            .format(user_pool_domain.domain_name, Aws.REGION,
                    user_pool_app_client.ref,
                    '+'.join(user_pool_app_client.allowed_o_auth_scopes),
                    api_gateway.url_for_path('/web')),
            description='The Cognito Hosted UI Login Page')
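
The snippet above calls self.add_cors_options(...) on three API resources, but the helper's definition is not part of this excerpt. A minimal sketch of what it might look like, assuming MockIntegration and PassthroughBehavior are imported from aws_cdk.aws_apigateway alongside the names already in use:

    def add_cors_options(self, apigw_resource):
        # Answer OPTIONS preflight requests with a MOCK integration,
        # echoing the standard CORS headers back to the browser.
        apigw_resource.add_method(
            'OPTIONS',
            MockIntegration(
                integration_responses=[{
                    'statusCode': '200',
                    'responseParameters': {
                        'method.response.header.Access-Control-Allow-Headers':
                        "'Content-Type,X-Amz-Date,Authorization,X-Api-Key'",
                        'method.response.header.Access-Control-Allow-Origin':
                        "'*'",
                        'method.response.header.Access-Control-Allow-Methods':
                        "'GET,POST,OPTIONS'"
                    }
                }],
                passthrough_behavior=PassthroughBehavior.NEVER,
                request_templates={"application/json": '{"statusCode": 200}'}),
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Headers': True,
                    'method.response.header.Access-Control-Allow-Methods': True,
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])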
Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_iam/Role.html
        iamRole = aws_iam.Role(
            self,
            'aws-synctags-role',
            role_name=RESOURCE_NAME_PREFIX + '-role',
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com'),
            path='/service-role/')

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
        lambdaHandler = _lambda.Function(self,
                                         'aws-synctags-event-handler',
                                         function_name=RESOURCE_NAME_PREFIX +
                                         '-event-handler',
                                         code=_lambda.Code.asset('./lambda'),
                                         handler='synctags.lambda_handler',
                                         timeout=core.Duration.seconds(30),
                                         runtime=_lambda.Runtime.PYTHON_3_7,
                                         role=iamRole)

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_sns/Topic.html
        snsTopic = aws_sns.Topic(self,
                                 'aws-synctags-topic',
                                 topic_name=RESOURCE_NAME_PREFIX + '-topic',
                                 display_name=RESOURCE_NAME_PREFIX + '-topic')

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_sns_subscriptions.html
        snsTopic.add_subscription(
            aws_sns_subscriptions.EmailSubscription(NOTIFY_EMAIL))

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_iam/Role.html
        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_iam/PolicyStatement.html
        iamRole.attach_inline_policy(
            aws_iam.Policy(
                self,
                'aws-synctags-policy',
                policy_name=RESOURCE_NAME_PREFIX + '-role-policy',
                statements=[
                    aws_iam.PolicyStatement(resources=[
                        self.format_arn(service='logs', resource='*')
                    ],
                                            actions=['logs:CreateLogGroup']),
                    aws_iam.PolicyStatement(
                        resources=[
                            self.format_arn(service='logs',
                                            resource='log-group',
                                            sep=':',
                                            resource_name='/aws/lambda/' +
                                            RESOURCE_NAME_PREFIX +
                                            '-event-handler:*')
                        ],
                        actions=['logs:CreateLogStream', 'logs:PutLogEvents']),
                    aws_iam.PolicyStatement(
                        resources=['*'],
                        actions=[
                            'ec2:DescribeInstances', 'ec2:DescribeVolumes',
                            'ec2:CreateTags', 'ec2:DeleteTags',
                            'ec2:DescribeNetworkInterfaces',
                            'ec2:DescribeTags', 'ec2:DescribeSnapshots',
                            'ec2:DescribeAddresses'
                        ]),
                    aws_iam.PolicyStatement(
                        resources=[snsTopic.topic_arn],
                        actions=['sns:Publish', 'sns:Subscribe']),
                    aws_iam.PolicyStatement(resources=['*'],
                                            actions=['iam:ListAccountAliases'])
                ]))

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_events.html
        eventRule = aws_events.Rule(
            self,
            'aws-synctags-event-rule',
            rule_name=RESOURCE_NAME_PREFIX + '-event-rule',
            enabled=True,
            event_pattern=aws_events.EventPattern(
                source=['aws.tag'],
                detail_type=['Tag Change on Resource'],
                detail={
                    'service': ['ec2'],
                    'resource-type': ['instance']
                }),
            targets=[aws_events_targets.LambdaFunction(lambdaHandler)])

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html#aws_cdk.aws_lambda.Function.add_permission
        #lambdaHandler.add_permission('aws-synctags-lambda-permission',
        #principal=aws_iam.ServicePrincipal('events.amazonaws.com'),
        #action='lambda:InvokeFunction',
        #source_arn=eventRule.rule_arn)
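        # The block above can stay commented out: the
        # aws_events_targets.LambdaFunction target used in the rule above
        # adds the required invoke permission to the function automatically.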

        lambdaHandler.add_environment('SyncTagKeys', SYNCTAG_KEYS)
        lambdaHandler.add_environment('NotifyTopicArn', snsTopic.topic_arn)
Example #12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ###
        # Let's create our own Event Bus for this rather than using default
        ###
        bus = events.EventBus(self,
                              'DestinedEventBus',
                              event_bus_name='the-destined-lambda')

        ###
        # Destinations need to be invoked asynchronously, so let's use SNS
        ###
        topic = sns.Topic(self,
                          'theDestinedLambdaTopic',
                          display_name='The Destined Lambda CDK Pattern Topic')

        ###
        # Lambda configured with success and failure destinations
        # Note the actual lambda has no EventBridge code inside it
        ###
        destined_lambda = _lambda.Function(
            self,
            "destinedLambda",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="destinedLambda.handler",
            code=_lambda.Code.from_asset("lambdas"),
            retry_attempts=0,
            on_success=destinations.EventBridgeDestination(event_bus=bus),
            on_failure=destinations.EventBridgeDestination(event_bus=bus))
        topic.add_subscription(
            subscriptions.LambdaSubscription(destined_lambda))

        ###
        # This is a lambda that will be called by onSuccess for destinedLambda
        # It simply prints the event it receives to the cloudwatch logs
        ###
        success_lambda = _lambda.Function(
            self,
            "successLambda",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="success.handler",
            code=_lambda.Code.from_asset("lambdas"),
            timeout=core.Duration.seconds(3))
        ###
        # EventBridge Rule to send events to our success lambda
        # Notice how we can still do event filtering based on the json payload returned by the destined lambda
        ###
        success_rule = events.Rule(
            self,
            'successRule',
            event_bus=bus,
            description=
            'all success events are caught here and logged centrally',
            event_pattern=events.EventPattern(
                detail={
                    "requestContext": {
                        "condition": ["Success"]
                    },
                    "responsePayload": {
                        "source": ["cdkpatterns.the-destined-lambda"],
                        "action": ["message"]
                    }
                }))
        success_rule.add_target(targets.LambdaFunction(success_lambda))

        ###
        # This is a lambda that will be called by onFailure for destinedLambda
        # It simply prints the event it receives to the cloudwatch logs.
        # Notice how it includes the message that came into the destined lambda and made it fail,
        # so you have everything you need to retry or investigate manually
        ###
        failure_lambda = _lambda.Function(
            self,
            "failureLambda",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="failure.handler",
            code=_lambda.Code.from_asset("lambdas"),
            timeout=core.Duration.seconds(3))

        ###
        # EventBridge Rule to send events to our failure lambda
        ###
        failure_rule = events.Rule(
            self,
            'failureRule',
            event_bus=bus,
            description=
            'all failure events are caught here and logged centrally',
            event_pattern=events.EventPattern(
                detail={"responsePayload": {
                    "errorType": ["Error"]
                }}))
        failure_rule.add_target(targets.LambdaFunction(failure_lambda))

        ###
        # API Gateway Creation
        # This is complicated because it transforms the incoming JSON payload into a query string URL.
        # That URL is used to post the payload to SNS without a Lambda in between
        ###

        gateway = api_gw.RestApi(
            self,
            'theDestinedLambdaAPI',
            deploy_options=api_gw.StageOptions(
                metrics_enabled=True,
                logging_level=api_gw.MethodLoggingLevel.INFO,
                data_trace_enabled=True,
                stage_name='prod'))
        # Give our gateway permissions to interact with SNS
        api_gw_sns_role = iam.Role(
            self,
            'ApiGatewaySNSRole',
            assumed_by=iam.ServicePrincipal('apigateway.amazonaws.com'))
        topic.grant_publish(api_gw_sns_role)

        # shortening the lines of later code
        schema = api_gw.JsonSchema
        schema_type = api_gw.JsonSchemaType

        # Because this isn't a proxy integration, we need to define our response model
        response_model = gateway.add_model(
            'ResponseModel',
            content_type='application/json',
            model_name='ResponseModel',
            schema=schema(
                schema=api_gw.JsonSchemaVersion.DRAFT4,
                title='pollResponse',
                type=schema_type.OBJECT,
                properties={'message': schema(type=schema_type.STRING)}))

        error_response_model = gateway.add_model(
            'ErrorResponseModel',
            content_type='application/json',
            model_name='ErrorResponseModel',
            schema=schema(schema=api_gw.JsonSchemaVersion.DRAFT4,
                          title='errorResponse',
                          type=schema_type.OBJECT,
                          properties={
                              'state': schema(type=schema_type.STRING),
                              'message': schema(type=schema_type.STRING)
                          }))

        request_template = "Action=Publish&" + \
                           "TargetArn=$util.urlEncode('" + topic.topic_arn + "')&" + \
                           "Message=please $input.params().querystring.get('mode')&" + \
                           "Version=2010-03-31"

        # This is the VTL to transform the error response
        error_template = {
            "state": 'error',
            "message": "$util.escapeJavaScript($input.path('$.errorMessage'))"
        }
        error_template_string = json.dumps(error_template,
                                           separators=(',', ':'))

        # This is how our gateway chooses what response to send based on selection_pattern
        integration_options = api_gw.IntegrationOptions(
            credentials_role=api_gw_sns_role,
            request_parameters={
                'integration.request.header.Content-Type':
                "'application/x-www-form-urlencoded'"
            },
            request_templates={"application/json": request_template},
            passthrough_behavior=api_gw.PassthroughBehavior.NEVER,
            integration_responses=[
                api_gw.IntegrationResponse(
                    status_code='200',
                    response_templates={
                        "application/json":
                        json.dumps({"message": 'Message added to SNS topic'})
                    }),
                api_gw.IntegrationResponse(
                    selection_pattern=r"^\[Error\].*",
                    status_code='400',
                    response_templates={
                        "application/json": error_template_string
                    },
                    response_parameters={
                        'method.response.header.Content-Type':
                        "'application/json'",
                        'method.response.header.Access-Control-Allow-Origin':
                        "'*'",
                        'method.response.header.Access-Control-Allow-Credentials':
                        "'true'"
                    })
            ])

        # Add a SendEvent endpoint onto the gateway
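        # Note: the integration uri below hardcodes the us-east-1 region in
        # the apigateway service ARN; a portable stack might build it from
        # core.Aws.REGION instead.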
        gateway.root.add_resource('SendEvent') \
            .add_method('GET', api_gw.Integration(type=api_gw.IntegrationType.AWS,
                                                  integration_http_method='POST',
                                                  uri='arn:aws:apigateway:us-east-1:sns:path//',
                                                  options=integration_options
                                                  ),
                        method_responses=[
                            api_gw.MethodResponse(status_code='200',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': response_model
                                                  }),
                            api_gw.MethodResponse(status_code='400',
                                                  response_parameters={
                                                      'method.response.header.Content-Type': True,
                                                      'method.response.header.Access-Control-Allow-Origin': True,
                                                      'method.response.header.Access-Control-Allow-Credentials': True
                                                  },
                                                  response_models={
                                                      'application/json': error_response_model
                                                  }),
                        ]
                        )
Example #13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Model all required resources
        '''
        Define IAM role that will be used for AWS Lambda Functions
        '''
        lambda_role = _iam.Role(
            self,
            id='lab1-bdc-lambda-role',
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'))
        '''
        Define the Amazon EventBridge construct and the event pattern to be used by the rule later.
        '''
        eb = _eb.EventBus(self,
                          id="lab1-bdc-eventbus",
                          event_bus_name="lab1-bdc-eventbus")
        eb_pattern = _eb.EventPattern(detail_type=["message-received"], )
        '''
        These lines define the constructs for our AWS Lambda Functions. There are 2 Lambda functions that we need to create: dispatch and consume.
        Because the dispatch function needs an environment variable, notice that we add an env var to its Function that can later be retrieved within the handler code (see the sketch in the comments after the dispatch function below).
        '''
        fnLambda_dispatch = _lambda.Function(
            self,
            "lab1-bdc-function-dispatch",
            code=_lambda.AssetCode("../lambda-functions/dispatch-function"),
            handler="app.handler",
            timeout=core.Duration.seconds(60),
            role=lambda_role,
            runtime=_lambda.Runtime.PYTHON_3_8)
        fnLambda_dispatch.add_environment("EVENT_BUS_NAME", eb.event_bus_name)
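        # For illustration only (the real handler lives in
        # ../lambda-functions/dispatch-function): app.handler might look
        # roughly like this, reading EVENT_BUS_NAME and emitting the
        # "message-received" events matched by the rule below.
        #
        #   import json, os, boto3
        #   client = boto3.client("events")
        #
        #   def handler(event, context):
        #       client.put_events(Entries=[{
        #           "EventBusName": os.environ["EVENT_BUS_NAME"],
        #           "Source": "lab1.bdc.dispatch",  # hypothetical source name
        #           "DetailType": "message-received",
        #           "Detail": json.dumps(event),
        #       }])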

        fnLambda_consume = _lambda.Function(
            self,
            "lab1-bdc-function-consume",
            code=_lambda.AssetCode("../lambda-functions/consume-function"),
            handler="app.handler",
            role=lambda_role,
            timeout=core.Duration.seconds(60),
            runtime=_lambda.Runtime.PYTHON_3_8)

        cw_policy_statement = _iam.PolicyStatement(effect=_iam.Effect.ALLOW)
        cw_policy_statement.add_actions("logs:CreateLogGroup")
        cw_policy_statement.add_actions("logs:CreateLogStream")
        cw_policy_statement.add_actions("logs:PutLogEvents")
        cw_policy_statement.add_actions("logs:DescribeLogStreams")
        cw_policy_statement.add_resources("*")
        lambda_role.add_to_policy(cw_policy_statement)

        eb_policy_statement = _iam.PolicyStatement(effect=_iam.Effect.ALLOW)
        eb_policy_statement.add_actions("events:PutEvents")
        eb_policy_statement.add_resources(eb.event_bus_arn)
        lambda_role.add_to_policy(eb_policy_statement)

        _eb.Rule(self,
                 id="lab1-bdc-eventRule",
                 description="A basic rule sample",
                 enabled=True,
                 event_bus=eb,
                 event_pattern=eb_pattern,
                 rule_name="BDC-BasicDispatchConsume",
                 targets=[_ebt.LambdaFunction(handler=fnLambda_consume)])
Example #14
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # If left unchecked this pattern could "fan out" on the transform and load
        # lambdas to the point that it consumes all resources on the account. This is
        # why we are limiting concurrency to 2 on all 3 lambdas. Feel free to raise this.
        lambda_throttle_size = 2

        ####
        # DynamoDB Table
        # This is where our transformed data ends up
        ####
        table = dynamo_db.Table(self,
                                "TransformedData",
                                partition_key=dynamo_db.Attribute(
                                    name="id",
                                    type=dynamo_db.AttributeType.STRING))

        ####
        # S3 Landing Bucket
        # This is where the user uploads the file to be transformed
        ####
        bucket = s3.Bucket(self, "LandingBucket")

        ####
        # Queue that listens for S3 Bucket events
        ####
        queue = sqs.Queue(self,
                          'newObjectInLandingBucketEventQueue',
                          visibility_timeout=core.Duration.seconds(300))

        bucket.add_event_notification(s3.EventType.OBJECT_CREATED,
                                      s3n.SqsDestination(queue))

        # EventBridge Permissions
        event_bridge_put_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=['*'],
            actions=['events:PutEvents'])

        ####
        # Fargate ECS Task Creation to pull data from S3
        #
        # Fargate is used here because, if you had a seriously large file,
        # you could stream the data through Fargate for as long as needed
        # (or raise its memory/storage to download the whole file) before
        # putting the data onto eventbridge. Lambda has hard limits on
        # runtime and memory/storage
        ####
        vpc = ec2.Vpc(self, "Vpc", max_azs=2)

        logging = ecs.AwsLogDriver(stream_prefix='TheEventBridgeETL',
                                   log_retention=logs.RetentionDays.ONE_WEEK)

        cluster = ecs.Cluster(self, 'Ec2Cluster', vpc=vpc)

        task_definition = ecs.TaskDefinition(
            self,
            'FargateTaskDefinition',
            memory_mib="512",
            cpu="256",
            compatibility=ecs.Compatibility.FARGATE)
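        # cpu="256" (0.25 vCPU) with memory_mib="512" is the smallest task
        # size combination Fargate supports.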

        # We need to give our fargate container permission to put events on our EventBridge
        task_definition.add_to_task_role_policy(event_bridge_put_policy)
        # Grant fargate container access to the object that was uploaded to s3
        bucket.grant_read(task_definition.task_role)

        container = task_definition.add_container(
            'AppContainer',
            image=ecs.ContainerImage.from_asset(
                'container/s3DataExtractionTask'),
            logging=logging,
            environment={
                'S3_BUCKET_NAME': bucket.bucket_name,
                'S3_OBJECT_KEY': ''
            })

        ####
        # Lambdas
        #
        # These are used for 4 phases:
        #
        # Extract    - kicks off the ecs fargate task to download data and splinter it into eventbridge events
        # Transform  - takes the two comma separated strings and produces a json object
        # Load       - inserts the data into dynamodb
        # Observe    - This is a lambda that subscribes to all events and logs them centrally
        ####

        subnet_ids = [subnet.subnet_id for subnet in vpc.private_subnets]

        ####
        # Extract
        # defines an AWS Lambda resource to trigger our fargate ecs task
        ####
        extract_lambda = _lambda.Function(
            self,
            "extractLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="s3SqsEventConsumer.handler",
            code=_lambda.Code.from_asset("lambdas/extract"),
            reserved_concurrent_executions=lambda_throttle_size,
            environment={
                "CLUSTER_NAME": cluster.cluster_name,
                "TASK_DEFINITION": task_definition.task_definition_arn,
                "SUBNETS": json.dumps(subnet_ids),
                "CONTAINER_NAME": container.container_name
            })
        queue.grant_consume_messages(extract_lambda)
        extract_lambda.add_event_source(_event.SqsEventSource(queue=queue))
        extract_lambda.add_to_role_policy(event_bridge_put_policy)

        run_task_policy_statement = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[task_definition.task_definition_arn],
            actions=['ecs:RunTask'])
        extract_lambda.add_to_role_policy(run_task_policy_statement)
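
        # ecs:RunTask alone is not enough: the Lambda must also be allowed to
        # pass the task role and execution role to ECS, hence the iam:PassRole
        # statement below.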

        task_execution_role_policy_statement = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=[
                task_definition.obtain_execution_role().role_arn,
                task_definition.task_role.role_arn
            ],
            actions=['iam:PassRole'])
        extract_lambda.add_to_role_policy(task_execution_role_policy_statement)

        ####
        # Transform
        # defines a lambda to transform the data that was extracted from s3
        ####

        transform_lambda = _lambda.Function(
            self,
            "TransformLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="transform.handler",
            code=_lambda.Code.from_asset("lambdas/transform"),
            reserved_concurrent_executions=lambda_throttle_size,
            timeout=core.Duration.seconds(3))
        transform_lambda.add_to_role_policy(event_bridge_put_policy)

        # Create EventBridge rule to route extraction events
        transform_rule = events.Rule(
            self,
            'transformRule',
            description='Data extracted from S3, needs transforming',
            event_pattern=events.EventPattern(
                source=['cdkpatterns.the-eventbridge-etl'],
                detail_type=['s3RecordExtraction'],
                detail={"status": ["extracted"]}))
        transform_rule.add_target(
            targets.LambdaFunction(handler=transform_lambda))

        ####
        # Load
        # load the transformed data in dynamodb
        ####

        load_lambda = _lambda.Function(
            self,
            "LoadLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="load.handler",
            code=_lambda.Code.from_asset("lambdas/load"),
            reserved_concurrent_executions=lambda_throttle_size,
            timeout=core.Duration.seconds(3),
            environment={"TABLE_NAME": table.table_name})
        load_lambda.add_to_role_policy(event_bridge_put_policy)
        table.grant_read_write_data(load_lambda)

        load_rule = events.Rule(
            self,
            'loadRule',
            description='Data transformed, needs loading into dynamodb',
            event_pattern=events.EventPattern(
                source=['cdkpatterns.the-eventbridge-etl'],
                detail_type=['transform'],
                detail={"status": ["transformed"]}))
        load_rule.add_target(targets.LambdaFunction(handler=load_lambda))

        ####
        # Observe
        # Watch for all cdkpatterns.the-eventbridge-etl events and log them centrally
        ####

        observe_lambda = _lambda.Function(
            self,
            "ObserveLambdaHandler",
            runtime=_lambda.Runtime.NODEJS_12_X,
            handler="observe.handler",
            code=_lambda.Code.from_asset("lambdas/observe"),
            reserved_concurrent_executions=lambda_throttle_size,
            timeout=core.Duration.seconds(3))

        observe_rule = events.Rule(
            self,
            'observeRule',
            description='all events are caught here and logged centrally',
            event_pattern=events.EventPattern(
                source=['cdkpatterns.the-eventbridge-etl']))

        observe_rule.add_target(targets.LambdaFunction(handler=observe_lambda))
Example #15
    def __init__(
        self,
        scope: core.Construct,
        construct_id: str,
        # deployment_asset: s3_assets.Asset,
        **kwargs,
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create Required parameters for sagemaker projects
        # see: https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-projects-templates-custom.html
        # see also: # https://docs.aws.amazon.com/cdk/latest/guide/parameters.html
        project_name = core.CfnParameter(
            self,
            "SageMakerProjectName",
            type="String",
            description="The name of the SageMaker project.",
            min_length=1,
            max_length=32,
        )
        project_id = core.CfnParameter(
            self,
            "SageMakerProjectId",
            type="String",
            min_length=1,
            max_length=16,
            description="Service generated Id of the project.",
        )
        stage_name = core.CfnParameter(
            self,
            "StageName",
            type="String",
            min_length=1,
            max_length=8,
            description="The stage name.",
            default="dev",
        )
        seed_bucket = core.CfnParameter(
            self,
            "CodeCommitSeedBucket",
            type="String",
            description="The optional s3 seed bucket",
            min_length=1,
        )
        seed_key = core.CfnParameter(
            self,
            "CodeCommitSeedKey",
            type="String",
            description="The optional s3 seed key",
            min_length=1,
        )
        register_lambda = core.CfnParameter(
            self,
            "RegisterLambda",
            type="String",
            description="The AWS Lambda to invoke when registering this model",
            min_length=1,
        )

        # Get the service catalog role for all permissions (if None CDK will create new roles)
        # CodeBuild and CodePipeline resources need to start with "sagemaker-" to be within default policy
        service_catalog_role = aws_iam.Role.from_role_arn(
            self,
            "PipelineRole",
            f"arn:{self.partition}:iam::{self.account}:role/service-role/AmazonSageMakerServiceCatalogProductsUseRole",
        )

        # Define the repository name and branch
        branch_name = "main"

        # Create source repo from seed bucket/key
        repo = codecommit.CfnRepository(
            self,
            "CodeRepo",
            repository_name="sagemaker-{}-repo".format(
                project_name.value_as_string),
            repository_description="Amazon SageMaker A/B testing pipeline",
            code=codecommit.CfnRepository.CodeProperty(
                s3=codecommit.CfnRepository.S3Property(
                    bucket=seed_bucket.value_as_string,
                    key=seed_key.value_as_string,
                    object_version=None,
                ),
                branch_name=branch_name,
            ),
            tags=[
                core.CfnTag(key="sagemaker:deployment-stage",
                            value=stage_name.value_as_string),
                core.CfnTag(key="sagemaker:project-id",
                            value=project_id.value_as_string),
                core.CfnTag(key="sagemaker:project-name",
                            value=project_name.value_as_string),
            ],
        )

        # Reference the newly created repository
        code = codecommit.Repository.from_repository_name(
            self, "ImportedRepo", repo.attr_name)

        cdk_build = codebuild.PipelineProject(
            self,
            "CdkBuild",
            project_name="sagemaker-{}-cdk-{}".format(
                project_name.value_as_string, stage_name.value_as_string),
            role=service_catalog_role,
            build_spec=codebuild.BuildSpec.from_object(
                dict(
                    version="0.2",
                    phases=dict(
                        install=dict(commands=[
                            "npm install aws-cdk",
                            "npm update",
                            "python -m pip install -r requirements.txt",
                        ]),
                        build=dict(commands=[
                            "npx cdk synth -o dist --path-metadata false",
                        ]),
                    ),
                    artifacts={
                        "base-directory": "dist",
                        "files": ["*.template.json"],
                    },
                    environment=dict(
                        buildImage=codebuild.LinuxBuildImage.AMAZON_LINUX_2_3,
                    ),
                )),
            environment_variables={
                "SAGEMAKER_PROJECT_NAME":
                codebuild.BuildEnvironmentVariable(
                    value=project_name.value_as_string),
                "SAGEMAKER_PROJECT_ID":
                codebuild.BuildEnvironmentVariable(
                    value=project_id.value_as_string),
                "STAGE_NAME":
                codebuild.BuildEnvironmentVariable(
                    value=stage_name.value_as_string),
            },
        )

        register_build = codebuild.PipelineProject(
            self,
            "RegisterBuild",
            project_name="sagemaker-{}-register-{}".format(
                project_name.value_as_string, stage_name.value_as_string),
            role=service_catalog_role,
            build_spec=codebuild.BuildSpec.from_object(
                dict(
                    version="0.2",
                    phases=dict(build=dict(commands=[
                        "python register.py > output.txt",
                    ]), ),
                    artifacts={
                        "files": ["output.txt"],
                    },
                    environment=dict(
                        buildImage=codebuild.LinuxBuildImage.AMAZON_LINUX_2_3,
                    ),
                )),
            environment_variables={
                "SAGEMAKER_PROJECT_NAME":
                codebuild.BuildEnvironmentVariable(
                    value=project_name.value_as_string),
                "STAGE_NAME":
                codebuild.BuildEnvironmentVariable(
                    value=stage_name.value_as_string),
                "REGISTER_LAMBDA":
                codebuild.BuildEnvironmentVariable(
                    value=register_lambda.value_as_string),
            },
        )

        source_output = codepipeline.Artifact()
        cdk_build_output = codepipeline.Artifact()
        register_build_output = codepipeline.Artifact()

        # Create the s3 artifact (name must be < 63 chars)
        s3_artifact = s3.Bucket(
            self,
            "S3Artifact",
            bucket_name="sagemaker-{}-artifact-{}-{}".format(
                project_id.value_as_string, stage_name.value_as_string,
                self.region),
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        deploy_pipeline = codepipeline.Pipeline(
            self,
            "Pipeline",
            role=service_catalog_role,
            artifact_bucket=s3_artifact,
            pipeline_name="sagemaker-{}-pipeline-{}".format(
                project_name.value_as_string, stage_name.value_as_string),
            stages=[
                codepipeline.StageProps(
                    stage_name="Source",
                    actions=[
                        codepipeline_actions.CodeCommitSourceAction(
                            action_name="CodeCommit_Source",
                            repository=code,
                            trigger=codepipeline_actions.CodeCommitTrigger.
                            NONE,  # Created below
                            event_role=service_catalog_role,
                            output=source_output,
                            branch=branch_name,
                            role=service_catalog_role,
                        )
                    ],
                ),
                codepipeline.StageProps(
                    stage_name="Build",
                    actions=[
                        codepipeline_actions.CodeBuildAction(
                            action_name="CDK_Build",
                            project=cdk_build,
                            input=source_output,
                            outputs=[
                                cdk_build_output,
                            ],
                            role=service_catalog_role,
                        ),
                    ],
                ),
                codepipeline.StageProps(
                    stage_name="Deploy",
                    actions=[
                        codepipeline_actions.
                        CloudFormationCreateUpdateStackAction(
                            action_name="SageMaker_CFN_Deploy",
                            run_order=1,
                            template_path=cdk_build_output.at_path(
                                "ab-testing-sagemaker.template.json"),
                            stack_name="sagemaker-{}-deploy-{}".format(
                                project_name.value_as_string,
                                stage_name.value_as_string),
                            admin_permissions=False,
                            role=service_catalog_role,
                            deployment_role=service_catalog_role,
                            replace_on_failure=True,
                        ),
                    ],
                ),
                codepipeline.StageProps(
                    stage_name="Register",
                    actions=[
                        codepipeline_actions.CodeBuildAction(
                            action_name="Register_Build",
                            project=register_build,
                            input=source_output,
                            outputs=[
                                register_build_output,
                            ],
                            role=service_catalog_role,
                        ),
                    ],
                ),
            ],
        )

        # Add deploy role to target the code pipeline when model package is approved
        deploy_rule = events.Rule(
            self,
            "DeployRule",
            rule_name="sagemaker-{}-model-{}".format(
                project_name.value_as_string, stage_name.value_as_string),
            description=
            "Rule to trigger a deployment when SageMaker Model registry is updated with a new model package. For example, a new model package is registered with Registry",
            event_pattern=events.EventPattern(
                source=["aws.sagemaker"],
                detail_type=["SageMaker Model Package State Change"],
                detail={
                    "ModelPackageGroupName": [
                        f"{project_name.value_as_string}-champion",
                        f"{project_name.value_as_string}-challenger",
                    ]
                },
            ),
            targets=[
                targets.CodePipeline(
                    pipeline=deploy_pipeline,
                    event_role=service_catalog_role,
                )
            ],
        )

        code_rule = events.Rule(
            self,
            "CodeRule",
            rule_name="sagemaker-{}-code-{}".format(
                project_name.value_as_string, stage_name.value_as_string),
            description=
            "Rule to trigger a deployment when SageMaker Model registry is updated with a new model package. For example, a new model package is registered with Registry",
            event_pattern=events.EventPattern(
                source=["aws.codecommit"],
                detail_type=["CodeCommit Repository State Change"],
                detail={
                    "event": ["referenceCreated", "referenceUpdated"],
                    "referenceType": ["branch"],
                    "referenceName": [branch_name],
                },
                resources=[code.repository_arn],
            ),
            targets=[
                targets.CodePipeline(
                    pipeline=deploy_pipeline,
                    event_role=service_catalog_role,
                )
            ],
        )
Example #16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        dynamodb_table = _ddb.Table(
            self,
            id="lab2-cm-ddb",
            table_name="lab2-cm-order-status",
            partition_key=Attribute(name='ID', type=AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY  # NOT for production
        )

        eb = _eb.EventBus(self,
                          id="lab2-cm-eventbus",
                          event_bus_name="lab2-cm-eventbus")

        lambda_role = _iam.Role(
            self,
            id='lab2-cm-role',
            assumed_by=_iam.ServicePrincipal('lambda.amazonaws.com'))

        dynamodb_policy_statement = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW)
        dynamodb_policy_statement.add_actions("dynamodb:*")
        dynamodb_policy_statement.add_resources(dynamodb_table.table_arn)
        lambda_role.add_to_policy(dynamodb_policy_statement)

        eventbridge_policy_statement = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW)
        eventbridge_policy_statement.add_actions("events:*")
        eventbridge_policy_statement.add_resources(eb.event_bus_arn)
        lambda_role.add_to_policy(eventbridge_policy_statement)

        cloudwatch_policy_statement = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW)
        cloudwatch_policy_statement.add_actions("logs:CreateLogGroup")
        cloudwatch_policy_statement.add_actions("logs:CreateLogStream")
        cloudwatch_policy_statement.add_actions("logs:PutLogEvents")
        cloudwatch_policy_statement.add_actions("logs:DescribeLogStreams")
        cloudwatch_policy_statement.add_resources("*")
        lambda_role.add_to_policy(cloudwatch_policy_statement)

        fn_lambda_invoice_service = aws_lambda.Function(
            self,
            "lab2-cm-invoiceService",
            code=aws_lambda.AssetCode("../lambda-functions/invoice-service/"),
            handler="app.lambda_handler",
            tracing=aws_lambda.Tracing.ACTIVE,
            timeout=core.Duration.seconds(30),
            role=lambda_role,
            runtime=aws_lambda.Runtime.PYTHON_3_8)
        fn_lambda_invoice_service.add_environment("TABLE_NAME",
                                                  dynamodb_table.table_name)

        fn_lambda_fulfilment_service = aws_lambda.Function(
            self,
            "lab2-cm-fulfilmentService",
            code=aws_lambda.AssetCode(
                "../lambda-functions/fulfilment-service/"),
            handler="app.lambda_handler",
            tracing=aws_lambda.Tracing.ACTIVE,
            timeout=core.Duration.seconds(30),
            role=lambda_role,
            runtime=aws_lambda.Runtime.PYTHON_3_8)
        fn_lambda_fulfilment_service.add_environment("TABLE_NAME",
                                                     dynamodb_table.table_name)
        fn_lambda_fulfilment_service.add_environment("EVENTBUS_NAME",
                                                     eb.event_bus_name)

        fn_lambda_forecasting_service = aws_lambda.Function(
            self,
            "lab2-cm-forecastingService",
            code=aws_lambda.AssetCode(
                "../lambda-functions/forecasting-service/"),
            handler="app.lambda_handler",
            tracing=aws_lambda.Tracing.ACTIVE,
            timeout=core.Duration.seconds(30),
            role=lambda_role,
            runtime=aws_lambda.Runtime.PYTHON_3_8)
        fn_lambda_forecasting_service.add_environment(
            "TABLE_NAME", dynamodb_table.table_name)

        fn_lambda_order_service = aws_lambda.Function(
            self,
            "lab2-cm-orderService",
            code=aws_lambda.AssetCode("../lambda-functions/order-service/"),
            handler="app.lambda_handler",
            timeout=core.Duration.seconds(30),
            tracing=aws_lambda.Tracing.ACTIVE,
            role=lambda_role,
            runtime=aws_lambda.Runtime.PYTHON_3_8)
        fn_lambda_order_service.add_environment("TABLE_NAME",
                                                dynamodb_table.table_name)
        fn_lambda_order_service.add_environment("EVENTBUS_NAME",
                                                eb.event_bus_name)

        fn_lambda_logistic_service = aws_lambda.Function(
            self,
            "lab2-cm-logisticService",
            code=aws_lambda.AssetCode("../lambda-functions/logistic-service/"),
            handler="app.lambda_handler",
            timeout=core.Duration.seconds(30),
            tracing=aws_lambda.Tracing.ACTIVE,
            role=lambda_role,
            runtime=aws_lambda.Runtime.PYTHON_3_8)
        fn_lambda_logistic_service.add_environment("TABLE_NAME",
                                                   dynamodb_table.table_name)

        eb_order_created_pattern = _eb.EventPattern(
            detail_type=["order_created"])
        eb_fulfilment_completed_pattern = _eb.EventPattern(
            detail_type=["fulfilment_completed"])
        eb_order_created_rule = _eb.Rule(
            self,
            id="lab2-cm-eventRule-order-created",
            description="Order created event",
            enabled=True,
            event_bus=eb,
            event_pattern=eb_order_created_pattern,
            rule_name="lab2-OrderCreated",
            targets=[
                _ebt.LambdaFunction(handler=fn_lambda_invoice_service),
                _ebt.LambdaFunction(handler=fn_lambda_fulfilment_service),
                _ebt.LambdaFunction(handler=fn_lambda_forecasting_service)
            ])

        eb_fulfilment_completed_rule = _eb.Rule(
            self,
            id="lab2-cm-eventRule-fulfilment-completed",
            description="Fulfilment completedevent",
            enabled=True,
            event_bus=eb,
            event_pattern=eb_fulfilment_completed_pattern,
            rule_name="lab2-FulfilmentCompleted",
            targets=[_ebt.LambdaFunction(handler=fn_lambda_logistic_service)])
        api = _ag.RestApi(
            self,
            id='lab2-cm-api-gateway',
        )
        api_lambda_integration = _ag.LambdaIntegration(fn_lambda_order_service)
        api.root.add_resource('order').add_method('GET',
                                                  api_lambda_integration)
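
The two rules above assume the order and fulfilment services publish "order_created" and "fulfilment_completed" events onto the custom bus. A minimal, hypothetical producer handler (not part of this stack; the source name and payload are illustrative) could look like:

    import json
    import os

    import boto3

    events_client = boto3.client("events")

    def lambda_handler(event, context):
        # Publish an "order_created" event to the bus named in EVENTBUS_NAME;
        # the detail-type is what eb_order_created_pattern matches on.
        events_client.put_events(Entries=[{
            "EventBusName": os.environ["EVENTBUS_NAME"],
            "Source": "lab2.order-service",  # illustrative source name
            "DetailType": "order_created",
            "Detail": json.dumps({"orderId": "123"}),
        }])
        return {"statusCode": 200}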
Example #17
    def __init__(self, scope: core.Construct, id: str, params: dict, **kwargs):  # pylint: disable=W0622
        super().__init__(scope, id, **kwargs)
        # pylint: disable=no-value-for-parameter
        stack = core.Stack.of(self)
        _pipeline = _codepipeline.Pipeline.from_pipeline_arn(
            self, 'pipeline', params["pipeline"])
        _source_account = params.get('source', {}).get('account_id')
        _provider = params.get('source', {}).get('provider')
        _add_trigger_on_changes = (
            _provider == 'codecommit' and _source_account
            and params.get('source', {}).get('trigger_on_changes')
            and not params.get('source', {}).get('poll_for_changes'))

        name = params.get('name')
        # These are only needed when the CodeCommit change trigger applies,
        # so look them up defensively.
        account_id = _source_account
        repo_name = params.get('source', {}).get('repo_name')

        if _add_trigger_on_changes:
            _event = _events.Rule(
                self,
                f'trigger_{name}',
                description=
                f'Triggers {name} on changes in source CodeCommit repository',
                event_pattern=_events.
                EventPattern(resources=[
                    f'arn:{stack.partition}:codecommit:{ADF_DEPLOYMENT_REGION}:{account_id}:{repo_name}'
                ],
                             source=["aws.codecommit"],
                             detail_type=[
                                 'CodeCommit Repository State Change'
                             ],
                             detail={
                                 "event":
                                 ["referenceCreated", "referenceUpdated"],
                                 "referenceType": ["branch"],
                                 "referenceName": [params['source']['branch']]
                             }))
            _event.add_target(_targets.CodePipeline(pipeline=_pipeline))
        if params.get('topic_arn'):
            # pylint: disable=no-value-for-parameter
            _topic = _sns.Topic.from_topic_arn(self, 'topic_arn',
                                               params["topic_arn"])
            _event = _events.Rule(
                self,
                f'pipeline_state_{name}',
                description=
                f"{name} | Trigger notifications based on pipeline state changes",
                enabled=True,
                event_pattern=_events.EventPattern(
                    detail={
                        "state": ["FAILED", "STARTED", "SUCCEEDED"],
                        "pipeline": [
                            f"{ADF_PIPELINE_PREFIX}{name}",
                        ]
                    },
                    detail_type=[
                        "CodePipeline Pipeline Execution State Change"
                    ],
                    source=["aws.codepipeline"]))
            _event.add_target(
                _targets.SnsTopic(
                    topic=_topic,
                    message=_events.RuleTargetInput.from_text(
                        # Need to parse and get the pipeline: "$.detail.pipeline" state: "$.detail.state"
                        f"The pipeline {_events.EventField.from_path('$.detail.pipeline')} "
                        f"from account {_events.EventField.account} "
                        f"has {_events.EventField.from_path('$.detail.state')} "
                        f"at {_events.EventField.time}.")))
        if params.get('completion_trigger'):
            # There might be other types of completion triggers later, e.g. Lambda.
            for index, pipeline in enumerate(params['completion_trigger'].get(
                    'pipelines', [])):
                _event = _events.Rule(
                    self,
                    f'completion_{pipeline}',
                    description=
                    f"Triggers {pipeline} on completion of {params['pipeline']}",
                    enabled=True,
                    event_pattern=_events.EventPattern(
                        detail={
                            "state": ["SUCCEEDED"],
                            "pipeline": [
                                f"{ADF_PIPELINE_PREFIX}{name}",
                            ]
                        },
                        detail_type=[
                            "CodePipeline Pipeline Execution State Change"
                        ],
                        source=["aws.codepipeline"]))
                # pylint: disable=no-value-for-parameter
                _completion_pipeline = _codepipeline.Pipeline.from_pipeline_arn(
                    self, f'pipeline-{index}',
                    f'arn:{stack.partition}:codepipeline:'
                    f'{ADF_DEPLOYMENT_REGION}:{ADF_DEPLOYMENT_ACCOUNT_ID}:'
                    f'{ADF_PIPELINE_PREFIX}{pipeline}')
                _event.add_target(
                    _targets.CodePipeline(pipeline=_completion_pipeline))
        if params.get('schedule'):
            _event = _events.Rule(
                self,
                f'schedule_{params["name"]}',
                description=
                f"Triggers {params['name']} on a schedule of {params['schedule']}",
                enabled=True,
                # pylint: disable=no-value-for-parameter
                schedule=_events.Schedule.expression(params['schedule']))
            _target_pipeline = _targets.CodePipeline(pipeline=_pipeline)
            _event.add_target(_target_pipeline)
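
For orientation, the params dict this construct consumes would look roughly like the following (keys inferred from the accesses above; every value is a placeholder):

    # Illustrative input for the construct above; all values are placeholders.
    params = {
        "name": "sample-pipeline",
        "pipeline": "arn:aws:codepipeline:eu-west-1:111111111111:adf-pipeline-sample",
        "source": {
            "provider": "codecommit",
            "account_id": "111111111111",
            "repo_name": "sample-repo",
            "branch": "main",
            "trigger_on_changes": True,
            "poll_for_changes": False,
        },
        "topic_arn": "arn:aws:sns:eu-west-1:111111111111:pipeline-notifications",
        "completion_trigger": {"pipelines": ["downstream-pipeline"]},
        "schedule": "rate(7 days)",  # any EventBridge schedule expression
    }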
Example #18
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        notification_email = create_notification_email_parameter(self)
        assets_bucket_name = create_assets_bucket_name_parameter(self)
        custom_container = create_custom_container_parameter(self)
        ecr_repo_name = create_ecr_repo_name_parameter(self)
        image_tag = create_image_tag_parameter(self)

        # Resources #
        assets_bucket = s3.Bucket.from_bucket_name(
            self, "AssetsBucket", assets_bucket_name.value_as_string)

        # Defining pipeline stages
        # source stage
        source_output, source_action_definition = source_action_custom(
            assets_bucket, custom_container)

        # build stage
        build_action_definition, container_uri = build_action(
            self, ecr_repo_name.value_as_string, image_tag.value_as_string,
            source_output)

        pipeline_notification_topic = sns.Topic(
            self,
            "PipelineNotification",
        )
        pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns(
        )
        pipeline_notification_topic.add_subscription(
            subscriptions.EmailSubscription(
                email_address=notification_email.value_as_string))

        # creating pipeline stages
        source_stage = codepipeline.StageProps(
            stage_name="Source", actions=[source_action_definition])
        build_stage = codepipeline.StageProps(
            stage_name="Build", actions=[build_action_definition])

        image_builder_pipeline = codepipeline.Pipeline(
            self,
            "BYOMPipelineReatimeBuild",
            stages=[source_stage, build_stage],
            cross_account_keys=False,
        )
        image_builder_pipeline.on_state_change(
            "NotifyUser",
            description="Notify user of the outcome of the pipeline",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text((
                    f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                    f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                )),
            ),
            event_pattern=events.EventPattern(
                detail={"state": ["SUCCEEDED", "FAILED"]}),
        )

        image_builder_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["events:PutEvents"],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            ))

        # add cfn-nag suppressions
        pipeline_child_nodes = image_builder_pipeline.node.find_all()
        pipeline_child_nodes[
            1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket(
            )
        pipeline_child_nodes[
            6].node.default_child.cfn_options.metadata = suppress_iam_complex(
            )
        # attaching iam permissions to the pipelines
        pipeline_permissions(image_builder_pipeline, assets_bucket)

        # Outputs #
        core.CfnOutput(
            self,
            id="Pipelines",
            value=
            (f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
             f"{image_builder_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
             ),
        )
        core.CfnOutput(
            self,
            id="CustomAlgorithmImageURI",
            value=container_uri,
        )
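
The on_state_change notification above interpolates fields from CodePipeline's state-change event, which (abridged, with illustrative values) looks like:

    # Abridged "CodePipeline Pipeline Execution State Change" event; the
    # RuleTargetInput above reads $.detail.pipeline and $.detail.state.
    sample_pipeline_event = {
        "source": "aws.codepipeline",
        "detail-type": "CodePipeline Pipeline Execution State Change",
        "detail": {
            "pipeline": "<generated-pipeline-name>",
            "state": "SUCCEEDED",  # or "FAILED", per the event_pattern
        },
    }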
Example #19
    def __init__(self, scope: core.Construct, id: str, group_name: str,
                 minute_duration: int, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # TODO: Setup alerting of failure to an SNS
        # TODO: Failure is not the same as a student not in a group
        # TODO: Streamline input data so that lambda's only get the info they really need
        # TODO: Comment
        # TODO: Need to separate unexpected errors from regular errors
        # Setting up monitoring

        schedule_stop = lambda_.Function(
            self,
            id="ScheduleStopLambda",
            runtime=lambda_.Runtime.PYTHON_3_7,
            code=lambda_.Code.from_inline(
                open("./resources/schedule-termination.py", 'r').read()),
            handler="index.handler",
            log_retention=logs.RetentionDays.ONE_DAY,
            environment=dict(GROUP_NAME=group_name),
            timeout=core.Duration.seconds(30))
        schedule_stop.add_to_role_policy(
            statement=iam.PolicyStatement(actions=[
                "ec2:Describe*", "iam:ListGroupsForUser", "iam:ListUsers"
            ],
                                          effect=iam.Effect.ALLOW,
                                          resources=["*"]))

        terminate_ec2 = lambda_.Function(
            self,
            id="TerminateEC2",
            runtime=lambda_.Runtime.PYTHON_3_7,
            code=lambda_.Code.from_inline(
                open("./resources/terminate-ec2.py", 'r').read()),
            handler="index.handler",
            log_retention=logs.RetentionDays.ONE_DAY,
            timeout=core.Duration.seconds(30))
        terminate_ec2.add_to_role_policy(
            statement=iam.PolicyStatement(actions=[
                "ec2:DescribeInstance*",
                "ec2:TerminateInstances",
            ],
                                          effect=iam.Effect.ALLOW,
                                          resources=["*"]))

        # Task that invokes the Lambda deciding whether termination should be scheduled.
        schedule_stop_task = tasks.LambdaInvoke(
            self,
            id='schedule stop',
            lambda_function=schedule_stop,
            input_path="$.detail.userIdentity",
            result_path="$.Payload",
        )
        # TODO: Need to change this based on the configuration info above
        # Wait state to try and delete
        # wait_x = sfn.Wait(self, 'Wait x minutes', time=sfn.WaitTime.seconds_path("10"))
        wait_x = sfn.Wait(self,
                          id='Wait x minutes',
                          time=sfn.WaitTime.duration(
                              core.Duration.minutes(minute_duration)))

        # Note: this Fail state is defined but not currently wired into the
        # state machine below.
        job_failed = sfn.Fail(self,
                              id="Failed Job",
                              cause="Error in the input",
                              error="Error")
        job_finished = sfn.Succeed(self, id="Job Finished")
        choice = sfn.Choice(self, 'Can I delete')
        choice.when(sfn.Condition.boolean_equals('$.Payload.Payload', False),
                    job_finished)
        choice.otherwise(wait_x)
        terminate_ec2_task = tasks.LambdaInvoke(
            self,
            'terminate',
            lambda_function=terminate_ec2,
            input_path="$.detail.responseElements.instancesSet")
        wait_x.next(terminate_ec2_task).next(job_finished)

        state_definition = schedule_stop_task \
            .next(choice)
        terminate_machine = sfn.StateMachine(self,
                                             id="State Machine",
                                             definition=state_definition)
        cloudwatch.Alarm(self,
                         "EC2ScheduleAlarm",
                         metric=terminate_machine.metric_failed(),
                         threshold=1,
                         evaluation_periods=1)
        # TODO Build Rule that monitors for EC2 creation
        # Any new creation, the EC2 will have to be destroyed.  Including
        # other things?
        create_event = events.Rule(
            self,
            id='detect-ec2-start',
            description="Detects if an EC2 is created",
            enabled=True,
            event_pattern=events.EventPattern(
                detail_type=["AWS API Call via CloudTrail"],
                source=["aws.ec2"],
                detail={
                    "eventName": ["RunInstances"],
                    "eventSource": ["ec2.amazonaws.com"]
                }),
            targets=[targets.SfnStateMachine(terminate_machine)])
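
The rule and the task input paths above rely on the shape of CloudTrail's RunInstances event as delivered through EventBridge, roughly (abridged; values illustrative):

    # Abridged CloudTrail-via-EventBridge event for RunInstances; the state
    # machine reads $.detail.userIdentity and
    # $.detail.responseElements.instancesSet.
    sample_run_instances_event = {
        "source": "aws.ec2",
        "detail-type": "AWS API Call via CloudTrail",
        "detail": {
            "eventSource": "ec2.amazonaws.com",
            "eventName": "RunInstances",
            "userIdentity": {"type": "IAMUser", "userName": "student1"},
            "responseElements": {
                "instancesSet": {
                    "items": [{"instanceId": "i-0123456789abcdef0"}]
                }
            },
        },
    }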
Example #20
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        notification_email = core.CfnParameter(
            self,
            "NOTIFICATION_EMAIL",
            type="String",
            description="email for pipeline outcome notifications",
            allowed_pattern="^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
            constraint_description=
            "Please enter an email address with correct format ([email protected])",
            min_length=5,
            max_length=320,
        )
        blueprint_bucket_name = core.CfnParameter(
            self,
            "BLUEPRINT_BUCKET",
            type="String",
            description=
            "Bucket name for blueprints of different types of ML Pipelines.",
            min_length=3,
        )
        assets_bucket_name = core.CfnParameter(
            self,
            "ASSETS_BUCKET",
            type="String",
            description="Bucket name for access logs.",
            min_length=3)
        custom_container = core.CfnParameter(
            self,
            "CUSTOM_CONTAINER",
            default="",
            type="String",
            description=
            ("Should point to a zip file containing the Dockerfile and assets for building a custom model. "
             "If empty, containers from the SageMaker Registry will be used."),
        )
        model_framework = core.CfnParameter(
            self,
            "MODEL_FRAMEWORK",
            default="",
            type="String",
            description=
            "The ML framework which is used for training the model. E.g., xgboost, kmeans, etc.",
        )
        model_framework_version = core.CfnParameter(
            self,
            "MODEL_FRAMEWORK_VERSION",
            default="",
            type="String",
            description=
            "The version of the ML framework which is used for training the model. E.g., 1.1-2",
        )
        model_name = core.CfnParameter(
            self,
            "MODEL_NAME",
            type="String",
            description="An arbitrary name for the model.",
            min_length=1)
        model_artifact_location = core.CfnParameter(
            self,
            "MODEL_ARTIFACT_LOCATION",
            type="String",
            description="Path to model artifact inside assets bucket.",
        )
        inference_instance = core.CfnParameter(
            self,
            "INFERENCE_INSTANCE",
            type="String",
            description=
            "Inference instance that inference requests will be running on. E.g., ml.m5.large",
            allowed_pattern="^[a-zA-Z0-9_.+-]+\.[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
            min_length=7,
        )
        inference_type = core.CfnParameter(
            self,
            "INFERENCE_TYPE",
            type="String",
            allowed_values=["batch", "realtime"],
            default="realtime",
            description="Type of inference. Possible values: batch | realtime",
        )
        batch_inference_data = core.CfnParameter(
            self,
            "BATCH_INFERENCE_DATA",
            type="String",
            default="",
            description=
            ("Location of batch inference data if inference type is set to batch. Otherwise, can be left empty."
             ),
        )

        # Resources #
        assets_bucket = s3.Bucket.from_bucket_name(
            self, "AssetsBucket", assets_bucket_name.value_as_string)
        # getting blueprint bucket object from its name - will be used later in the stack
        blueprint_bucket = s3.Bucket.from_bucket_name(
            self, "BlueprintBucket", blueprint_bucket_name.value_as_string)

        # Defining pipeline stages
        # source stage
        source_output, source_action_definition = source_action_custom(
            model_artifact_location, assets_bucket, custom_container)

        # build stage
        build_action_definition, container_uri = build_action(
            self, source_output)

        # deploy stage
        sm_layer = sagemaker_layer(self, blueprint_bucket)
        # creating a sagemaker model
        model_lambda_arn, create_model_definition = create_model(
            self,
            blueprint_bucket,
            assets_bucket,
            model_name,
            model_artifact_location,
            custom_container,
            model_framework,
            model_framework_version,
            container_uri,
            sm_layer,
        )
        # creating a batch transform job
        batch_lambda_arn, batch_transform_definition = batch_transform(
            self,
            blueprint_bucket,
            assets_bucket,
            model_name,
            inference_instance,
            batch_inference_data,
            sm_layer,
        )

        # create invoking lambda policy
        invoke_lambdas_policy = iam.PolicyStatement(
            actions=[
                "lambda:InvokeFunction",
            ],
            resources=[model_lambda_arn, batch_lambda_arn],
        )

        pipeline_notification_topic = sns.Topic(
            self,
            "PipelineNotification",
        )
        pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns(
        )
        pipeline_notification_topic.add_subscription(
            subscriptions.EmailSubscription(
                email_address=notification_email.value_as_string))

        # creating pipeline stages
        source_stage = codepipeline.StageProps(
            stage_name="Source", actions=[source_action_definition])
        build_stage = codepipeline.StageProps(
            stage_name="Build", actions=[build_action_definition])
        deploy_stage_batch = codepipeline.StageProps(
            stage_name="Deploy",
            actions=[create_model_definition, batch_transform_definition],
        )
        batch_build_pipeline = codepipeline.Pipeline(
            self,
            "BYOMPipelineBatchBuild",
            stages=[source_stage, build_stage, deploy_stage_batch],
            cross_account_keys=False,
        )
        batch_build_pipeline.on_state_change(
            "NotifyUser",
            description="Notify user of the outcome of the pipeline",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text((
                    f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                    f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                )),
            ),
            event_pattern=events.EventPattern(
                detail={"state": ["SUCCEEDED", "FAILED"]}),
        )
        batch_build_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["events:PutEvents"],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            ))
        # add lambda permissions
        batch_build_pipeline.add_to_role_policy(invoke_lambdas_policy)

        # Enhancement: This is to find CDK object nodes so that unnecessary cfn-nag warnings can be suppressed
        # There is room for improving the method in future versions to find CDK nodes without having to use
        # hardcoded index numbers
        pipeline_child_nodes = batch_build_pipeline.node.find_all()
        pipeline_child_nodes[
            1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket(
            )
        pipeline_child_nodes[
            6].node.default_child.cfn_options.metadata = suppress_iam_complex(
            )
        pipeline_child_nodes[
            13].node.default_child.cfn_options.metadata = suppress_list_function_policy(
            )
        pipeline_child_nodes[
            19].node.default_child.cfn_options.metadata = suppress_list_function_policy(
            )
        pipeline_child_nodes[
            25].node.default_child.cfn_options.metadata = suppress_list_function_policy(
            )
        pipeline_child_nodes[
            30].node.default_child.cfn_options.metadata = suppress_list_function_policy(
            )
        # attaching iam permissions to the pipelines
        pipeline_permissions(batch_build_pipeline, assets_bucket)

        core.CfnOutput(
            self,
            id="Pipelines",
            value=
            (f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
             f"{batch_build_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
             ),
        )
        core.CfnOutput(
            self,
            id="BatchTransformOutputLocation",
            value=
            f"https://s3.console.aws.amazon.com/s3/buckets/{assets_bucket.bucket_name}/batch_transform/output",
            description=
            "Output location of the batch transform. Output will be saved under the job name",
        )
Example #21
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        notification_email = create_notification_email_parameter(self)
        template_zip_name = create_template_zip_name_parameter(self)
        template_file_name = create_template_file_name_parameter(self)
        template_params_file_name = create_stage_params_file_name_parameter(
            self, "TEMPLATE_PARAMS_NAME", "main")
        assets_bucket_name = create_assets_bucket_name_parameter(self)
        stack_name = create_stack_name_parameter(self)

        # Resources #
        assets_bucket = s3.Bucket.from_bucket_name(
            self, "AssetsBucket", assets_bucket_name.value_as_string)

        # create sns topic and subscription
        pipeline_notification_topic = sns.Topic(
            self,
            "SinglePipelineNotification",
        )
        pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns(
        )
        pipeline_notification_topic.add_subscription(
            subscriptions.EmailSubscription(
                email_address=notification_email.value_as_string))

        # Defining pipeline stages
        # source stage
        source_output, source_action_definition = source_action_template(
            template_zip_name, assets_bucket)

        # create cloudformation action
        cloudformation_action = create_cloudformation_action(
            self,
            "deploy_stack",
            stack_name.value_as_string,
            source_output,
            template_file_name.value_as_string,
            template_params_file_name.value_as_string,
        )

        source_stage = codepipeline.StageProps(
            stage_name="Source", actions=[source_action_definition])
        deploy = codepipeline.StageProps(
            stage_name="DeployCloudFormation",
            actions=[cloudformation_action],
        )

        single_account_pipeline = codepipeline.Pipeline(
            self,
            "SingleAccountPipeline",
            stages=[source_stage, deploy],
            cross_account_keys=False,
        )

        # Add CF suppressions to the action
        deployment_policy = cloudformation_action.deployment_role.node.find_all(
        )[2]
        deployment_policy.node.default_child.cfn_options.metadata = suppress_cloudformation_action(
        )

        # add notification to the single-account pipeline
        single_account_pipeline.on_state_change(
            "NotifyUser",
            description="Notify user of the outcome of the pipeline",
            target=targets.SnsTopic(
                pipeline_notification_topic,
                message=events.RuleTargetInput.from_text((
                    f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                    f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                )),
            ),
            event_pattern=events.EventPattern(
                detail={"state": ["SUCCEEDED", "FAILED"]}),
        )
        single_account_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["events:PutEvents"],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            ))

        # add cfn-nag suppressions
        pipeline_child_nodes = single_account_pipeline.node.find_all()
        pipeline_child_nodes[
            1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket(
            )
        pipeline_child_nodes[
            6].node.default_child.cfn_options.metadata = suppress_iam_complex(
            )
        # attaching iam permissions to the pipelines
        pipeline_permissions(single_account_pipeline, assets_bucket)

        # Outputs #
        core.CfnOutput(
            self,
            id="Pipelines",
            value=
            (f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
             f"{single_account_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
             ),
        )