    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ipam_endpoint = self.node.try_get_context("apiEndpoint") + "vpc"

        custom_resource_handler = PythonFunction(self, "CreateLambda",
            entry="./src_custom_resource", 
            runtime=aws_lambda.Runtime.PYTHON_3_6
            )
        custom_resource_handler.add_environment("VENDING_MACHINE_API", ipam_endpoint)

        # By default the Lambda execution role doesn't allow cross-account API invocation
        custom_resource_handler.role.add_to_policy(aws_iam.PolicyStatement(
            resources=["*"],
            actions=["execute-api:Invoke"]
            ))

        # First custom resource: get a free CIDR from the Vending Machine
        cr_create = core.CustomResource(self, "Resource1", 
            resource_type="Custom::GetSubnet", 
            service_token=custom_resource_handler.function_arn
            )
        # Then provision a new VPC with private subnets in the given CIDR range.
        # NOTE: we are using the L1 construct for the VPC; the L2 construct didn't work with custom resources.
        cidr = cr_create.get_att("vpcCidrBlock").to_string()
        subnet0_cidr = cr_create.get_att("subnet0CidrBlock").to_string()
        subnet1_cidr = cr_create.get_att("subnet1CidrBlock").to_string()
        subnet2_cidr = cr_create.get_att("subnet2CidrBlock").to_string()
        subnet3_cidr = cr_create.get_att("subnet3CidrBlock").to_string()
        
        vpc = aws_ec2.CfnVPC(self, "VPC", cidr_block=cidr)

        aws_ec2.CfnSubnet(self, "Private0", 
            vpc_id=vpc.ref, 
            cidr_block=subnet0_cidr)

        aws_ec2.CfnSubnet(self, "Private1", 
            vpc_id=vpc.ref, 
            cidr_block=subnet1_cidr)

        aws_ec2.CfnSubnet(self, "Private2", 
            vpc_id=vpc.ref, 
            cidr_block=subnet2_cidr)

        aws_ec2.CfnSubnet(self, "Private3", 
            vpc_id=vpc.ref, 
            cidr_block=subnet3_cidr)


        # Lastly, report the VPC ID back to the Vending Machine
        cr_update = core.CustomResource(self, "Resource2", 
            resource_type="Custom::PutVpcId", 
            properties={
                "vpcId":vpc.ref,
                "cidrBlock":cidr
            },
            service_token=custom_resource_handler.function_arn
            )

        core.CfnOutput(self, "cidrBlock", value=cidr)
    def setup_iot_endpoint_provider(self):
        describe_endpoint_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=["iot:DescribeEndpoint"],
            resources=["*"],
        )

        provider_lambda = aws_lambda.SingletonFunction(
            self,
            "iot_data_ats_endpoint_provider_lambda",
            uuid="iot_data_ats_endpoint_provider_lambda_20200507150213",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset("custom_resources/iot_endpoint"),
            handler="iot_endpoint_provider.on_event",
            description="Returns iot:Data-ATS endpoint for this account",
            current_version_options=aws_lambda.VersionOptions(
                removal_policy=core.RemovalPolicy.DESTROY),
            initial_policy=[describe_endpoint_policy],
        )

        provider = custom_resources.Provider(self,
                                             "iot_data_ats_endpoint_provider",
                                             on_event_handler=provider_lambda)

        iot_endpoint = core.CustomResource(
            self,
            "iot_data_ats_endpoint",
            resource_type="Custom::IotDataAtsEndpoint",
            service_token=provider.service_token,
        )

        endpoint_address = iot_endpoint.get_att("EndpointAddress").to_string()

        self._parameters_to_save["iot_endpoint_address"] = endpoint_address
# Example #3
    # Inner wrapper of a decorator (the enclosing `def` is not shown in this
    # snippet) that executes the decorated `func` remotely through a
    # Lambda-backed custom resource.
    def wrapper(*args, **kwargs):

      hashed = hashlib.md5(bytes(json.dumps(args) + json.dumps(kwargs), 'UTF-8'))
      hashi = hashed.hexdigest()[:9]
      function_name = func.__name__ + hashi

      remote = lamb.Function(self._scope, f'LambdaFunction-{function_name}',
        code=lamb.Code.from_inline(self._create_function_code(func)),
        runtime=lamb.Runtime.PYTHON_3_7,
        handler='index.handler',
        timeout=cdk.Duration.minutes(15),
        memory_size=256,
      )

      provider = cr.Provider(self._scope, f'CustomResourceProvider-{function_name}', on_event_handler=remote)

      # stack = cdk.Stack.of(self._scope)
      resource = cdk.CustomResource(self._scope, f'CustomResource-{function_name}',
        service_token=provider.service_token,
        properties={

          # avoid any CFN type conversion quirks by serializing
          # on output and deserializing on input
          'args': json.dumps(list(args)),
          'kwargs': json.dumps(dict(kwargs))
        })

      return resource.get_att_string('Value')
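
# Hypothetical usage of the decorator whose inner wrapper appears above. The
# decorator name and owning class are assumptions; the point is that calling
# the decorated function at synth time yields a CloudFormation token that
# resolves to the Lambda's JSON-serialized 'Value' attribute at deploy time.
#
#   @builder.remote            # assumed name of the enclosing decorator
#   def add(a, b):
#       return a + b
#
#   total = add(1, 2)          # a token, resolved when the stack deploys
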
def create_invoke_lambda_custom_resource(
    scope,  # NOSONAR:S107 this function is designed to take many arguments
    id,
    lambda_function_arn,
    lambda_function_name,
    blueprint_bucket,
    custom_resource_properties,
):
    """
    create_invoke_lambda_custom_resource creates a custom resource to invoke lambda function

    :scope: CDK Construct scope that's needed to create CDK resources
    :id: the logicalId of the CDK resource
    :lambda_function_arn: arn of the lambda function to be invoked (str)
    :lambda_function_name: name of the lambda function to be invoked (str)
    :blueprint_bucket: CDK object of the blueprint bucket that contains resources for BYOM pipeline
    :custom_resource_properties: user provided properties (dict)

    :return: CDK Custom Resource
    """
    custom_resource_lambda_fn = lambda_.Function(
        scope,
        id,
        code=lambda_.Code.from_bucket(
            blueprint_bucket,
            "blueprints/byom/lambdas/invoke_lambda_custom_resource.zip"),
        handler="index.handler",
        runtime=lambda_.Runtime.PYTHON_3_8,
        timeout=core.Duration.minutes(5),
    )

    custom_resource_lambda_fn.add_to_role_policy(
        iam.PolicyStatement(
            actions=[
                "lambda:InvokeFunction",
            ],
            resources=[lambda_function_arn],
        ))
    custom_resource_lambda_fn.node.default_child.cfn_options.metadata = suppress_lambda_policies()

    invoke_lambda_custom_resource = core.CustomResource(
        scope,
        f"{id}CustomeResource",
        service_token=custom_resource_lambda_fn.function_arn,
        properties={
            "function_name": lambda_function_name,
            "message": f"Invoking lambda function: {lambda_function_name}",
            **custom_resource_properties,
        },
        resource_type="Custom::InvokeLambda",
    )

    return invoke_lambda_custom_resource
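
# A hypothetical call site for the helper above; the construct, function, and
# property names are placeholders.
#
#   invoke_cr = create_invoke_lambda_custom_resource(
#       scope=self,
#       id="InvokeBaselineLambda",
#       lambda_function_arn=baseline_lambda.function_arn,
#       lambda_function_name=baseline_lambda.function_name,
#       blueprint_bucket=blueprint_repository_bucket,
#       custom_resource_properties={"assets_bucket": assets_bucket.bucket_name},
#   )
#   invoke_cr.node.add_dependency(baseline_lambda)
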
    def __init__(self, scope: core.Construct, id: str, bucket_name: str,
                 uuid: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        bucket_storage = _s3.LifecycleRule(transitions=[
            _s3.Transition(storage_class=_s3.StorageClass.INTELLIGENT_TIERING,
                           transition_after=core.Duration.days(1))
        ])

        self.__bucket = _s3.Bucket(self,
                                   'S3Bucket',
                                   bucket_name=bucket_name,
                                   removal_policy=core.RemovalPolicy.DESTROY,
                                   encryption=_s3.BucketEncryption.KMS_MANAGED,
                                   lifecycle_rules=[bucket_storage])

        with open('common/common_cdk/lambda/empty_bucket.py', 'r') as f:
            lambda_source = f.read()

        empty_bucket_lambda = _lambda.SingletonFunction(
            self,
            'EmptyBucketLambda',
            uuid=uuid,
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            timeout=core.Duration.minutes(15))

        empty_bucket_lambda.role.add_to_policy(
            _iam.PolicyStatement(actions=[
                's3:DeleteObject', 's3:DeleteObjectVersion',
                's3:ListBucketVersions', 's3:ListBucket'
            ],
                                 resources=[
                                     self.__bucket.bucket_arn + '/*',
                                     self.__bucket.bucket_arn
                                 ]))

        empty_bucket_lambda_provider = _custom_resources.Provider(
            self,
            'EmptyBucketLambdaProvider',
            on_event_handler=empty_bucket_lambda)

        custom_resource = core.CustomResource(
            self,
            'EmptyBucketCustomResource',
            service_token=empty_bucket_lambda_provider.service_token,
            properties={"bucket_name": self.__bucket.bucket_name})

        custom_resource.node.add_dependency(self.__bucket)
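
# A minimal sketch, under assumptions, of common/common_cdk/lambda/empty_bucket.py:
# on stack deletion it drains the bucket so CloudFormation can remove it. It runs
# behind the Provider framework, so simply returning signals success.
import boto3


def handler(event, context):
    if event["RequestType"] == "Delete":
        bucket_name = event["ResourceProperties"]["bucket_name"]
        bucket = boto3.resource("s3").Bucket(bucket_name)
        # Deletes object versions and delete markers; this also covers
        # plain objects in unversioned buckets.
        bucket.object_versions.delete()
    return {}
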
    def create_custom_authorizer_signing_key_generic(
            self, unique_id, description, token_value) -> core.CustomResource:
        """
        Uses a Lambda to create an asymmetric key pair, since neither CFn nor CDK support that as of
        this writing (2020-05-09)
        https://github.com/aws-cloudformation/aws-cloudformation-coverage-roadmap/issues/337

        After creating the key, it signs the token value using the private key, and stores the
        token value, its signature, and the public key in the stack's parameter store.

        :return: the CustomResource for the signing key
        """
        create_authorizer_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "kms:CreateKey", "kms:GetPublicKey", "kms:ScheduleKeyDeletion",
                "kms:Sign"
            ],
            resources=["*"],
        )
        provider_lambda = aws_lambda.SingletonFunction(
            self,
            f"iot_custom_authorizer_key_provider_lambda_{unique_id}",
            uuid=f"iot_custom_authorizer_key_provider_lambda_20200507150213_{unique_id}",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset(
                "custom_resources/iot_custom_authorizer_key_provider"),
            handler="iot_custom_authorizer_key_provider.on_event",
            description=description,
            current_version_options=aws_lambda.VersionOptions(
                removal_policy=core.RemovalPolicy.DESTROY),
            initial_policy=[create_authorizer_policy],
        )

        provider = custom_resources.Provider(
            self,
            f"iot_custom_authorizer_key_provider_{unique_id}",
            on_event_handler=provider_lambda,
        )

        iot_custom_authorizer_key = core.CustomResource(
            self,
            f"iot_custom_authorizer_key_{unique_id}",
            resource_type="Custom::IoTCustomAuthorizer",
            service_token=provider.service_token,
            properties={"token_value": token_value},
        )

        return iot_custom_authorizer_key
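
# A sketch, under assumptions, of iot_custom_authorizer_key_provider.on_event.
# The boto3 KMS calls are real; the attribute names match what the stack reads
# later (custom_authorizer_token_signature, custom_authorizer_public_key), but
# the exact encoding of the outputs is a guess.
import base64
import boto3

kms = boto3.client("kms")


def on_event(event, context):
    if event["RequestType"] == "Create":
        key = kms.create_key(KeySpec="RSA_2048", KeyUsage="SIGN_VERIFY")
        key_id = key["KeyMetadata"]["KeyId"]
        token = event["ResourceProperties"]["token_value"]
        signature = kms.sign(KeyId=key_id,
                             Message=token.encode("utf-8"),
                             MessageType="RAW",
                             SigningAlgorithm="RSASSA_PKCS1_V1_5_SHA_256")
        public_key = kms.get_public_key(KeyId=key_id)["PublicKey"]  # DER bytes
        return {
            "PhysicalResourceId": key_id,
            "Data": {
                "custom_authorizer_token_signature":
                    base64.b64encode(signature["Signature"]).decode(),
                "custom_authorizer_public_key":
                    base64.b64encode(public_key).decode(),
            },
        }
    if event["RequestType"] == "Delete":
        kms.schedule_key_deletion(KeyId=event["PhysicalResourceId"],
                                  PendingWindowInDays=7)
    return {}
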
    def __init__(self, scope: cdk.Construct, construct_id: str, stage: str,
                 table: _dynamo.Table, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        initial_data_role = iam.Role(
            self,
            "InitialDataRole",
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole")
            ],
        )

        initial_data_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSLambdaInvocation-DynamoDB"))
        initial_data_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonDynamoDBFullAccess"))

        on_event = _lambda.Function(
            self,
            "DataHandler",
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset("lambda"),
            handler="initial_data.lambda_handler",
            timeout=cdk.Duration.minutes(5),
            environment={
                "TABLE_NAME": table.table_name,
                "STAGE": stage
            },
        )

        table.grant_full_access(on_event)

        initial_data_provider = _resources.Provider(
            self,
            "InitialDataProvider",
            on_event_handler=on_event,
            log_retention=logs.RetentionDays.ONE_DAY,
            role=initial_data_role,
        )

        cdk.CustomResource(
            self,
            "InitialDataResource",
            service_token=initial_data_provider.service_token,
        )
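
# A minimal sketch, assuming lambda/initial_data.py seeds the table when the
# custom resource is created or updated. TABLE_NAME and STAGE come from the
# environment set above; the item itself is a placeholder.
import os
import boto3


def lambda_handler(event, context):
    if event["RequestType"] in ("Create", "Update"):
        table = boto3.resource("dynamodb").Table(os.environ["TABLE_NAME"])
        table.put_item(Item={
            "id": "seed",                 # placeholder partition key
            "stage": os.environ["STAGE"],
        })
    return {"PhysicalResourceId": "initial-data"}
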
    def __init__(self, scope: core.Construct, id: str, secgroup_name: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        with open('common/common_cdk/lambda/empty_security_group.py', 'r') as f:
            lambda_source = f.read()

        # lambda utils to empty security group before deletion
        empty_secgroup_lambda = _lambda.SingletonFunction(self, 'EmptySecurityGroupLambda',
                                                          uuid="dfs3k8730-4ee1-11e8-9c2d-fdfs65dfsc",
                                                          runtime=_lambda.Runtime.PYTHON_3_7,
                                                          code=_lambda.Code.inline(lambda_source),
                                                          handler='index.handler',
                                                          function_name='ara-auto-empty-secgroup'
                                                          )

        # Attach the revoke permissions directly to the function's execution
        # role so the policy actually applies to the Lambda.
        empty_secgroup_lambda.role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'ec2:RevokeSecurityGroupIngress',
                    'ec2:RevokeSecurityGroupEgress'
                ],
                resources=['arn:aws:ec2:' + core.Aws.REGION + ':' + core.Aws.ACCOUNT_ID
                           + ':security-group/' + secgroup_name]
            )
        )

        empty_secgroup_lambda_provider = _custom_resources.Provider(
            self, 'EmptySecurityGroupLambdaProvider',
            on_event_handler=empty_secgroup_lambda
        )

        core.CustomResource(
            self, 'EmptySecurityGroupCustomResource',
            service_token=empty_secgroup_lambda_provider.service_token,
            properties={
                "secgroup_name": secgroup_name
            }
        )
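
# A sketch, under assumptions, of common/common_cdk/lambda/empty_security_group.py:
# on Delete it strips every ingress/egress rule from the group. Note the lookup
# would additionally require ec2:DescribeSecurityGroups beyond the revoke
# permissions granted above.
import boto3

ec2 = boto3.client("ec2")


def handler(event, context):
    if event["RequestType"] == "Delete":
        name = event["ResourceProperties"]["secgroup_name"]
        groups = ec2.describe_security_groups(
            Filters=[{"Name": "group-name", "Values": [name]}])["SecurityGroups"]
        for group in groups:
            if group["IpPermissions"]:
                ec2.revoke_security_group_ingress(
                    GroupId=group["GroupId"],
                    IpPermissions=group["IpPermissions"])
            if group["IpPermissionsEgress"]:
                ec2.revoke_security_group_egress(
                    GroupId=group["GroupId"],
                    IpPermissions=group["IpPermissionsEgress"])
    return {}
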
    def setup_custom_authorizer(self):
        # These values are used in the custom authorizer setup, and exported to Parameter Store
        # for use by integration tests
        custom_authorizer_name = "iot_custom_authorizer"
        self._parameters_to_save[
            "custom_authorizer_name"] = custom_authorizer_name

        # Note: "key" is a bit overloaded here. In the context of the custom authorizer, "key name"
        # refers to the HTTP header field that the custom authorizer looks for a token value in.
        #
        # In the case of the custom authorizer key provider, the "key" is the KMS asymmetric CMK
        # used to sign the token value passed in the `token_key_name` header. In order to keep the
        # terminology consistent between client integ tests that are expecting to pass something for
        # a "key name" field, we'll let the ambiguity stand.
        token_key_name = "iot_custom_authorizer_token"
        self._parameters_to_save[
            "custom_authorizer_token_key_name"] = token_key_name

        token_value = "allow"
        self._parameters_to_save["custom_authorizer_token_value"] = token_value

        iot_custom_authorizer_key_resource = self.create_custom_authorizer_signing_key_generic(
            "1",
            "Manages an asymmetric CMK and token signature for iot custom authorizer.",
            token_value,
        )

        custom_authorizer_token_signature = iot_custom_authorizer_key_resource.get_att(
            "custom_authorizer_token_signature").to_string()
        self._parameters_to_save[
            "custom_authorizer_token_signature"] = custom_authorizer_token_signature

        authorizer_function_arn = self.setup_custom_authorizer_function(
            "1",
            "custom_resources/iot_custom_authorizer_function",
            "iot_custom_authorizer.handler",
            "Sample custom authorizer that allows or denies based on 'token' value",
            {},
            self.region,
        )

        create_authorizer_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iot:CreateAuthorizer", "iot:UpdateAuthorizer",
                "iot:DeleteAuthorizer"
            ],
            resources=["*"],
        )
        provider_lambda = aws_lambda.SingletonFunction(
            self,
            "iot_custom_authorizer_provider_lambda",
            uuid=self.custom_auth_user_pass_uuid,
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset(
                "custom_resources/iot_custom_authorizer_provider"),
            handler="iot_custom_authorizer_provider.on_event",
            description="Sets up an IoT custom authorizer",
            current_version_options=aws_lambda.VersionOptions(
                removal_policy=core.RemovalPolicy.DESTROY),
            initial_policy=[create_authorizer_policy],
        )

        provider = custom_resources.Provider(self,
                                             "iot_custom_authorizer_provider",
                                             on_event_handler=provider_lambda)

        public_key = iot_custom_authorizer_key_resource.get_att(
            "custom_authorizer_public_key").to_string()

        core.CustomResource(
            self,
            "iot_custom_authorizer",
            resource_type="Custom::IoTCustomAuthorizer",
            service_token=provider.service_token,
            properties={
                "authorizer_function_arn": authorizer_function_arn,
                "authorizer_name": custom_authorizer_name,
                "public_key": public_key,
                "token_key_name": token_key_name,
            },
        )
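
# A sketch, under assumptions, of the sample authorizer behind
# custom_resources/iot_custom_authorizer_function (iot_custom_authorizer.handler):
# it allows or denies based on the token value, returning the response shape
# IoT Core expects from a custom authorizer.
import json


def handler(event, context):
    allowed = event.get("token") == "allow"  # matches token_value above
    return {
        "isAuthenticated": allowed,
        "principalId": "custom_auth_principal",
        "disconnectAfterInSeconds": 3600,
        "refreshAfterInSeconds": 300,
        "policyDocuments": [json.dumps({
            "Version": "2012-10-17",
            "Statement": [{
                "Action": "iot:*",  # broad for the sample; scope down in practice
                "Effect": "Allow" if allowed else "Deny",
                "Resource": "*",
            }],
        })],
    }
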
# Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        with open("stack/config.yml", 'r') as stream:
            configs = yaml.safe_load(stream)

        ### S3 core
        images_S3_bucket = _s3.Bucket(self, "ICS_IMAGES")

        images_S3_bucket.add_cors_rule(
            allowed_methods=[_s3.HttpMethods.POST],
            allowed_origins=["*"] # add API gateway web resource URL
        )

        ### SQS core
        image_deadletter_queue = _sqs.Queue(self, "ICS_IMAGES_DEADLETTER_QUEUE")
        image_queue = _sqs.Queue(self, "ICS_IMAGES_QUEUE",
            dead_letter_queue={
                "max_receive_count": configs["DeadLetterQueue"]["MaxReceiveCount"],
                "queue": image_deadletter_queue
            })

        ### api gateway core
        api_gateway = RestApi(self, 'ICS_API_GATEWAY', rest_api_name='ImageContentSearchApiGateway')
        api_gateway_resource = api_gateway.root.add_resource(configs["ProjectName"])
        api_gateway_landing_page_resource = api_gateway_resource.add_resource('web')
        api_gateway_get_signedurl_resource = api_gateway_resource.add_resource('signedUrl')
        api_gateway_image_search_resource = api_gateway_resource.add_resource('search')

        ### landing page function
        get_landing_page_function = Function(self, "ICS_GET_LANDING_PAGE",
            function_name="ICS_GET_LANDING_PAGE",
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.asset("./src/landingPage"))

        get_landing_page_integration = LambdaIntegration(
            get_landing_page_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_landing_page_resource.add_method('GET', get_landing_page_integration,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])

        ### cognito
        required_attribute = _cognito.StandardAttribute(required=True)

        users_pool = _cognito.UserPool(self, "ICS_USERS_POOL",
            auto_verify=_cognito.AutoVerifiedAttrs(email=True), #required for self sign-up
            standard_attributes=_cognito.StandardAttributes(email=required_attribute), #required for self sign-up
            self_sign_up_enabled=configs["Cognito"]["SelfSignUp"])

        user_pool_app_client = _cognito.CfnUserPoolClient(self, "ICS_USERS_POOL_APP_CLIENT", 
            supported_identity_providers=["COGNITO"],
            allowed_o_auth_flows=["implicit"],
            allowed_o_auth_scopes=configs["Cognito"]["AllowedOAuthScopes"],
            user_pool_id=users_pool.user_pool_id,
            callback_ur_ls=[api_gateway_landing_page_resource.url],
            allowed_o_auth_flows_user_pool_client=True,
            explicit_auth_flows=["ALLOW_REFRESH_TOKEN_AUTH"])

        user_pool_domain = _cognito.UserPoolDomain(self, "ICS_USERS_POOL_DOMAIN", 
            user_pool=users_pool, 
            cognito_domain=_cognito.CognitoDomainOptions(domain_prefix=configs["Cognito"]["DomainPrefix"]))

        ### get signed URL function
        get_signedurl_function = Function(self, "ICS_GET_SIGNED_URL",
            function_name="ICS_GET_SIGNED_URL",
            environment={
                "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
                "DEFAULT_SIGNEDURL_EXPIRY_SECONDS": configs["Functions"]["DefaultSignedUrlExpirySeconds"]
            },
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.asset("./src/getSignedUrl"))

        get_signedurl_integration = LambdaIntegration(
            get_signedurl_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_get_signedurl_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            rest_api_id=api_gateway_get_signedurl_resource.rest_api.rest_api_id,
            name="ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            type="COGNITO_USER_POOLS",
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_get_signedurl_resource.add_method('GET', get_signedurl_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]
            ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_get_signedurl_authorizer.ref)

        images_S3_bucket.grant_put(get_signedurl_function, objects_key_pattern="new/*")

        ### image massage function
        image_massage_function = Function(self, "ICS_IMAGE_MASSAGE",
            function_name="ICS_IMAGE_MASSAGE",
            timeout=core.Duration.seconds(6),
            runtime=Runtime.PYTHON_3_7,
            environment={"ICS_IMAGE_MASSAGE": image_queue.queue_name},
            handler="main.handler",
            code=Code.asset("./src/imageMassage"))

        images_S3_bucket.grant_write(image_massage_function, "processed/*")
        images_S3_bucket.grant_delete(image_massage_function, "new/*")
        images_S3_bucket.grant_read(image_massage_function, "new/*")
        
        new_image_added_notification = _s3notification.LambdaDestination(image_massage_function)

        images_S3_bucket.add_event_notification(_s3.EventType.OBJECT_CREATED, 
            new_image_added_notification, 
            _s3.NotificationKeyFilter(prefix="new/")
            )

        image_queue.grant_send_messages(image_massage_function)

        ### image analyzer function
        image_analyzer_function = Function(self, "ICS_IMAGE_ANALYSIS",
            function_name="ICS_IMAGE_ANALYSIS",
            runtime=Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(10),
            environment={
                "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
                "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
                "REGION": core.Aws.REGION,
                },
            handler="main.handler",
            code=Code.asset("./src/imageAnalysis")) 

        image_analyzer_function.add_event_source(_lambda_event_source.SqsEventSource(queue=image_queue, batch_size=10))
        image_queue.grant_consume_messages(image_analyzer_function)

        lambda_rekognition_access = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW, 
            actions=["rekognition:DetectLabels", "rekognition:DetectModerationLabels"],
            resources=["*"]                    
        )

        image_analyzer_function.add_to_role_policy(lambda_rekognition_access)
        images_S3_bucket.grant_read(image_analyzer_function, "processed/*")

        ### API gateway finalizing
        self.add_cors_options(api_gateway_get_signedurl_resource)
        self.add_cors_options(api_gateway_landing_page_resource)
        self.add_cors_options(api_gateway_image_search_resource)

        ### database 
        database_secret = _secrets_manager.Secret(self, "ICS_DATABASE_SECRET",
            secret_name="rds-db-credentials/image-content-search-rds-secret",
            generate_secret_string=_secrets_manager.SecretStringGenerator(
                generate_string_key='password',
                secret_string_template='{"username": "******"}',
                exclude_punctuation=True,
                exclude_characters='/@\" \\\'',
                require_each_included_type=True
            )
        )

        database = _rds.CfnDBCluster(self, "ICS_DATABASE",
            engine=_rds.DatabaseClusterEngine.aurora_mysql(version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
            engine_mode="serverless",
            database_name=configs["Database"]["Name"],
            enable_http_endpoint=True,
            deletion_protection=configs["Database"]["DeletionProtection"],
            master_username=database_secret.secret_value_from_json("username").to_string(),
            master_user_password=database_secret.secret_value_from_json("password").to_string(),
            scaling_configuration=_rds.CfnDBCluster.ScalingConfigurationProperty(
                auto_pause=configs["Database"]["Scaling"]["AutoPause"],
                min_capacity=configs["Database"]["Scaling"]["Min"],
                max_capacity=configs["Database"]["Scaling"]["Max"],
                seconds_until_auto_pause=configs["Database"]["Scaling"]["SecondsToAutoPause"]
            ),
        )

        database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(core.Aws.REGION, core.Aws.ACCOUNT_ID, database.ref)
   
        secret_target = _secrets_manager.CfnSecretTargetAttachment(self,"ICS_DATABASE_SECRET_TARGET",
            target_type="AWS::RDS::DBCluster",
            target_id=database.ref,
            secret_id=database_secret.secret_arn
        )

        secret_target.node.add_dependency(database)

        ### database function
        image_data_function_role = _iam.Role(self, "ICS_IMAGE_DATA_FUNCTION_ROLE",
            role_name="ICS_IMAGE_DATA_FUNCTION_ROLE",
            assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaVPCAccessExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSDataFullAccess")
            ]
        )
        
        image_data_function = Function(self, "ICS_IMAGE_DATA",
            function_name="ICS_IMAGE_DATA",
            runtime=Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(5),
            role=image_data_function_role,
            environment={
                "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
                "CLUSTER_ARN": database_cluster_arn,
                "CREDENTIALS_ARN": database_secret.secret_arn,
                "DB_NAME": database.database_name,
                "REGION": core.Aws.REGION
                },
            handler="main.handler",
            code=Code.asset("./src/imageData")
        ) 

        image_search_integration = LambdaIntegration(
            image_data_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_image_search_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            rest_api_id=api_gateway_image_search_resource.rest_api.rest_api_id,
            name="ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            type="COGNITO_USER_POOLS", 
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_image_search_resource.add_method('POST', image_search_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]
            ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_image_search_authorizer.ref)


        lambda_access_search = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW, 
            actions=["translate:TranslateText"],
            resources=["*"]            
        ) 

        image_data_function.add_to_role_policy(lambda_access_search)

        ### custom resource
        lambda_provider = Provider(self, 'ICS_IMAGE_DATA_PROVIDER', 
            on_event_handler=image_data_function
        )

        core.CustomResource(self, 'ICS_IMAGE_DATA_RESOURCE', 
            service_token=lambda_provider.service_token,
            pascal_case_properties=False,
            resource_type="Custom::SchemaCreation",
            properties={
                "source": "Cloudformation"
            }
        )

        ### event bridge
        event_bus = _events.EventBus(self, "ICS_IMAGE_CONTENT_BUS")

        event_rule = _events.Rule(self, "ICS_IMAGE_CONTENT_RULE",
            rule_name="ICS_IMAGE_CONTENT_RULE",
            description="The event from image analyzer to store the data",
            event_bus=event_bus,
            event_pattern=_events.EventPattern(resources=[image_analyzer_function.function_arn]),
        )

        event_rule.add_target(_event_targets.LambdaFunction(image_data_function))

        event_bus.grant_put_events(image_analyzer_function)
        image_analyzer_function.add_environment("EVENT_BUS", event_bus.event_bus_name)

        ### outputs
        core.CfnOutput(self, 'CognitoHostedUILogin',
            value='https://{}.auth.{}.amazoncognito.com/login?client_id={}&response_type=token&scope={}&redirect_uri={}'.format(user_pool_domain.domain_name, core.Aws.REGION, user_pool_app_client.ref, '+'.join(user_pool_app_client.allowed_o_auth_scopes), api_gateway_landing_page_resource.url),
            description='The Cognito Hosted UI Login Page'
        )
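
# A plausible sketch of ./src/getSignedUrl/main.handler: it returns a presigned
# POST under the "new/" prefix, which is exactly what grant_put with
# objects_key_pattern="new/*" above permits. Response field names are assumptions.
import json
import os
import uuid

import boto3


def handler(event, context):
    post = boto3.client("s3").generate_presigned_post(
        Bucket=os.environ["ICS_IMAGES_BUCKET"],
        Key="new/" + str(uuid.uuid4()),
        ExpiresIn=int(os.environ["DEFAULT_SIGNEDURL_EXPIRY_SECONDS"]),
    )
    return {
        "statusCode": 200,
        "headers": {"Access-Control-Allow-Origin": "*"},
        "body": json.dumps(post),
    }
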
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Get stack parameters: email and repo address
        notification_email = core.CfnParameter(
            self,
            "Email Address",
            type="String",
            description=
            "Specify an email to receive notifications about pipeline outcomes.",
            allowed_pattern='^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$',
            min_length=5,
            max_length=320,
            constraint_description=
            "Please enter an email address in the correct format (e.g. name@example.com)"
        )
        git_address = core.CfnParameter(
            self,
            "CodeCommit Repo Address",
            type="String",
            description=
            "AWS CodeCommit repository clone URL to connect to the framework.",
            allowed_pattern=
            '^(((https:\/\/|ssh:\/\/)(git\-codecommit)\.[a-zA-Z0-9_.+-]+(amazonaws\.com\/)[a-zA-Z0-9-.]+(\/)[a-zA-Z0-9-.]+(\/)[a-zA-Z0-9-.]+$)|)',
            min_length=0,
            max_length=320,
            constraint_description=
            "CodeCommit address must follow the pattern: ssh or https://git-codecommit.REGION.amazonaws.com/version/repos/REPONAME"
        )

        # Conditions
        git_address_provided = core.CfnCondition(
            self,
            "GitAddressProvided",
            expression=core.Fn.condition_not(
                core.Fn.condition_equals(git_address, "")),
        )

        # Constants
        pipeline_stack_name = "MLOps-pipeline"

        # CDK Resources setup
        access_logs_bucket = s3.Bucket(
            self,
            "accessLogs",
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL)
        access_logs_bucket.node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W35",
                        "reason": "This is the access bucket."
                    },
                    {
                        "id": "W51",
                        "reason":
                        "This S3 bucket does not need a bucket policy.",
                    },
                ]
            }
        }
        source_bucket = s3.Bucket.from_bucket_name(self, "BucketByName",
                                                   "%%BUCKET_NAME%%")

        blueprints_bucket_name = "blueprint-repository-" + str(uuid.uuid4())
        blueprint_repository_bucket = s3.Bucket(
            self,
            blueprints_bucket_name,
            encryption=s3.BucketEncryption.S3_MANAGED,
            server_access_logs_bucket=access_logs_bucket,
            server_access_logs_prefix=blueprints_bucket_name,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL)
        blueprint_repository_bucket.node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [{
                    "id":
                    "W51",
                    "reason":
                    "This S3 bucket does not need a bucket policy. All access to this bucket is restricted by IAM (CDK grant_read method)",
                }]
            }
        }

        # Custom resource to copy source bucket content to blueprints bucket
        custom_resource_lambda_fn = lambda_.Function(
            self,
            "CustomResourceLambda",
            code=lambda_.Code.from_asset("lambdas/custom_resource"),
            handler="index.on_event",
            runtime=lambda_.Runtime.PYTHON_3_8,
            environment={
                "source_bucket": "https://%%BUCKET_NAME%%-" + core.Aws.REGION +
                ".s3.amazonaws.com/%%SOLUTION_NAME%%/%%VERSION%%",
                "destination_bucket": blueprint_repository_bucket.bucket_name,
                "LOG_LEVEL": "INFO",
            },
            timeout=core.Duration.seconds(60),
        )
        custom_resource_lambda_fn.node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [{
                    "id":
                    "W58",
                    "reason":
                    "The lambda functions role already has permissions to write cloudwatch logs",
                }]
            }
        }
        blueprint_repository_bucket.grant_write(custom_resource_lambda_fn)
        custom_resource = core.CustomResource(
            self,
            "CustomResourceCopyAssets",
            service_token=custom_resource_lambda_fn.function_arn,
        )
        custom_resource.node.add_dependency(blueprint_repository_bucket)
        ### IAM policies setup ###
        cloudformation_role = iam.Role(
            self,
            "mlopscloudformationrole",
            assumed_by=iam.ServicePrincipal("cloudformation.amazonaws.com"),
        )
        # Cloudformation policy setup
        orchestrator_policy = iam.Policy(
            self,
            "lambdaOrchestratorPolicy",
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "cloudformation:CreateStack",
                        "cloudformation:DeleteStack",
                        "cloudformation:UpdateStack",
                        "cloudformation:ListStackResources",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:cloudformation:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:stack/{pipeline_stack_name}*/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "iam:CreateRole",
                        "iam:DeleteRole",
                        "iam:DeleteRolePolicy",
                        "iam:GetRole",
                        "iam:GetRolePolicy",
                        "iam:PassRole",
                        "iam:PutRolePolicy",
                        "iam:AttachRolePolicy",
                        "iam:DetachRolePolicy",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:iam::{core.Aws.ACCOUNT_ID}:role/{pipeline_stack_name}*"
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "ecr:CreateRepository",
                        "ecr:DeleteRepository",
                        "ecr:DescribeRepositories",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:ecr:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:repository/awsmlopsmodels*"
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "codebuild:CreateProject",
                        "codebuild:DeleteProject",
                        "codebuild:BatchGetProjects",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:project/ContainerFactory*",
                        f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:project/VerifySagemaker*",
                        f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:report-group/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "lambda:CreateFunction",
                        "lambda:DeleteFunction",
                        "lambda:InvokeFunction",
                        "lambda:PublishLayerVersion",
                        "lambda:DeleteLayerVersion",
                        "lambda:GetLayerVersion",
                        "lambda:GetFunctionConfiguration",
                        "lambda:GetFunction",
                        "lambda:AddPermission",
                        "lambda:RemovePermission",
                        "lambda:UpdateFunctionConfiguration",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:layer:*",
                        f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:function:*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=["s3:GetObject"],
                    resources=[
                        blueprint_repository_bucket.bucket_arn,
                        blueprint_repository_bucket.arn_for_objects("*"),
                        f"arn:{core.Aws.PARTITION}:s3:::pipeline-assets-*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "codepipeline:CreatePipeline",
                        "codepipeline:DeletePipeline",
                        "codepipeline:GetPipeline",
                        "codepipeline:GetPipelineState",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:codepipeline:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*"
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "apigateway:POST",
                        "apigateway:PATCH",
                        "apigateway:DELETE",
                        "apigateway:GET",
                        "apigateway:PUT",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis/*",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/account",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "logs:CreateLogGroup",
                        "logs:DescribeLogGroups",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "s3:CreateBucket",
                        "s3:PutEncryptionConfiguration",
                        "s3:PutBucketVersioning",
                        "s3:PutBucketPublicAccessBlock",
                        "s3:PutBucketLogging",
                    ],
                    resources=["arn:" + core.Aws.PARTITION + ":s3:::*"],
                ),
                iam.PolicyStatement(
                    actions=[
                        "sns:CreateTopic",
                        "sns:DeleteTopic",
                        "sns:Subscribe",
                        "sns:Unsubscribe",
                        "sns:GetTopicAttributes",
                        "sns:SetTopicAttributes",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*-PipelineNotification*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "events:PutRule",
                        "events:DescribeRule",
                        "events:PutTargets",
                        "events:RemoveTargets",
                        "events:DeleteRule",
                        "events:PutEvents",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:rule/*",
                        f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                    ],
                ),
            ],
        )
        orchestrator_policy.attach_to_role(cloudformation_role)

        # Lambda function IAM setup
        lambda_passrole_policy = iam.PolicyStatement(
            actions=["iam:PassRole"], resources=[cloudformation_role.role_arn])
        # API Gateway and lambda setup to enable provisioning pipelines through API calls
        provisioner_apigw_lambda = aws_apigateway_lambda.ApiGatewayToLambda(
            self,
            "PipelineOrchestration",
            lambda_function_props={
                "runtime": lambda_.Runtime.PYTHON_3_8,
                "handler": "index.handler",
                "code":
                lambda_.Code.from_asset("lambdas/pipeline_orchestration"),
            },
            api_gateway_props={
                "defaultMethodOptions": {
                    "authorizationType": apigw.AuthorizationType.IAM,
                },
                "restApiName": f"{core.Aws.STACK_NAME}-orchestrator",
                "proxy": False
            },
        )
        provision_resource = provisioner_apigw_lambda.api_gateway.root.add_resource(
            'provisionpipeline')
        provision_resource.add_method('POST')
        status_resource = provisioner_apigw_lambda.api_gateway.root.add_resource(
            'pipelinestatus')
        status_resource.add_method('POST')
        blueprint_repository_bucket.grant_read(
            provisioner_apigw_lambda.lambda_function)
        provisioner_apigw_lambda.lambda_function.add_to_role_policy(
            lambda_passrole_policy)
        orchestrator_policy.attach_to_role(
            provisioner_apigw_lambda.lambda_function.role)
        provisioner_apigw_lambda.lambda_function.add_to_role_policy(
            iam.PolicyStatement(actions=["xray:PutTraceSegments"],
                                resources=["*"]))
        lambda_node = provisioner_apigw_lambda.lambda_function.node.default_child
        lambda_node.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [{
                    "id":
                    "W12",
                    "reason":
                    "The xray permissions PutTraceSegments is not able to be bound to resources.",
                }]
            }
        }
        # Environment variables setup
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="BLUEPRINT_BUCKET_URL",
            value=str(blueprint_repository_bucket.bucket_regional_domain_name),
        )
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="BLUEPRINT_BUCKET",
            value=str(blueprint_repository_bucket.bucket_name))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="ACCESS_BUCKET", value=str(access_logs_bucket.bucket_name))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="CFN_ROLE_ARN", value=str(cloudformation_role.role_arn))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="PIPELINE_STACK_NAME", value=pipeline_stack_name)
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="NOTIFICATION_EMAIL", value=notification_email.value_as_string)
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="LOG_LEVEL", value="DEBUG")
        cfn_policy_for_lambda = orchestrator_policy.node.default_child
        cfn_policy_for_lambda.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [{
                    "id":
                    "W76",
                    "reason":
                    "A complex IAM policy is required for this resource.",
                }]
            }
        }

        ### Codepipeline with Git source definitions ###
        source_output = codepipeline.Artifact()
        # processing git_address to retrieve repo name
        repo_name_split = core.Fn.split("/", git_address.value_as_string)
        repo_name = core.Fn.select(5, repo_name_split)
        # getting codecommit repo cdk object using 'from_repository_name'
        repo = codecommit.Repository.from_repository_name(
            self, "AWSMLOpsFrameworkRepository", repo_name)
        codebuild_project = codebuild.PipelineProject(
            self,
            "Take config file",
            build_spec=codebuild.BuildSpec.from_object({
                "version": "0.2",
                "phases": {
                    "build": {
                        "commands": [
                            "ls -a",
                            "aws lambda invoke --function-name " +
                            provisioner_apigw_lambda.lambda_function.
                            function_name +
                            " --payload fileb://mlops-config.json response.json"
                            + " --invocation-type RequestResponse",
                        ]
                    }
                },
            }),
        )
        # Defining a Codepipeline project with CodeCommit as source
        codecommit_pipeline = codepipeline.Pipeline(
            self,
            "MLOpsCodeCommitPipeline",
            stages=[
                codepipeline.StageProps(
                    stage_name="Source",
                    actions=[
                        codepipeline_actions.CodeCommitSourceAction(
                            action_name="CodeCommit",
                            repository=repo,
                            output=source_output,
                        )
                    ],
                ),
                codepipeline.StageProps(
                    stage_name="TakeConfig",
                    actions=[
                        codepipeline_actions.CodeBuildAction(
                            action_name="provision_pipeline",
                            input=source_output,
                            outputs=[],
                            project=codebuild_project,
                        )
                    ],
                ),
            ],
            cross_account_keys=False,
        )
        codecommit_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["lambda:InvokeFunction"],
                resources=[
                    provisioner_apigw_lambda.lambda_function.function_arn
                ],
            ))
        codebuild_project.add_to_role_policy(
            iam.PolicyStatement(
                actions=["lambda:InvokeFunction"],
                resources=[
                    provisioner_apigw_lambda.lambda_function.function_arn
                ],
            ))
        pipeline_child_nodes = codecommit_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id":
                        "W35",
                        "reason":
                        "This is a managed bucket generated by CDK for codepipeline.",
                    },
                    {
                        "id":
                        "W51",
                        "reason":
                        "This is a managed bucket generated by CDK for codepipeline.",
                    },
                ]
            }
        }

        ###custom resource for operational metrics###
        metricsMapping = core.CfnMapping(
            self,
            'AnonymousData',
            mapping={'SendAnonymousData': {
                'Data': 'Yes'
            }})
        metrics_condition = core.CfnCondition(
            self,
            'AnonymousDatatoAWS',
            expression=core.Fn.condition_equals(
                metricsMapping.find_in_map('SendAnonymousData', 'Data'),
                'Yes'))

        helper_function = lambda_.Function(
            self,
            "SolutionHelper",
            code=lambda_.Code.from_asset("lambdas/solution_helper"),
            handler="lambda_function.handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(60),
        )

        createIdFunction = core.CustomResource(
            self,
            'CreateUniqueID',
            service_token=helper_function.function_arn,
            properties={'Resource': 'UUID'},
            resource_type='Custom::CreateUUID')

        sendDataFunction = core.CustomResource(
            self,
            'SendAnonymousData',
            service_token=helper_function.function_arn,
            properties={
                'Resource': 'AnonymousMetric',
                'UUID': createIdFunction.get_att_string('UUID'),
                'gitSelected': git_address.value_as_string,
                'Region': core.Aws.REGION,
                'SolutionId': 'SO0136',
                'Version': '%%VERSION%%',
            },
            resource_type='Custom::AnonymousData')

        core.Aspects.of(helper_function).add(
            ConditionalResources(metrics_condition))
        core.Aspects.of(createIdFunction).add(
            ConditionalResources(metrics_condition))
        core.Aspects.of(sendDataFunction).add(
            ConditionalResources(metrics_condition))
        helper_function.node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [{
                    "id":
                    "W58",
                    "reason":
                    "The lambda functions role already has permissions to write cloudwatch logs",
                }]
            }
        }

        # If user chooses Git as pipeline provision type, create codepipeline with Git repo as source
        core.Aspects.of(repo).add(ConditionalResources(git_address_provided))
        core.Aspects.of(codecommit_pipeline).add(
            ConditionalResources(git_address_provided))
        core.Aspects.of(codebuild_project).add(
            ConditionalResources(git_address_provided))
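
# A sketch, under assumptions, of lambdas/solution_helper/lambda_function.py.
# One function backs both custom resources above. Because its ARN is used
# directly as the service token (no Provider framework), the real handler must
# also PUT its result to event["ResponseURL"], e.g. via the cfnresponse helper.
import uuid


def handler(event, context):
    data = {}
    if event["RequestType"] == "Create":
        resource = event["ResourceProperties"].get("Resource")
        if resource == "UUID":
            # Surfaced to the template via get_att_string('UUID') above.
            data = {"UUID": str(uuid.uuid4())}
        elif resource == "AnonymousMetric":
            pass  # would POST the metric payload to the solution's endpoint
    return {"Data": data}  # plus the CloudFormation response described above
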
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Add the VPC connection arn as an input parameter
        vpc_conn_arn = core.CfnParameter(
            self,
            "VpcConnectionArn",
            type="String",
            description="The Arn of the VPC connection to use for Redshift.")

        quicksight_group_arn = core.Fn.import_value('ara-QuickSight-Group-Arn')
        secret_arn = core.Fn.import_value('ara-QuickSight-Redshift-Secret-Arn')

        # Create the custom resource policy with the necessary permissions
        iam_policy = cr.AwsCustomResourcePolicy.from_statements([
            iam.PolicyStatement(actions=cfg.CDK_POLICY_ACTIONS,
                                resources=['*']),
        ])

        redshift_datasource_lambda = lambda_.SingletonFunction(
            self,
            id='RedshiftDatasourceLambda',
            uuid='b438edeb-f5dc-486a-ac2d-bc0918b975b8',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset('dataviz_redshift/lambda'),
            handler='redshift_datasource.handler',
            function_name='ara_redshift_datasource')

        redshift_datasource_lambda.role.add_to_policy(
            iam.PolicyStatement(actions=['secretsmanager:GetSecretValue'],
                                resources=[secret_arn]))

        redshift_datasource_lambda.role.add_to_policy(
            iam.PolicyStatement(actions=[
                'quicksight:CreateDataSource', 'quicksight:DeleteDataSource'
            ],
                                resources=['*']))

        lambda_provider = cr.Provider(
            self,
            id='LambdaProvider',
            on_event_handler=redshift_datasource_lambda)

        responseLamb = core.CustomResource(
            self,
            'RedshiftDatasourceResource',
            service_token=lambda_provider.service_token,
            properties={
                'Secret_arn': secret_arn,
                'Datasource_name': cfg.REDSHIFT_DATASOURCE_NAME,
                'Aws_account_id': self.account,
                'Quicksight_group_arn': quicksight_group_arn,
                'Datasource_actions': cfg.DATASOURCE_ACTIONS,
                'Vpc_conn_arn': vpc_conn_arn.value_as_string
            })

        redshift_datasource_arn = responseLamb.get_att_string('datasource_arn')

        core.CfnOutput(self,
                       "RedshiftDataSourceArn",
                       description="Redshift Data Source Arn",
                       value=redshift_datasource_arn)

        # Create a Redshift dataset with custom SQL
        redshift_dataset_arn = QuickSightRedshiftDataset(
            self,
            'RedshiftDataset',
            iam_policy=iam_policy,
            quicksight_group_arn=quicksight_group_arn,
            redshift_datasource_arn=redshift_datasource_arn,
            redshift_dataset_name=cfg.REDSHIFT_DATASET_NAME,
            dataset_actions=cfg.DATASET_ACTIONS,
            redshift_custom_sql=cfg.REDSHIFT_CUSTOM_SQL,
            redshift_columns=cfg.REDSHIFT_COLUMNS,
            redshift_data_transformations=cfg.REDSHIFT_DATA_TRANSFORMATIONS
        ).redshift_dataset_arn

        QuickSightRedshiftAnalysis(
            self,
            'RedshiftAnalysis',
            iam_policy=iam_policy,
            quicksight_group_arn=quicksight_group_arn,
            redshift_dataset_arn=redshift_dataset_arn,
            redshift_analysis_name=cfg.REDSHIFT_ANALYSIS_NAME,
            redshift_analysis_template_alias=cfg.REDSHIFT_ANALYSIS_TEMPLATE_ALIAS,
            analysis_actions=cfg.ANALYSIS_ACTIONS)
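
For context, the handler behind `lambda_provider` has to hand the new data source's ARN back to CloudFormation so that `get_att_string('datasource_arn')` above resolves. Below is a minimal sketch of what `dataviz_redshift/lambda/redshift_datasource.py` could look like, assuming the standard cr.Provider event contract and the usual JSON keys in a Redshift connection secret; only the property names are taken from the stack above, everything else is illustrative.

import json

import boto3

quicksight = boto3.client("quicksight")
secrets = boto3.client("secretsmanager")


def handler(event, context):
    props = event["ResourceProperties"]
    account_id = props["Aws_account_id"]
    name = props["Datasource_name"]

    if event["RequestType"] == "Create":
        # Connection details are assumed to live in the secret passed in
        secret = json.loads(
            secrets.get_secret_value(SecretId=props["Secret_arn"])["SecretString"])
        response = quicksight.create_data_source(
            AwsAccountId=account_id,
            DataSourceId=name,
            Name=name,
            Type="REDSHIFT",
            DataSourceParameters={
                "RedshiftParameters": {
                    "Host": secret["host"],
                    "Port": int(secret["port"]),
                    "Database": secret["dbname"],
                }
            },
            VpcConnectionProperties={"VpcConnectionArn": props["Vpc_conn_arn"]},
            Permissions=[{
                "Principal": props["Quicksight_group_arn"],
                "Actions": props["Datasource_actions"],
            }],
        )
        return {
            "PhysicalResourceId": name,
            # Surfaced to CDK through get_att_string("datasource_arn")
            "Data": {"datasource_arn": response["Arn"]},
        }

    if event["RequestType"] == "Delete":
        quicksight.delete_data_source(
            AwsAccountId=account_id, DataSourceId=name)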
Example #13
    def __init__(self, scope: cdk.Construct, id: str,
                 cognito_user_pool: cognito.UserPool, s3_bucket_name: str,
                 create_configuration_lambda_role_arn: str,
                 redis: ec.CfnCacheCluster, domain_name: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        with open("config.yaml") as config_file:
            config_yaml = yaml.load(config_file, Loader=yaml.FullLoader)
        spoke_accounts = config_yaml.get("spoke_accounts", [])

        cognito_user_pool_client = cognito.UserPoolClient(
            self,
            "UserPoolClient",
            user_pool=cognito_user_pool,
            generate_secret=True,
            supported_identity_providers=[
                cognito.UserPoolClientIdentityProvider.COGNITO
            ],
            prevent_user_existence_errors=True,
            o_auth=cognito.OAuthSettings(
                callback_urls=[
                    "https://" + domain_name + "/auth",
                    "https://" + domain_name + "/oauth2/idpresponse",
                ],
                logout_urls=["https://" + domain_name + "/logout"],
                flows=cognito.OAuthFlows(authorization_code_grant=True,
                                         implicit_code_grant=True),
                scopes=[cognito.OAuthScope.OPENID, cognito.OAuthScope.EMAIL],
            ),
            auth_flows=cognito.AuthFlow(user_password=True, user_srp=True),
        )

        describe_cognito_user_pool_client = cr.AwsCustomResource(
            self,
            "UserPoolClientIDResource",
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
            on_create=cr.AwsSdkCall(
                service="CognitoIdentityServiceProvider",
                action="describeUserPoolClient",
                parameters={
                    "UserPoolId": cognito_user_pool.user_pool_id,
                    "ClientId": cognito_user_pool_client.user_pool_client_id,
                },
                physical_resource_id=cr.PhysicalResourceId.of(
                    cognito_user_pool_client.user_pool_client_id),
            ),
            install_latest_aws_sdk=True,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        cognito_user_pool_client_secret = (
            describe_cognito_user_pool_client.get_response_field(
                "UserPoolClient.ClientSecret"))

        imported_create_configuration_lambda_role = iam.Role.from_role_arn(
            self,
            "ImportedCreateConfigurationFileLambdaRole",
            role_arn=create_configuration_lambda_role_arn,
        )

        jwt_secret = config_yaml["jwt_secret"]

        config_secret_dict = {
            "oidc_secrets": {
                "client_id": cognito_user_pool_client.user_pool_client_id,
                "secret": cognito_user_pool_client_secret,
                "client_scope": ["email", "openid"],
            },
            "jwt_secret": jwt_secret,
        }

        config_secret_yaml = yaml.dump(
            config_secret_dict,
            explicit_start=True,
            default_flow_style=False,
        )

        config_secret = cr.AwsCustomResource(
            self,
            "ConfigSecretResource",
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
                resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE),
            on_update=cr.AwsSdkCall(
                service="SecretsManager",
                action="updateSecret",
                parameters={
                    "SecretId": CONFIG_SECRET_NAME,
                    "SecretString": config_secret_yaml,
                },
                physical_resource_id=cr.PhysicalResourceId.from_response(
                    "Name"),
            ),
            on_create=cr.AwsSdkCall(
                service="SecretsManager",
                action="createSecret",
                parameters={
                    "Name": CONFIG_SECRET_NAME,
                    "Description":
                    "Sensitive configuration parameters for ConsoleMe",
                    "SecretString": config_secret_yaml,
                },
                physical_resource_id=cr.PhysicalResourceId.from_response(
                    "Name"),
            ),
            on_delete=cr.AwsSdkCall(
                service="SecretsManager",
                action="deleteSecret",
                parameters={
                    "SecretId": CONFIG_SECRET_NAME,
                    "ForceDeleteWithoutRecovery": True,
                },
            ),
            install_latest_aws_sdk=True,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

        create_configuration_lambda = lambda_.Function(
            self,
            "CreateConfigurationFileLambda",
            code=lambda_.Code.from_asset("resources/create_config_lambda"),
            handler="index.handler",
            timeout=cdk.Duration.seconds(30),
            layers=[create_dependencies_layer(self, "create_config_lambda")],
            runtime=lambda_.Runtime.PYTHON_3_8,
            role=imported_create_configuration_lambda_role,
            environment={
                "DEPLOYMENT_BUCKET":
                s3_bucket_name,
                "OIDC_METADATA_URL":
                "https://cognito-idp." + self.region + ".amazonaws.com/" +
                cognito_user_pool.user_pool_id +
                "/.well-known/openid-configuration",
                "REDIS_HOST":
                redis.attr_redis_endpoint_address,
                "SES_IDENTITY_ARN":
                "arn:aws:ses:" + self.region + ":" + self.account +
                ":identity/" + domain_name,
                "SUPPORT_CHAT_URL":
                "https://discord.gg/nQVpNGGkYu",
                "APPLICATION_ADMIN":
                "consoleme_admin",
                "ACCOUNT_NUMBER":
                self.account,
                "ISSUER":
                domain_name,
                "SPOKE_ACCOUNTS":
                ",".join(spoke_accounts),
                "CONFIG_SECRET_NAME":
                CONFIG_SECRET_NAME,
            },
        )

        create_configuration_resource_provider = cr.Provider(
            self,
            "CreateConfigurationFileProvider",
            on_event_handler=create_configuration_lambda,
            log_retention=logs.RetentionDays.ONE_WEEK,
        )

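        # A fresh UUID property changes on every synth/deploy, which forces
        # CloudFormation to re-run this custom resource each time the stack
        # is deployed rather than only on first creation.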
        create_configuration_lambda_resource = cdk.CustomResource(
            self,
            "CreateConfigurationFile",
            service_token=create_configuration_resource_provider.service_token,
            removal_policy=cdk.RemovalPolicy.DESTROY,
            properties={"UUID": str(uuid4())},
        )

        create_configuration_lambda_resource.node.add_dependency(config_secret)
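
As an aside, the `describe_cognito_user_pool_client` custom resource above is needed because the generated app client secret is not exposed as a CloudFormation attribute. The `AwsSdkCall` it performs is roughly equivalent to the following boto3 call (a sketch; the pool and client IDs are placeholders):

import boto3

cognito_idp = boto3.client("cognito-idp")

# Same API call the AwsCustomResource issues through the JS SDK
# ("CognitoIdentityServiceProvider.describeUserPoolClient")
response = cognito_idp.describe_user_pool_client(
    UserPoolId="us-east-1_EXAMPLE",      # placeholder
    ClientId="1234567890abcdefexample",  # placeholder
)
client_secret = response["UserPoolClient"]["ClientSecret"]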
Example #14
	def __init__(self, scope: core.Construct, id: str, cluster: eks.Cluster, **kwargs) -> None:
		super().__init__(scope, id, **kwargs)

		maps= []
		self.roles=[]

		ecr_policy = iam.PolicyStatement(
			actions=[
				"ecr:DescribeImages",
				"ecr:ListImages",
				"ecr:BatchDeleteImage"
			], 
			effect=iam.Effect.ALLOW, 
			resources=[
				"arn:aws:ecr:%s:%s:repository/%s" % (core.Stack.of(self).region, core.Stack.of(self).account, namespace) for namespace in self.node.try_get_context("kubernetes")['namespaces']
			]
		)

		function = lbd.SingletonFunction(
			self,
			"ECRDeleteImagesFunction",
			uuid="19411b0e-0e80-4ad4-a316-3235940775e4",
			code=lbd.Code.from_asset(
				"custom_resources/kubernetes/"
			),
			handler="config.handler",
			runtime=lbd.Runtime.PYTHON_3_7,
			function_name="kubernetesConfig",
			initial_policy=[ecr_policy],
			log_retention=logs.RetentionDays.ONE_DAY,
			timeout=core.Duration.seconds(30)
		)

		provider = cr.Provider(
			self, "ECRDeleteImagesFunctionProvider",
			on_event_handler=function,
			log_retention=logs.RetentionDays.ONE_DAY
		)

		repositories = []
		for namespace in self.node.try_get_context("kubernetes")['namespaces']: 
			manifest = cluster.add_manifest(
				"eksConfigNamespace-%s" % namespace,
				{
					"apiVersion": "v1",
					"kind": "Namespace",
					"metadata": {
						"name": namespace
					}
				}
			)

			sa = cluster.add_service_account(
				"service-account-%s" % namespace,
				name="statement-demo",
				namespace=namespace
			)
			sa.node.add_dependency(manifest)
			self.roles.append(sa.role)

			repository = ecr.Repository(
				self, ("repository-%s" % namespace),
				removal_policy=core.RemovalPolicy.DESTROY,
				repository_name=namespace,
				lifecycle_rules=[ecr.LifecycleRule(max_image_count=1)]
			)

			repositories.append(repository.repository_arn)

			maps.append({
				"apiVersion": "v1",
				"kind": "ConfigMap",
				"metadata": {
					"name": "application.properties",
					"namespace": namespace
				},
				"data": {
					"application-aws.properties":  Path("../%s/src/main/resources/application-aws.properties" % namespace).read_text()
				}
			})

			core.CustomResource(
				self, "ECRDeleteImagesFunction-%s" % namespace, 
				service_token=provider.service_token,
				properties={
					"repository": namespace
				}
			).node.add_dependency(repository)

		eks.KubernetesManifest(
			self, 
			"eksConfigMaps", 
			cluster=cluster, 
			manifest=maps
		)

		iam.Policy(
			self, "saPolicy", 
			force=True, 
			policy_name="EKSSAPolicy", 
			roles=self.roles, 
			statements=[
				iam.PolicyStatement(
					actions=["cloudwatch:PutMetricData"], 
					conditions={
						"StringEquals": {
							"cloudwatch:namespace": "statement12"
						},
					},
					resources=["*"]
				)
			]
		)
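
A sketch of what the `config.handler` behind `ECRDeleteImagesFunction` plausibly does: emptying the repository on stack deletion so the RemovalPolicy.DESTROY on the repository can succeed. The real handler ships in `custom_resources/kubernetes/`; the code below is illustrative, and only the `repository` property and the granted ECR actions come from the stack above.

import boto3

ecr = boto3.client("ecr")


def handler(event, context):
    # Only act when the custom resource is being deleted with the stack
    if event["RequestType"] != "Delete":
        return

    repository = event["ResourceProperties"]["repository"]
    paginator = ecr.get_paginator("list_images")
    for page in paginator.paginate(repositoryName=repository):
        if page["imageIds"]:
            # Matches the ecr:BatchDeleteImage permission granted above
            ecr.batch_delete_image(repositoryName=repository,
                                   imageIds=page["imageIds"])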
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Get stack parameters: email and repo address
        notification_email = core.CfnParameter(
            self,
            "Email Address",
            type="String",
            description="Specify an email to receive notifications about pipeline outcomes.",
            allowed_pattern="^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
            min_length=5,
            max_length=320,
            constraint_description="Please enter an email address with correct format ([email protected])",
        )
        git_address = core.CfnParameter(
            self,
            "CodeCommit Repo Address",
            type="String",
            description="AWS CodeCommit repository clone URL to connect to the framework.",
            allowed_pattern=(
                r"^(((https:\/\/|ssh:\/\/)(git\-codecommit)\.[a-zA-Z0-9_.+-]+(amazonaws\.com\/)[a-zA-Z0-9-.]"
                r"+(\/)[a-zA-Z0-9-.]+(\/)[a-zA-Z0-9-.]+$)|^$)"
            ),
            min_length=0,
            max_length=320,
            constraint_description=(
                "CodeCommit address must follow the pattern: ssh or "
                "https://git-codecommit.REGION.amazonaws.com/version/repos/REPONAME"
            ),
        )

        # Get the optional S3 assets bucket to use
        existing_bucket = core.CfnParameter(
            self,
            "ExistingS3Bucket",
            type="String",
            description="Name of existing S3 bucket to be used for ML assests. S3 Bucket must be in the same region as the deployed stack, and has versioning enabled. If not provided, a new S3 bucket will be created.",
            allowed_pattern="((?=^.{3,63}$)(?!^(\d+\.)+\d+$)(^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])$)|^$)",
            min_length=0,
            max_length=63,
        )

        # Conditions
        git_address_provided = core.CfnCondition(
            self,
            "GitAddressProvided",
            expression=core.Fn.condition_not(core.Fn.condition_equals(git_address, "")),
        )

        # client provided an existing S3 bucket name, to be used for assets
        existing_bucket_provided = core.CfnCondition(
            self,
            "S3BucketProvided",
            expression=core.Fn.condition_not(core.Fn.condition_equals(existing_bucket.value_as_string.strip(), "")),
        )

        # S3 bucket needs to be created for assets
        create_new_bucket = core.CfnCondition(
            self,
            "CreateS3Bucket",
            expression=core.Fn.condition_equals(existing_bucket.value_as_string.strip(), ""),
        )
        # Constants
        pipeline_stack_name = "MLOps-pipeline"

        # CDK Resources setup
        access_logs_bucket = s3.Bucket(
            self,
            "accessLogs",
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )

        # Apply secure transfer bucket policy
        apply_secure_bucket_policy(access_logs_bucket)

        # This is a logging bucket.
        access_logs_bucket.node.default_child.cfn_options.metadata = suppress_s3_access_policy()

        # Import the user-provided S3 bucket, if any. s3.Bucket.from_bucket_arn is used instead of s3.Bucket.from_bucket_name to allow a cross-account bucket.
        client_existing_bucket = s3.Bucket.from_bucket_arn(
            self,
            "ClientExistingBucket",
            f"arn:aws:s3:::{existing_bucket.value_as_string.strip()}",
        )

        # Create the resource if existing_bucket_provided condition is True
        core.Aspects.of(client_existing_bucket).add(ConditionalResources(existing_bucket_provided))

        # Creating assets bucket so that users can upload ML Models to it.
        assets_bucket = s3.Bucket(
            self,
            "pipeline-assets-" + str(uuid.uuid4()),
            versioned=True,
            encryption=s3.BucketEncryption.S3_MANAGED,
            server_access_logs_bucket=access_logs_bucket,
            server_access_logs_prefix="assets_bucket_access_logs",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )

        # Apply secure transport bucket policy
        apply_secure_bucket_policy(assets_bucket)

        # Create the resource if create_new_bucket condition is True
        core.Aspects.of(assets_bucket).add(ConditionalResources(create_new_bucket))

        # Get assets S3 bucket's name/arn, based on the condition
        assets_s3_bucket_name = core.Fn.condition_if(
            existing_bucket_provided.logical_id,
            client_existing_bucket.bucket_name,
            assets_bucket.bucket_name,
        ).to_string()

        blueprints_bucket_name = "blueprint-repository-" + str(uuid.uuid4())
        blueprint_repository_bucket = s3.Bucket(
            self,
            blueprints_bucket_name,
            encryption=s3.BucketEncryption.S3_MANAGED,
            server_access_logs_bucket=access_logs_bucket,
            server_access_logs_prefix=blueprints_bucket_name,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )
        # Apply secure transport bucket policy
        apply_secure_bucket_policy(blueprint_repository_bucket)

        # Custom resource to copy source bucket content to blueprints bucket
        custom_resource_lambda_fn = lambda_.Function(
            self,
            "CustomResourceLambda",
            code=lambda_.Code.from_asset("lambdas/custom_resource"),
            handler="index.on_event",
            runtime=lambda_.Runtime.PYTHON_3_8,
            environment={
                "source_bucket": "https://%%BUCKET_NAME%%-"
                + core.Aws.REGION
                + ".s3.amazonaws.com/%%SOLUTION_NAME%%/%%VERSION%%",
                "destination_bucket": blueprint_repository_bucket.bucket_name,
                "LOG_LEVEL": "INFO",
            },
            timeout=core.Duration.seconds(60),
        )
        custom_resource_lambda_fn.node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W58",
                        "reason": "The lambda functions role already has permissions to write cloudwatch logs",
                    }
                ]
            }
        }
        blueprint_repository_bucket.grant_write(custom_resource_lambda_fn)
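        # Note: the service token below is the Lambda's ARN directly (no
        # cr.Provider), so lambdas/custom_resource/index.on_event must speak
        # the raw CloudFormation callback protocol itself, i.e. POST its
        # result to event["ResponseURL"] (e.g. via cfnresponse or crhelper).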
        custom_resource = core.CustomResource(
            self,
            "CustomResourceCopyAssets",
            service_token=custom_resource_lambda_fn.function_arn,
        )
        custom_resource.node.add_dependency(blueprint_repository_bucket)
        # IAM policies setup ###
        cloudformation_role = iam.Role(
            self,
            "mlopscloudformationrole",
            assumed_by=iam.ServicePrincipal("cloudformation.amazonaws.com"),
        )
        # Cloudformation policy setup
        orchestrator_policy = iam.Policy(
            self,
            "lambdaOrchestratorPolicy",
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "cloudformation:CreateStack",
                        "cloudformation:DeleteStack",
                        "cloudformation:UpdateStack",
                        "cloudformation:ListStackResources",
                    ],
                    resources=[
                        (
                            f"arn:{core.Aws.PARTITION}:cloudformation:{core.Aws.REGION}:"
                            f"{core.Aws.ACCOUNT_ID}:stack/{pipeline_stack_name}*/*"
                        ),
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "iam:CreateRole",
                        "iam:DeleteRole",
                        "iam:DeleteRolePolicy",
                        "iam:GetRole",
                        "iam:GetRolePolicy",
                        "iam:PassRole",
                        "iam:PutRolePolicy",
                        "iam:AttachRolePolicy",
                        "iam:DetachRolePolicy",
                    ],
                    resources=[f"arn:{core.Aws.PARTITION}:iam::{core.Aws.ACCOUNT_ID}:role/{pipeline_stack_name}*"],
                ),
                iam.PolicyStatement(
                    actions=[
                        "ecr:CreateRepository",
                        "ecr:DeleteRepository",
                        "ecr:DescribeRepositories",
                    ],
                    resources=[
                        (
                            f"arn:{core.Aws.PARTITION}:ecr:{core.Aws.REGION}:"
                            f"{core.Aws.ACCOUNT_ID}:repository/awsmlopsmodels*"
                        )
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "codebuild:CreateProject",
                        "codebuild:DeleteProject",
                        "codebuild:BatchGetProjects",
                    ],
                    resources=[
                        (
                            f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                            f"{core.Aws.ACCOUNT_ID}:project/ContainerFactory*"
                        ),
                        (
                            f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                            f"{core.Aws.ACCOUNT_ID}:project/VerifySagemaker*"
                        ),
                        (
                            f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                            f"{core.Aws.ACCOUNT_ID}:report-group/*"
                        ),
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "lambda:CreateFunction",
                        "lambda:DeleteFunction",
                        "lambda:InvokeFunction",
                        "lambda:PublishLayerVersion",
                        "lambda:DeleteLayerVersion",
                        "lambda:GetLayerVersion",
                        "lambda:GetFunctionConfiguration",
                        "lambda:GetFunction",
                        "lambda:AddPermission",
                        "lambda:RemovePermission",
                        "lambda:UpdateFunctionConfiguration",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:layer:*",
                        f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:function:*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=["s3:GetObject"],
                    resources=[
                        blueprint_repository_bucket.bucket_arn,
                        blueprint_repository_bucket.arn_for_objects("*"),
                        f"arn:{core.Aws.PARTITION}:s3:::{assets_s3_bucket_name}/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "codepipeline:CreatePipeline",
                        "codepipeline:DeletePipeline",
                        "codepipeline:GetPipeline",
                        "codepipeline:GetPipelineState",
                    ],
                    resources=[
                        (
                            f"arn:{core.Aws.PARTITION}:codepipeline:{core.Aws.REGION}:"
                            f"{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*"
                        )
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "apigateway:POST",
                        "apigateway:PATCH",
                        "apigateway:DELETE",
                        "apigateway:GET",
                        "apigateway:PUT",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis/*",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/account",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "logs:CreateLogGroup",
                        "logs:DescribeLogGroups",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "s3:CreateBucket",
                        "s3:PutEncryptionConfiguration",
                        "s3:PutBucketVersioning",
                        "s3:PutBucketPublicAccessBlock",
                        "s3:PutBucketLogging",
                    ],
                    resources=["arn:" + core.Aws.PARTITION + ":s3:::*"],
                ),
                iam.PolicyStatement(
                    actions=[
                        "sns:CreateTopic",
                        "sns:DeleteTopic",
                        "sns:Subscribe",
                        "sns:Unsubscribe",
                        "sns:GetTopicAttributes",
                        "sns:SetTopicAttributes",
                    ],
                    resources=[
                        (
                            f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:"
                            f"{pipeline_stack_name}*-PipelineNotification*"
                        ),
                        (
                            f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:"
                            f"{pipeline_stack_name}*-ModelMonitorPipelineNotification*"
                        ),
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "events:PutRule",
                        "events:DescribeRule",
                        "events:PutTargets",
                        "events:RemoveTargets",
                        "events:DeleteRule",
                        "events:PutEvents",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:rule/*",
                        f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                    ],
                ),
            ],
        )
        orchestrator_policy.attach_to_role(cloudformation_role)

        # Lambda function IAM setup
        lambda_passrole_policy = iam.PolicyStatement(actions=["iam:PassRole"], resources=[cloudformation_role.role_arn])
        # API Gateway and lambda setup to enable provisioning pipelines through API calls
        provisioner_apigw_lambda = aws_apigateway_lambda.ApiGatewayToLambda(
            self,
            "PipelineOrchestration",
            lambda_function_props={
                "runtime": lambda_.Runtime.PYTHON_3_8,
                "handler": "index.handler",
                "code": lambda_.Code.from_asset("lambdas/pipeline_orchestration"),
            },
            api_gateway_props={
                "defaultMethodOptions": {
                    "authorizationType": apigw.AuthorizationType.IAM,
                },
                "restApiName": f"{core.Aws.STACK_NAME}-orchestrator",
                "proxy": False,
                "dataTraceEnabled": True,
            },
        )

        provision_resource = provisioner_apigw_lambda.api_gateway.root.add_resource("provisionpipeline")
        provision_resource.add_method("POST")
        status_resource = provisioner_apigw_lambda.api_gateway.root.add_resource("pipelinestatus")
        status_resource.add_method("POST")
        blueprint_repository_bucket.grant_read(provisioner_apigw_lambda.lambda_function)
        provisioner_apigw_lambda.lambda_function.add_to_role_policy(lambda_passrole_policy)
        orchestrator_policy.attach_to_role(provisioner_apigw_lambda.lambda_function.role)
        provisioner_apigw_lambda.lambda_function.add_to_role_policy(
            iam.PolicyStatement(actions=["xray:PutTraceSegments"], resources=["*"])
        )
        lambda_node = provisioner_apigw_lambda.lambda_function.node.default_child
        lambda_node.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W12",
                        "reason": "The xray permissions PutTraceSegments is not able to be bound to resources.",
                    }
                ]
            }
        }
        # Environment variables setup
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="BLUEPRINT_BUCKET_URL",
            value=str(blueprint_repository_bucket.bucket_regional_domain_name),
        )
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="BLUEPRINT_BUCKET", value=str(blueprint_repository_bucket.bucket_name)
        )
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="ACCESS_BUCKET", value=str(access_logs_bucket.bucket_name)
        )
        provisioner_apigw_lambda.lambda_function.add_environment(key="ASSETS_BUCKET", value=str(assets_s3_bucket_name))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="CFN_ROLE_ARN", value=str(cloudformation_role.role_arn)
        )
        provisioner_apigw_lambda.lambda_function.add_environment(key="PIPELINE_STACK_NAME", value=pipeline_stack_name)
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="NOTIFICATION_EMAIL", value=notification_email.value_as_string
        )
        provisioner_apigw_lambda.lambda_function.add_environment(key="LOG_LEVEL", value="DEBUG")
        cfn_policy_for_lambda = orchestrator_policy.node.default_child
        cfn_policy_for_lambda.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W76",
                        "reason": "A complex IAM policy is required for this resource.",
                    }
                ]
            }
        }

        # Codepipeline with Git source definitions ###
        source_output = codepipeline.Artifact()
        # processing git_address to retrieve repo name
        repo_name_split = core.Fn.split("/", git_address.value_as_string)
        repo_name = core.Fn.select(5, repo_name_split)
        # getting codecommit repo cdk object using 'from_repository_name'
        repo = codecommit.Repository.from_repository_name(self, "AWSMLOpsFrameworkRepository", repo_name)
        codebuild_project = codebuild.PipelineProject(
            self,
            "Take config file",
            build_spec=codebuild.BuildSpec.from_object(
                {
                    "version": "0.2",
                    "phases": {
                        "build": {
                            "commands": [
                                "ls -a",
                                "aws lambda invoke --function-name "
                                + provisioner_apigw_lambda.lambda_function.function_name
                                + " --payload fileb://mlops-config.json response.json"
                                + " --invocation-type RequestResponse",
                            ]
                        }
                    },
                }
            ),
        )
        # Defining a CodePipeline pipeline with CodeCommit as the source
        codecommit_pipeline = codepipeline.Pipeline(
            self,
            "MLOpsCodeCommitPipeline",
            stages=[
                codepipeline.StageProps(
                    stage_name="Source",
                    actions=[
                        codepipeline_actions.CodeCommitSourceAction(
                            action_name="CodeCommit",
                            repository=repo,
                            branch="main",
                            output=source_output,
                        )
                    ],
                ),
                codepipeline.StageProps(
                    stage_name="TakeConfig",
                    actions=[
                        codepipeline_actions.CodeBuildAction(
                            action_name="provision_pipeline",
                            input=source_output,
                            outputs=[],
                            project=codebuild_project,
                        )
                    ],
                ),
            ],
            cross_account_keys=False,
        )
        codecommit_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["lambda:InvokeFunction"],
                resources=[provisioner_apigw_lambda.lambda_function.function_arn],
            )
        )
        codebuild_project.add_to_role_policy(
            iam.PolicyStatement(
                actions=["lambda:InvokeFunction"],
                resources=[provisioner_apigw_lambda.lambda_function.function_arn],
            )
        )
        pipeline_child_nodes = codecommit_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W35",
                        "reason": "This is a managed bucket generated by CDK for codepipeline.",
                    },
                    {
                        "id": "W51",
                        "reason": "This is a managed bucket generated by CDK for codepipeline.",
                    },
                ]
            }
        }

        # Custom resource for operational metrics ###
        metricsMapping = core.CfnMapping(self, "AnonymousData", mapping={"SendAnonymousData": {"Data": "Yes"}})
        metrics_condition = core.CfnCondition(
            self,
            "AnonymousDatatoAWS",
            expression=core.Fn.condition_equals(metricsMapping.find_in_map("SendAnonymousData", "Data"), "Yes"),
        )

        helper_function = lambda_.Function(
            self,
            "SolutionHelper",
            code=lambda_.Code.from_asset("lambdas/solution_helper"),
            handler="lambda_function.handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(60),
        )

        createIdFunction = core.CustomResource(
            self,
            "CreateUniqueID",
            service_token=helper_function.function_arn,
            properties={"Resource": "UUID"},
            resource_type="Custom::CreateUUID",
        )

        sendDataFunction = core.CustomResource(
            self,
            "SendAnonymousData",
            service_token=helper_function.function_arn,
            properties={
                "Resource": "AnonymousMetric",
                "UUID": createIdFunction.get_att_string("UUID"),
                "gitSelected": git_address.value_as_string,
                "Region": core.Aws.REGION,
                "SolutionId": "SO0136",
                "Version": "%%VERSION%%",
            },
            resource_type="Custom::AnonymousData",
        )

        core.Aspects.of(helper_function).add(ConditionalResources(metrics_condition))
        core.Aspects.of(createIdFunction).add(ConditionalResources(metrics_condition))
        core.Aspects.of(sendDataFunction).add(ConditionalResources(metrics_condition))
        helper_function.node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W58",
                        "reason": "The lambda functions role already has permissions to write cloudwatch logs",
                    }
                ]
            }
        }

        # If user chooses Git as pipeline provision type, create codepipeline with Git repo as source
        core.Aspects.of(repo).add(ConditionalResources(git_address_provided))
        core.Aspects.of(codecommit_pipeline).add(ConditionalResources(git_address_provided))
        core.Aspects.of(codebuild_project).add(ConditionalResources(git_address_provided))

        # Create Template Interface
        self.template_options.metadata = {
            "AWS::CloudFormation::Interface": {
                "ParameterGroups": [
                    {
                        "Label": {"default": "MLOps Framework Settings"},
                        "Parameters": [
                            notification_email.logical_id,
                            git_address.logical_id,
                            existing_bucket.logical_id,
                        ],
                    }
                ],
                "ParameterLabels": {
                    f"{notification_email.logical_id}": {"default": "Notification Email (Required)"},
                    f"{git_address.logical_id}": {"default": "CodeCommit Repo URL Address (Optional)"},
                    f"{existing_bucket.logical_id}": {"default": "Name of an Existing S3 Bucket (Optional)"},
                },
            }
        }
        # Outputs #
        core.CfnOutput(
            self,
            id="BlueprintsBucket",
            value=f"https://s3.console.aws.amazon.com/s3/buckets/{blueprint_repository_bucket.bucket_name}",
            description="S3 Bucket to upload MLOps Framework Blueprints",
        )
        core.CfnOutput(
            self,
            id="AssetsBucket",
            value=f"https://s3.console.aws.amazon.com/s3/buckets/{assets_s3_bucket_name}",
            description="S3 Bucket to upload model artifact",
        )
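
The SolutionHelper function above backs two custom resources: Custom::CreateUUID returns an anonymous identifier and Custom::AnonymousData forwards the metrics payload. A minimal sketch of the UUID half, assuming a crhelper-style handler where values placed in `Data` become attributes readable via `get_att_string("UUID")`; the metrics half is omitted:

import uuid

from crhelper import CfnResource  # assumed to be bundled with the asset

helper = CfnResource()


@helper.create
def create(event, context):
    if event["ResourceProperties"].get("Resource") == "UUID":
        # Read back in CDK via createIdFunction.get_att_string("UUID")
        helper.Data["UUID"] = str(uuid.uuid4())
    return "SolutionHelperResource"


def handler(event, context):
    helper(event, context)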
Example #16
    def __init__(self, scope: core.Construct, config: dict, id: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create the security group that will allow connections to this instance
        # I am lazy and create only 1 SG that allows TCP 5432 from itself
        # database clients (lambda functions) will have TCP 5432 authorized for themselves too,
        # which is not necessary but harmless
        self.db_security_group = ec2.SecurityGroup(self,
                                                   "Database Security Group",
                                                   vpc=config['vpc'])
        self.db_security_group.add_ingress_rule(self.db_security_group,
                                                ec2.Port.tcp(5432))

        self.cluster = rds.DatabaseCluster(
            self,
            config['rds']['name'],
            engine=rds.DatabaseClusterEngine.aurora_postgres(
                version=rds.AuroraPostgresEngineVersion.VER_11_7),
            default_database_name=config['rds']['databaseName'],
            master_user=rds.Login(username=config['rds']['masterUsername']),
            instance_props=rds.InstanceProps(
                vpc=config['vpc'], security_groups=[self.db_security_group]))

        # Add Secrets Manager Password rotation
        self.cluster.add_rotation_single_user()

        # aurora serverless is not yet supported by CDK, https://github.com/aws/aws-cdk/issues/929
        # escape hatch https://docs.aws.amazon.com/cdk/latest/guide/cfn_layer.html#cfn_layer_raw
        # cfn_aurora_cluster = cluster.node.default_child
        # cfn_aurora_cluster.add_override("Properties.EngineMode", "serverless")
        # cfn_aurora_cluster.add_override("Properties.EnableHttpEndpoint",True) # Enable Data API
        # cfn_aurora_cluster.add_override("Properties.ScalingConfiguration", {
        #     'AutoPause': True,
        #     'MaxCapacity': 4,
        #     'MinCapacity': 1,
        #     'SecondsUntilAutoPause': 600
        # })
        # cluster.node.try_remove_child('Instance1') # Remove 'Server' instance that isn't required for serverless Aurora

        # create a custom resource to initialize the data schema
        function = _lambda.Function(
            self,
            config['custom resource lambda']['name'],
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('./custom_resources'),
            handler='app.on_event',
            vpc=config['vpc'],
            environment={
                'DB_SECRET_ARN': self.get_secret_arn(),
                'PYTHON_LOGLEVEL': 'DEBUG'
            },
            security_groups=[self.db_security_group])
        # add permission to access the secret
        function.add_to_role_policy(
            iam.PolicyStatement(resources=[self.get_secret_arn()],
                                actions=["secretsmanager:GetSecretValue"]))

        custom_resource_provider = cr.Provider(self,
                                               'Custom Resource Provider',
                                               on_event_handler=function)
        custom_resource = core.CustomResource(
            self,
            'Custom Resource',
            service_token=custom_resource_provider.service_token)

        # Tell CFN to wait for the database to be ready before it creates the custom resource
        custom_resource.node.add_dependency(self.cluster)
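
For reference, a sketch of the `app.on_event` handler wired up above, assuming it pulls the cluster credentials from the secret referenced by DB_SECRET_ARN and applies the schema on create; the driver choice and the DDL itself live in ./custom_resources and are omitted here.

import json
import os

import boto3


def on_event(event, context):
    if event["RequestType"] != "Create":
        return

    # Rotation-enabled RDS secrets are JSON blobs with standard keys
    # (host, port, username, password, dbname, ...)
    secret_value = boto3.client("secretsmanager").get_secret_value(
        SecretId=os.environ["DB_SECRET_ARN"])
    credentials = json.loads(secret_value["SecretString"])
    host, port = credentials["host"], credentials["port"]
    user, password = credentials["username"], credentials["password"]
    dbname = credentials["dbname"]
    # Connect with a bundled PostgreSQL driver (e.g. pg8000 or psycopg2)
    # and run the schema DDL here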
    def __init__(
        self,
        scope: cdk.Construct,
        construct_id: str,
        # domain: SMSDomainStack,
        **kwargs
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        user_name = cdk.CfnParameter(
            self,
            "SMSUserName",
            type="String",
            description="User Name",
            default="StudioUser",
        )

        git_repository = cdk.CfnParameter(
            self,
            "GitRepository",
            type="String",
            description="Git Repository",
            default="https://github.com/acere/SagemakerStudioCDK.git",
        )

        # Read the StudioDomainId exported by the StudioDomain stack
        StudioDomainId = cdk.Fn.import_value("StudioDomainId")
        role_arn = cdk.Fn.import_value("SageMakerStudioUserRole")

        user_settings = sagemaker.CfnUserProfile.UserSettingsProperty(
            execution_role=role_arn
        )
        user = sagemaker.CfnUserProfile(
            self,
            "user",
            domain_id=StudioDomainId,
            # single_sign_on_user_identifier="UserName",
            # single_sign_on_user_value="SSOUserName",
            user_profile_name=user_name.value_as_string,
            user_settings=user_settings,
        )
        user_id = user.user_profile_name

        cdk.CfnOutput(
            self,
            "UserID",
            value=user_id,
            description="SageMaker Studio User ID",
            export_name="StudioUserId",
        )

        provider_service_token = cdk.Fn.import_value("StudioUserProviderToken")
        cr_users_init = cdk.CustomResource(
            self,
            "PopulateUserLambda",
            # service_token=provider.service_token,
            service_token=provider_service_token,
            properties={
                "StudioUserName": user.user_profile_name,
                "DomainID": StudioDomainId,
                "GitRepository": git_repository,
            },
        )
        cr_users_init.node.add_dependency(user)

        self.JupyterApp = sagemaker.CfnApp(
            self,
            "DefaultStudioApp",
            app_name="default",
            app_type="JupyterServer",
            domain_id=StudioDomainId,
            user_profile_name=user_id,
        )
        self.JupyterApp.add_depends_on(user)
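
The StudioUserProviderToken import above implies a companion stack that created the provider and exported its service token. A sketch of what that producer side could look like, assuming a cr.Provider wrapping the user-population Lambda; all names besides the export name are illustrative:

from aws_cdk import core as cdk
from aws_cdk import aws_lambda as lambda_
from aws_cdk import custom_resources as cr


class StudioDomainProviderStack(cdk.Stack):
    def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        populate_user_fn = lambda_.Function(
            self, "PopulateUserFn",
            runtime=lambda_.Runtime.PYTHON_3_8,
            handler="index.on_event",
            code=lambda_.Code.from_asset("custom_resources/populate_user"),
        )
        provider = cr.Provider(self, "StudioUserProvider",
                               on_event_handler=populate_user_fn)

        # Consumed in the user stack via cdk.Fn.import_value
        cdk.CfnOutput(self, "StudioUserProviderTokenOutput",
                      value=provider.service_token,
                      export_name="StudioUserProviderToken")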
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 *,
                 multi_account=False,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Get stack parameters:
        notification_email = create_notification_email_parameter(self)
        git_address = create_git_address_parameter(self)
        # Get the optional S3 assets bucket to use
        existing_bucket = create_existing_bucket_parameter(self)
        # Get the optional Amazon ECR repository to use
        existing_ecr_repo = create_existing_ecr_repo_parameter(self)
        # create only if multi_account template
        if multi_account:
            # create development parameters
            account_type = "development"
            dev_account_id = create_account_id_parameter(
                self, "DEV_ACCOUNT_ID", account_type)
            dev_org_id = create_org_id_parameter(self, "DEV_ORG_ID",
                                                 account_type)
            # create staging parameters
            account_type = "staging"
            staging_account_id = create_account_id_parameter(
                self, "STAGING_ACCOUNT_ID", account_type)
            staging_org_id = create_org_id_parameter(self, "STAGING_ORG_ID",
                                                     account_type)
            # create production parameters
            account_type = "production"
            prod_account_id = create_account_id_parameter(
                self, "PROD_ACCOUNT_ID", account_type)
            prod_org_id = create_org_id_parameter(self, "PROD_ORG_ID",
                                                  account_type)

        # Conditions
        git_address_provided = create_git_address_provided_condition(
            self, git_address)

        # client provided an existing S3 bucket name, to be used for assets
        existing_bucket_provided = create_existing_bucket_provided_condition(
            self, existing_bucket)

        # client provided an existing Amazon ECR name
        existing_ecr_provided = create_existing_ecr_provided_condition(
            self, existing_ecr_repo)

        # S3 bucket needs to be created for assets
        create_new_bucket = create_new_bucket_condition(self, existing_bucket)

        # Amazon ECR repo needs to be created for custom algorithms
        create_new_ecr_repo = create_new_ecr_repo_condition(
            self, existing_ecr_repo)

        # Constants
        pipeline_stack_name = "mlops-pipeline"

        # CDK Resources setup
        access_logs_bucket = s3.Bucket(
            self,
            "accessLogs",
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )

        # Apply secure transfer bucket policy
        apply_secure_bucket_policy(access_logs_bucket)

        # This is a logging bucket.
        access_logs_bucket.node.default_child.cfn_options.metadata = suppress_s3_access_policy(
        )

        # Import user provide S3 bucket, if any. s3.Bucket.from_bucket_arn is used instead of
        # s3.Bucket.from_bucket_name to allow cross account bucket.
        client_existing_bucket = s3.Bucket.from_bucket_arn(
            self,
            "ClientExistingBucket",
            f"arn:aws:s3:::{existing_bucket.value_as_string.strip()}",
        )

        # Create the resource if existing_bucket_provided condition is True
        core.Aspects.of(client_existing_bucket).add(
            ConditionalResources(existing_bucket_provided))

        # Import the user-provided Amazon ECR repository
        client_ecr_repo = ecr.Repository.from_repository_name(
            self, "ClientExistingECRRepo", existing_ecr_repo.value_as_string)
        # Create the resource if existing_ecr_provided condition is True
        core.Aspects.of(client_ecr_repo).add(
            ConditionalResources(existing_ecr_provided))

        # Creating assets bucket so that users can upload ML Models to it.
        assets_bucket = s3.Bucket(
            self,
            "pipeline-assets-" + str(uuid.uuid4()),
            versioned=True,
            encryption=s3.BucketEncryption.S3_MANAGED,
            server_access_logs_bucket=access_logs_bucket,
            server_access_logs_prefix="assets_bucket_access_logs",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )

        # Apply secure transport bucket policy
        apply_secure_bucket_policy(assets_bucket)
        s3_actions = ["s3:GetObject", "s3:ListBucket"]
        # if multi account
        if multi_account:
            # add permissions for other accounts to access the assets bucket

            assets_bucket.add_to_resource_policy(
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=s3_actions,
                    principals=[
                        iam.AccountPrincipal(dev_account_id.value_as_string),
                        iam.AccountPrincipal(
                            staging_account_id.value_as_string),
                        iam.AccountPrincipal(prod_account_id.value_as_string),
                    ],
                    resources=[
                        assets_bucket.bucket_arn,
                        f"{assets_bucket.bucket_arn}/*"
                    ],
                ))

        # Create the resource if create_new_bucket condition is True
        core.Aspects.of(assets_bucket).add(
            ConditionalResources(create_new_bucket))

        # Get assets S3 bucket's name/arn, based on the condition
        assets_s3_bucket_name = core.Fn.condition_if(
            existing_bucket_provided.logical_id,
            client_existing_bucket.bucket_name,
            assets_bucket.bucket_name,
        ).to_string()

        # Creating Amazon ECR repository
        ecr_repo = ecr.Repository(self, "ECRRepo", image_scan_on_push=True)

        # if multi account
        if multi_account:
            # add permissions for other accounts to pull images
            ecr_repo.add_to_resource_policy(
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "ecr:DescribeImages",
                        "ecr:DescribeRepositories",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                    ],
                    principals=[
                        iam.AccountPrincipal(dev_account_id.value_as_string),
                        iam.AccountPrincipal(
                            staging_account_id.value_as_string),
                        iam.AccountPrincipal(prod_account_id.value_as_string),
                    ],
                ))
        # Create the resource if create_new_ecr condition is True
        core.Aspects.of(ecr_repo).add(
            ConditionalResources(create_new_ecr_repo))

        # Get ECR repo's name based on the condition
        ecr_repo_name = core.Fn.condition_if(
            existing_ecr_provided.logical_id,
            client_ecr_repo.repository_name,
            ecr_repo.repository_name,
        ).to_string()

        # Get ECR repo's arn based on the condition
        ecr_repo_arn = core.Fn.condition_if(
            existing_ecr_provided.logical_id,
            client_ecr_repo.repository_arn,
            ecr_repo.repository_arn,
        ).to_string()

        blueprints_bucket_name = "blueprint-repository-" + str(uuid.uuid4())
        blueprint_repository_bucket = s3.Bucket(
            self,
            blueprints_bucket_name,
            encryption=s3.BucketEncryption.S3_MANAGED,
            server_access_logs_bucket=access_logs_bucket,
            server_access_logs_prefix=blueprints_bucket_name,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )
        # Apply secure transport bucket policy
        apply_secure_bucket_policy(blueprint_repository_bucket)

        # if multi account
        if multi_account:
            # add permissions for other accounts to access the blueprint bucket
            blueprint_repository_bucket.add_to_resource_policy(
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=s3_actions,
                    principals=[
                        iam.AccountPrincipal(dev_account_id.value_as_string),
                        iam.AccountPrincipal(
                            staging_account_id.value_as_string),
                        iam.AccountPrincipal(prod_account_id.value_as_string),
                    ],
                    resources=[
                        blueprint_repository_bucket.bucket_arn,
                        f"{blueprint_repository_bucket.bucket_arn}/*"
                    ],
                ))

        # Custom resource to copy source bucket content to blueprints bucket
        custom_resource_lambda_fn = lambda_.Function(
            self,
            "CustomResourceLambda",
            code=lambda_.Code.from_asset("lambdas/custom_resource"),
            handler="index.on_event",
            runtime=lambda_.Runtime.PYTHON_3_8,
            environment={
                "source_bucket": "https://%%BUCKET_NAME%%-" + core.Aws.REGION +
                ".s3.amazonaws.com/%%SOLUTION_NAME%%/%%VERSION%%",
                "destination_bucket": blueprint_repository_bucket.bucket_name,
                "LOG_LEVEL": "INFO",
            },
            timeout=core.Duration.seconds(60),
        )

        custom_resource_lambda_fn.node.default_child.cfn_options.metadata = suppress_lambda_policies(
        )
        blueprint_repository_bucket.grant_write(custom_resource_lambda_fn)
        custom_resource = core.CustomResource(
            self,
            "CustomResourceCopyAssets",
            service_token=custom_resource_lambda_fn.function_arn,
        )
        custom_resource.node.add_dependency(blueprint_repository_bucket)
        # IAM policies setup ###
        cloudformation_role = iam.Role(
            self,
            "mlopscloudformationrole",
            assumed_by=iam.ServicePrincipal("cloudformation.amazonaws.com"),
        )
        lambda_invoke_action = "lambda:InvokeFunction"
        # Cloudformation policy setup
        orchestrator_policy = iam.Policy(
            self,
            "lambdaOrchestratorPolicy",
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "cloudformation:CreateStack",
                        "cloudformation:DeleteStack",
                        "cloudformation:UpdateStack",
                        "cloudformation:ListStackResources",
                    ],
                    resources=[
                        (f"arn:{core.Aws.PARTITION}:cloudformation:{core.Aws.REGION}:"
                         f"{core.Aws.ACCOUNT_ID}:stack/{pipeline_stack_name}*/*"
                         ),
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "iam:CreateRole",
                        "iam:DeleteRole",
                        "iam:DeleteRolePolicy",
                        "iam:GetRole",
                        "iam:GetRolePolicy",
                        "iam:PassRole",
                        "iam:PutRolePolicy",
                        "iam:AttachRolePolicy",
                        "iam:DetachRolePolicy",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:iam::{core.Aws.ACCOUNT_ID}:role/{pipeline_stack_name}*"
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "ecr:CreateRepository",
                        "ecr:DescribeRepositories",
                    ],
                    resources=[
                        (f"arn:{core.Aws.PARTITION}:ecr:{core.Aws.REGION}:"
                         f"{core.Aws.ACCOUNT_ID}:repository/{ecr_repo_name}")
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "codebuild:CreateProject",
                        "codebuild:DeleteProject",
                        "codebuild:BatchGetProjects",
                    ],
                    resources=[
                        (f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                         f"{core.Aws.ACCOUNT_ID}:project/ContainerFactory*"),
                        (f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                         f"{core.Aws.ACCOUNT_ID}:project/VerifySagemaker*"),
                        (f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                         f"{core.Aws.ACCOUNT_ID}:report-group/*"),
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "lambda:CreateFunction",
                        "lambda:DeleteFunction",
                        lambda_invoke_action,
                        "lambda:PublishLayerVersion",
                        "lambda:DeleteLayerVersion",
                        "lambda:GetLayerVersion",
                        "lambda:GetFunctionConfiguration",
                        "lambda:GetFunction",
                        "lambda:AddPermission",
                        "lambda:RemovePermission",
                        "lambda:UpdateFunctionConfiguration",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:layer:*",
                        f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:function:*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=s3_actions,
                    resources=[
                        blueprint_repository_bucket.bucket_arn,
                        blueprint_repository_bucket.arn_for_objects("*"),
                        f"arn:{core.Aws.PARTITION}:s3:::{assets_s3_bucket_name}/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "codepipeline:CreatePipeline",
                        "codepipeline:UpdatePipeline",
                        "codepipeline:DeletePipeline",
                        "codepipeline:GetPipeline",
                        "codepipeline:GetPipelineState",
                    ],
                    resources=
                    [(f"arn:{core.Aws.PARTITION}:codepipeline:{core.Aws.REGION}:"
                      f"{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*")],
                ),
                iam.PolicyStatement(
                    actions=[
                        "apigateway:POST",
                        "apigateway:PATCH",
                        "apigateway:DELETE",
                        "apigateway:GET",
                        "apigateway:PUT",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis/*",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/account",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "logs:CreateLogGroup",
                        "logs:DescribeLogGroups",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "s3:CreateBucket",
                        "s3:PutEncryptionConfiguration",
                        "s3:PutBucketVersioning",
                        "s3:PutBucketPublicAccessBlock",
                        "s3:PutBucketLogging",
                    ],
                    resources=[f"arn:{core.Aws.PARTITION}:s3:::*"],
                ),
                iam.PolicyStatement(
                    actions=[
                        "s3:PutObject",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:s3:::{assets_s3_bucket_name}/*"
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "sns:CreateTopic",
                        "sns:DeleteTopic",
                        "sns:Subscribe",
                        "sns:Unsubscribe",
                        "sns:GetTopicAttributes",
                        "sns:SetTopicAttributes",
                    ],
                    resources=
                    [(f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:"
                      f"{pipeline_stack_name}*-*PipelineNotification*")],
                ),
                iam.PolicyStatement(
                    actions=[
                        "events:PutRule",
                        "events:DescribeRule",
                        "events:PutTargets",
                        "events:RemoveTargets",
                        "events:DeleteRule",
                        "events:PutEvents",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:rule/*",
                        f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                    ],
                ),
            ],
        )
        orchestrator_policy.attach_to_role(cloudformation_role)

        # Lambda function IAM setup
        lambda_passrole_policy = iam.PolicyStatement(
            actions=["iam:PassRole"], resources=[cloudformation_role.role_arn])
        # create sagemaker layer
        sm_layer = sagemaker_layer(self, blueprint_repository_bucket)
        # make sure the sagemaker code is uploaded first to the blueprints bucket
        sm_layer.node.add_dependency(custom_resource)
        # API Gateway and lambda setup to enable provisioning pipelines through API calls
        provisioner_apigw_lambda = aws_apigateway_lambda.ApiGatewayToLambda(
            self,
            "PipelineOrchestration",
            lambda_function_props={
                "runtime": lambda_.Runtime.PYTHON_3_8,
                "handler": "index.handler",
                "code":
                lambda_.Code.from_asset("lambdas/pipeline_orchestration"),
                "layers": [sm_layer],
                "timeout": core.Duration.minutes(10),
            },
            api_gateway_props={
                "defaultMethodOptions": {
                    "authorizationType": apigw.AuthorizationType.IAM,
                },
                "restApiName": f"{core.Aws.STACK_NAME}-orchestrator",
                "proxy": False,
                "dataTraceEnabled": True,
            },
        )

        # add lambda suppressions
        provisioner_apigw_lambda.lambda_function.node.default_child.cfn_options.metadata = suppress_lambda_policies(
        )

        provision_resource = provisioner_apigw_lambda.api_gateway.root.add_resource(
            "provisionpipeline")
        provision_resource.add_method("POST")
        status_resource = provisioner_apigw_lambda.api_gateway.root.add_resource(
            "pipelinestatus")
        status_resource.add_method("POST")
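        # Since both methods use IAM authorization, callers must SigV4-sign
        # their requests. A minimal client-side sketch (illustrative assumption,
        # not part of this stack; api_url and payload are hypothetical):
        #
        #   import json
        #   import botocore.session
        #   import requests
        #   from botocore.auth import SigV4Auth
        #   from botocore.awsrequest import AWSRequest
        #
        #   creds = botocore.session.Session().get_credentials()
        #   req = AWSRequest(method="POST",
        #                    url=api_url + "/provisionpipeline",
        #                    data=json.dumps(payload))
        #   SigV4Auth(creds, "execute-api", "us-east-1").add_auth(req)
        #   requests.post(req.url, data=req.body, headers=dict(req.headers))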
        blueprint_repository_bucket.grant_read(
            provisioner_apigw_lambda.lambda_function)
        provisioner_apigw_lambda.lambda_function.add_to_role_policy(
            lambda_passrole_policy)
        orchestrator_policy.attach_to_role(
            provisioner_apigw_lambda.lambda_function.role)

        # Environment variables setup
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="BLUEPRINT_BUCKET_URL",
            value=str(blueprint_repository_bucket.bucket_regional_domain_name),
        )
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="BLUEPRINT_BUCKET",
            value=str(blueprint_repository_bucket.bucket_name))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="ACCESS_BUCKET", value=str(access_logs_bucket.bucket_name))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="ASSETS_BUCKET", value=str(assets_s3_bucket_name))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="CFN_ROLE_ARN", value=str(cloudformation_role.role_arn))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="PIPELINE_STACK_NAME", value=pipeline_stack_name)
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="NOTIFICATION_EMAIL", value=notification_email.value_as_string)
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="REGION", value=core.Aws.REGION)
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="IS_MULTI_ACCOUNT", value=str(multi_account))

        # if multi account
        if multi_account:
            provisioner_apigw_lambda.lambda_function.add_environment(
                key="DEV_ACCOUNT_ID", value=dev_account_id.value_as_string)
            provisioner_apigw_lambda.lambda_function.add_environment(
                key="DEV_ORG_ID", value=dev_org_id.value_as_string)

            provisioner_apigw_lambda.lambda_function.add_environment(
                key="STAGING_ACCOUNT_ID",
                value=staging_account_id.value_as_string)
            provisioner_apigw_lambda.lambda_function.add_environment(
                key="STAGING_ORG_ID", value=staging_org_id.value_as_string)

            provisioner_apigw_lambda.lambda_function.add_environment(
                key="PROD_ACCOUNT_ID", value=prod_account_id.value_as_string)
            provisioner_apigw_lambda.lambda_function.add_environment(
                key="PROD_ORG_ID", value=prod_org_id.value_as_string)

        provisioner_apigw_lambda.lambda_function.add_environment(
            key="ECR_REPO_NAME", value=ecr_repo_name)

        provisioner_apigw_lambda.lambda_function.add_environment(
            key="ECR_REPO_ARN", value=ecr_repo_arn)

        provisioner_apigw_lambda.lambda_function.add_environment(
            key="LOG_LEVEL", value="DEBUG")
        cfn_policy_for_lambda = orchestrator_policy.node.default_child
        cfn_policy_for_lambda.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [{
                    "id":
                    "W76",
                    "reason":
                    "A complex IAM policy is required for this resource.",
                }]
            }
        }

        # CodePipeline with Git source definitions
        source_output = codepipeline.Artifact()
        # processing git_address to retrieve repo name
        repo_name_split = core.Fn.split("/", git_address.value_as_string)
        repo_name = core.Fn.select(5, repo_name_split)
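        # Index 5 is the repository name when an HTTPS clone URL is split on "/":
        # https://git-codecommit.<region>.amazonaws.com/v1/repos/<repo-name>
        # -> ["https:", "", "git-codecommit....", "v1", "repos", "<repo-name>"]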
        # getting codecommit repo cdk object using 'from_repository_name'
        repo = codecommit.Repository.from_repository_name(
            self, "AWSMLOpsFrameworkRepository", repo_name)
        codebuild_project = codebuild.PipelineProject(
            self,
            "Take config file",
            build_spec=codebuild.BuildSpec.from_object({
                "version": "0.2",
                "phases": {
                    "build": {
                        "commands": [
                            "ls -a",
                            "aws lambda invoke --function-name " +
                            provisioner_apigw_lambda.lambda_function.
                            function_name +
                            " --payload fileb://mlops-config.json response.json"
                            + " --invocation-type RequestResponse",
                        ]
                    }
                },
            }),
        )
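        # Note on the buildspec above: "--payload fileb://mlops-config.json"
        # sends the raw bytes of the config file committed to the repo, and
        # "--invocation-type RequestResponse" makes the CLI wait for the
        # orchestrator Lambda to finish, so a failed provisioning fails the build.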
        # Defining a Codepipeline project with CodeCommit as source
        codecommit_pipeline = codepipeline.Pipeline(
            self,
            "MLOpsCodeCommitPipeline",
            stages=[
                codepipeline.StageProps(
                    stage_name="Source",
                    actions=[
                        codepipeline_actions.CodeCommitSourceAction(
                            action_name="CodeCommit",
                            repository=repo,
                            branch="main",
                            output=source_output,
                        )
                    ],
                ),
                codepipeline.StageProps(
                    stage_name="TakeConfig",
                    actions=[
                        codepipeline_actions.CodeBuildAction(
                            action_name="provision_pipeline",
                            input=source_output,
                            outputs=[],
                            project=codebuild_project,
                        )
                    ],
                ),
            ],
            cross_account_keys=False,
        )
        codecommit_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=[lambda_invoke_action],
                resources=[
                    provisioner_apigw_lambda.lambda_function.function_arn
                ],
            ))
        codebuild_project.add_to_role_policy(
            iam.PolicyStatement(
                actions=[lambda_invoke_action],
                resources=[
                    provisioner_apigw_lambda.lambda_function.function_arn
                ],
            ))
        pipeline_child_nodes = codecommit_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id":
                        "W35",
                        "reason":
                        "This is a managed bucket generated by CDK for codepipeline.",
                    },
                    {
                        "id":
                        "W51",
                        "reason":
                        "This is a managed bucket generated by CDK for codepipeline.",
                    },
                ]
            }
        }

        # Custom resource for operational metrics
        metrics_mapping = core.CfnMapping(
            self,
            "AnonymousData",
            mapping={"SendAnonymousData": {
                "Data": "Yes"
            }})
        metrics_condition = core.CfnCondition(
            self,
            "AnonymousDatatoAWS",
            expression=core.Fn.condition_equals(
                metrics_mapping.find_in_map("SendAnonymousData", "Data"),
                "Yes"),
        )

        helper_function = lambda_.Function(
            self,
            "SolutionHelper",
            code=lambda_.Code.from_asset("lambdas/solution_helper"),
            handler="lambda_function.handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(60),
        )

        helper_function.node.default_child.cfn_options.metadata = suppress_lambda_policies(
        )
        create_id_function = core.CustomResource(
            self,
            "CreateUniqueID",
            service_token=helper_function.function_arn,
            properties={"Resource": "UUID"},
            resource_type="Custom::CreateUUID",
        )

        send_data_function = core.CustomResource(
            self,
            "SendAnonymousData",
            service_token=helper_function.function_arn,
            properties={
                "Resource": "AnonymousMetric",
                "UUID": create_id_function.get_att_string("UUID"),
                "gitSelected": git_address.value_as_string,
                "Region": core.Aws.REGION,
                "SolutionId": "SO0136",
                "Version": "%%VERSION%%",
            },
            resource_type="Custom::AnonymousData",
        )

        core.Aspects.of(helper_function).add(
            ConditionalResources(metrics_condition))
        core.Aspects.of(create_id_function).add(
            ConditionalResources(metrics_condition))
        core.Aspects.of(send_data_function).add(
            ConditionalResources(metrics_condition))
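        # ConditionalResources is an aspect that attaches the given CfnCondition
        # to every CfnResource in the subtree it visits. A minimal sketch of such
        # an aspect (an assumption; the real implementation ships with the solution):
        #
        #   import jsii
        #
        #   @jsii.implements(core.IAspect)
        #   class ConditionalResources:
        #       def __init__(self, condition: core.CfnCondition):
        #           self.condition = condition
        #
        #       def visit(self, node):
        #           if isinstance(node, core.CfnResource):
        #               node.cfn_options.condition = self.condition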

        # If user chooses Git as pipeline provision type, create codepipeline with Git repo as source
        core.Aspects.of(repo).add(ConditionalResources(git_address_provided))
        core.Aspects.of(codecommit_pipeline).add(
            ConditionalResources(git_address_provided))
        core.Aspects.of(codebuild_project).add(
            ConditionalResources(git_address_provided))

        # Create Template Interface
        parameters_list = [
            notification_email.logical_id,
            git_address.logical_id,
            existing_bucket.logical_id,
            existing_ecr_repo.logical_id,
        ]

        # if multi account
        if multi_account:
            parameters_list.extend([
                dev_account_id.logical_id,
                dev_org_id.logical_id,
                staging_account_id.logical_id,
                staging_org_id.logical_id,
                prod_account_id.logical_id,
                prod_org_id.logical_id,
            ])

        parameters_labels = {
            f"{notification_email.logical_id}": {
                "default": "Notification Email (Required)"
            },
            f"{git_address.logical_id}": {
                "default": "CodeCommit Repo URL Address (Optional)"
            },
            f"{existing_bucket.logical_id}": {
                "default": "Name of an Existing S3 Bucket (Optional)"
            },
            f"{existing_ecr_repo.logical_id}": {
                "default":
                "Name of an Existing Amazon ECR repository (Optional)"
            },
        }

        if multi_account:
            parameters_labels.update({
                f"{dev_account_id.logical_id}": {
                    "default": "Development Account ID (Required)"
                },
                f"{dev_org_id.logical_id}": {
                    "default":
                    "Development Account Organizational Unit ID (Required)"
                },
                f"{staging_account_id.logical_id}": {
                    "default": "Staging Account ID (Required)"
                },
                f"{staging_org_id.logical_id}": {
                    "default":
                    "Staging Account Organizational Unit ID (Required)"
                },
                f"{prod_account_id.logical_id}": {
                    "default": "Production Account ID (Required)"
                },
                f"{prod_org_id.logical_id}": {
                    "default":
                    "Production Account Organizational Unit ID (Required)"
                },
            })
        self.template_options.metadata = {
            "AWS::CloudFormation::Interface": {
                "ParameterGroups": [{
                    "Label": {
                        "default": "MLOps Framework Settings"
                    },
                    "Parameters": paramaters_list,
                }],
                "ParameterLabels":
                paramaters_labels,
            }
        }
        # Outputs #
        core.CfnOutput(
            self,
            id="BlueprintsBucket",
            value=
            f"https://s3.console.aws.amazon.com/s3/buckets/{blueprint_repository_bucket.bucket_name}",
            description="S3 Bucket to upload MLOps Framework Blueprints",
        )
        core.CfnOutput(
            self,
            id="AssetsBucket",
            value=
            f"https://s3.console.aws.amazon.com/s3/buckets/{assets_s3_bucket_name}",
            description="S3 Bucket to upload model artifact",
        )
        core.CfnOutput(
            self,
            id="ECRRepoName",
            value=ecr_repo_name,
            description="Amazon ECR repository's name",
        )
        core.CfnOutput(
            self,
            id="ECRRepoArn",
            value=ecr_repo_arn,
            description="Amazon ECR repository's arn",
        )
Example #19
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc: aws_ec2.Vpc,
                 ecs_cluster: aws_ecs.Cluster,
                 alb: elbv2.ApplicationLoadBalancer,
                 albTestListener: elbv2.ApplicationListener,
                 albProdListener: elbv2.ApplicationListener,
                 blueGroup: elbv2.ApplicationTargetGroup,
                 greenGroup: elbv2.ApplicationTargetGroup,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ECS_APP_NAME = "Nginx-app",
        ECS_DEPLOYMENT_GROUP_NAME = "NginxAppECSBlueGreen"
        ECS_DEPLOYMENT_CONFIG_NAME = "CodeDeployDefault.ECSLinear10PercentEvery1Minutes"
        ECS_TASKSET_TERMINATION_WAIT_TIME = 10
        ECS_TASK_FAMILY_NAME = "Nginx-microservice"
        ECS_APP_NAME = "Nginx-microservice"
        ECS_APP_LOG_GROUP_NAME = "/ecs/Nginx-microservice"
        DUMMY_TASK_FAMILY_NAME = "sample-Nginx-microservice"
        DUMMY_APP_NAME = "sample-Nginx-microservice"
        DUMMY_APP_LOG_GROUP_NAME = "/ecs/sample-Nginx-microservice"
        DUMMY_CONTAINER_IMAGE = "smuralee/nginx"

        # =============================================================================
        # ECR and CodeCommit repositories for the Blue/Green deployment
        # =============================================================================

        # ECR repository for the docker images
        NginxecrRepo = aws_ecr.Repository(self,
                                          "NginxRepo",
                                          image_scan_on_push=True)

        NginxCodeCommitrepo = aws_codecommit.Repository(
            self,
            "NginxRepository",
            repository_name=ECS_APP_NAME,
            description="Oussama application hosted on NGINX")

        # =============================================================================
        #   CODE BUILD and ECS TASK ROLES for the Blue/Green deployment
        # =============================================================================

        # IAM role for the Code Build project
        codeBuildServiceRole = aws_iam.Role(
            self,
            "codeBuildServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codebuild.amazonaws.com'))

        inlinePolicyForCodeBuild = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "ecr:GetAuthorizationToken", "ecr:BatchCheckLayerAvailability",
                "ecr:InitiateLayerUpload", "ecr:UploadLayerPart",
                "ecr:CompleteLayerUpload", "ecr:PutImage"
            ],
            resources=["*"])

        codeBuildServiceRole.add_to_policy(inlinePolicyForCodeBuild)

        # ECS task role
        ecsTaskRole = aws_iam.Role(
            self,
            "ecsTaskRoleForWorkshop",
            assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))

        ecsTaskRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonECSTaskExecutionRolePolicy"))

        # =============================================================================
        # CODE DEPLOY APPLICATION for the Blue/Green deployment
        # =============================================================================

        # Creating the code deploy application
        codeDeployApplication = codedeploy.EcsApplication(
            self, "NginxAppCodeDeploy")

        # Creating the code deploy service role
        codeDeployServiceRole = aws_iam.Role(
            self,
            "codeDeployServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codedeploy.amazonaws.com'))
        codeDeployServiceRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                "AWSCodeDeployRoleForECS"))

        # IAM role for custom lambda function
        customLambdaServiceRole = aws_iam.Role(
            self,
            "codeDeployCustomLambda",
            assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com'))

        inlinePolicyForLambda = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iam:PassRole", "sts:AssumeRole", "codedeploy:List*",
                "codedeploy:Get*", "codedeploy:UpdateDeploymentGroup",
                "codedeploy:CreateDeploymentGroup",
                "codedeploy:DeleteDeploymentGroup"
            ],
            resources=["*"])

        customLambdaServiceRole.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'))
        customLambdaServiceRole.add_to_policy(inlinePolicyForLambda)

        # Custom resource to create the deployment group
        createDeploymentGroupLambda = aws_lambda.Function(
            self,
            'createDeploymentGroupLambda',
            code=aws_lambda.Code.from_asset("custom_resources"),
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            handler='create_deployment_group.handler',
            role=customLambdaServiceRole,
            description="Custom resource to create deployment group",
            memory_size=128,
            timeout=core.Duration.seconds(60))

        # ================================================================================================
        # CloudWatch Alarms for 4XX errors
        blue4xxMetric = aws_cloudwatch.Metric(
            namespace='AWS/ApplicationELB',
            metric_name='HTTPCode_Target_4XX_Count',
            dimensions={
                "TargetGroup": blueGroup.target_group_full_name,
                "LoadBalancer": alb.load_balancer_full_name
            },
            statistic="sum",
            period=core.Duration.minutes(1))

        blueGroupAlarm = aws_cloudwatch.Alarm(
            self,
            "blue4xxErrors",
            alarm_name="Blue_4xx_Alarm",
            alarm_description=
            "CloudWatch Alarm for the 4xx errors of Blue target group",
            metric=blue4xxMetric,
            threshold=1,
            evaluation_periods=1)

        green4xxMetric = aws_cloudwatch.Metric(
            namespace='AWS/ApplicationELB',
            metric_name='HTTPCode_Target_4XX_Count',
            dimensions={
                "TargetGroup": greenGroup.target_group_full_name,
                "LoadBalancer": alb.load_balancer_full_name
            },
            statistic="sum",
            period=core.Duration.minutes(1))
        greenGroupAlarm = aws_cloudwatch.Alarm(
            self,
            "green4xxErrors",
            alarm_name="Green_4xx_Alarm",
            alarm_description=
            "CloudWatch Alarm for the 4xx errors of Green target group",
            metric=green4xxMetric,
            threshold=1,
            evaluation_periods=1)

        # ================================================================================================
        # DUMMY TASK DEFINITION for the initial service creation
        # This is required so the ECS service exists before the CodeDeploy deployment group is created
        # ================================================================================================
        sampleTaskDefinition = aws_ecs.FargateTaskDefinition(
            self,
            "sampleTaskDefn",
            family=DUMMY_TASK_FAMILY_NAME,
            cpu=256,
            memory_limit_mib=1024,
            task_role=ecsTaskRole,
            execution_role=ecsTaskRole)

        sampleContainerDefn = sampleTaskDefinition.add_container(
            "sampleAppContainer",
            image=aws_ecs.ContainerImage.from_registry(DUMMY_CONTAINER_IMAGE),
            logging=aws_ecs.AwsLogDriver(log_group=aws_logs.LogGroup(
                self,
                "sampleAppLogGroup",
                log_group_name=DUMMY_APP_LOG_GROUP_NAME,
                removal_policy=core.RemovalPolicy.DESTROY),
                                         stream_prefix=DUMMY_APP_NAME),
            docker_labels={"name": DUMMY_APP_NAME})

        port_mapping = aws_ecs.PortMapping(container_port=80,
                                           protocol=aws_ecs.Protocol.TCP)

        sampleContainerDefn.add_port_mappings(port_mapping)

        # ================================================================================================
        # ECS task definition using ECR image
        # Will be used by CODE DEPLOY for the Blue/Green deployment
        # ================================================================================================
        NginxTaskDefinition = aws_ecs.FargateTaskDefinition(
            self,
            "appTaskDefn",
            family=ECS_TASK_FAMILY_NAME,
            cpu=256,
            memory_limit_mib=1024,
            task_role=ecsTaskRole,
            execution_role=ecsTaskRole)

        NginxcontainerDefinition = NginxTaskDefinition.add_container(
            "NginxAppContainer",
            image=aws_ecs.ContainerImage.from_ecr_repository(
                NginxecrRepo, "latest"),
            logging=aws_ecs.AwsLogDriver(log_group=aws_logs.LogGroup(
                self,
                "NginxAppLogGroup",
                log_group_name=ECS_APP_LOG_GROUP_NAME,
                removal_policy=core.RemovalPolicy.DESTROY),
                                         stream_prefix=ECS_APP_NAME),
            docker_labels={"name": ECS_APP_NAME})
        NginxcontainerDefinition.add_port_mappings(port_mapping)

        # =============================================================================
        # ECS SERVICE for the Blue/Green deployment
        # =============================================================================
        NginxAppService = aws_ecs.FargateService(
            self,
            "NginxAppService",
            cluster=ecs_cluster,
            task_definition=NginxTaskDefinition,
            health_check_grace_period=core.Duration.seconds(10),
            desired_count=3,
            deployment_controller={
                "type": aws_ecs.DeploymentControllerType.CODE_DEPLOY
            },
            service_name=ECS_APP_NAME)
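        # With the CODE_DEPLOY deployment controller, traffic shifting between
        # the blue and green task sets is driven by CodeDeploy (via the
        # deployment group created below) rather than by ECS rolling updates.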

        NginxAppService.connections.allow_from(alb, aws_ec2.Port.tcp(80))
        NginxAppService.connections.allow_from(alb, aws_ec2.Port.tcp(8080))
        NginxAppService.attach_to_application_target_group(blueGroup)

        # =============================================================================
        # CODE DEPLOY - Deployment Group CUSTOM RESOURCE for the Blue/Green deployment
        # =============================================================================

        core.CustomResource(
            self,
            'customEcsDeploymentGroup',
            service_token=createDeploymentGroupLambda.function_arn,
            properties={
                "ApplicationName": codeDeployApplication.application_name,
                "DeploymentGroupName": ECS_DEPLOYMENT_GROUP_NAME,
                "DeploymentConfigName": ECS_DEPLOYMENT_CONFIG_NAME,
                "ServiceRoleArn": codeDeployServiceRole.role_arn,
                "BlueTargetGroup": blueGroup.target_group_name,
                "GreenTargetGroup": greenGroup.target_group_name,
                "ProdListenerArn": albProdListener.listener_arn,
                "TestListenerArn": albTestListener.listener_arn,
                "EcsClusterName": ecs_cluster.cluster_name,
                "EcsServiceName": NginxAppService.service_name,
                "TerminationWaitTime": ECS_TASKSET_TERMINATION_WAIT_TIME,
                "BlueGroupAlarm": blueGroupAlarm.alarm_name,
                "GreenGroupAlarm": greenGroupAlarm.alarm_name,
            })
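        # The backing handler is expected to translate these properties into a
        # boto3 call roughly like the following sketch (an assumption; the real
        # handler lives in custom_resources/create_deployment_group.py):
        #
        #   import boto3
        #   props = event["ResourceProperties"]
        #   boto3.client("codedeploy").create_deployment_group(
        #       applicationName=props["ApplicationName"],
        #       deploymentGroupName=props["DeploymentGroupName"],
        #       deploymentConfigName=props["DeploymentConfigName"],
        #       serviceRoleArn=props["ServiceRoleArn"],
        #       ecsServices=[{"serviceName": props["EcsServiceName"],
        #                     "clusterName": props["EcsClusterName"]}],
        #       deploymentStyle={"deploymentType": "BLUE_GREEN",
        #                        "deploymentOption": "WITH_TRAFFIC_CONTROL"},
        #   )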

        ecsDeploymentGroup = codedeploy.EcsDeploymentGroup.from_ecs_deployment_group_attributes(
            self,
            "ecsDeploymentGroup",
            application=codeDeployApplication,
            deployment_group_name=ECS_DEPLOYMENT_GROUP_NAME,
            deployment_config=codedeploy.EcsDeploymentConfig.
            from_ecs_deployment_config_name(self, "ecsDeploymentConfig",
                                            ECS_DEPLOYMENT_CONFIG_NAME))

        # =============================================================================
        # CODE BUILD PROJECT for the Blue/Green deployment
        # =============================================================================

        # Creating the code build project
        NginxAppcodebuild = aws_codebuild.Project(
            self,
            "NginxAppCodeBuild",
            role=codeBuildServiceRole,
            environment=aws_codebuild.BuildEnvironment(
                build_image=aws_codebuild.LinuxBuildImage.STANDARD_4_0,
                compute_type=aws_codebuild.ComputeType.SMALL,
                privileged=True,
                environment_variables={
                    'REPOSITORY_URI': {
                        'value':
                        NginxecrRepo.repository_uri,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    },
                    'TASK_EXECUTION_ARN': {
                        'value':
                        ecsTaskRole.role_arn,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    },
                    'TASK_FAMILY': {
                        'value':
                        ECS_TASK_FAMILY_NAME,
                        'type':
                        aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT
                    }
                }),
            source=aws_codebuild.Source.code_commit(
                repository=NginxCodeCommitrepo))

        # =============================================================================
        # CODE PIPELINE for Blue/Green ECS deployment
        # =============================================================================

        codePipelineServiceRole = aws_iam.Role(
            self,
            "codePipelineServiceRole",
            assumed_by=aws_iam.ServicePrincipal('codepipeline.amazonaws.com'))

        inlinePolicyForCodePipeline = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iam:PassRole", "sts:AssumeRole", "codecommit:Get*",
                "codecommit:List*", "codecommit:GitPull",
                "codecommit:UploadArchive", "codecommit:CancelUploadArchive",
                "codebuild:BatchGetBuilds", "codebuild:StartBuild",
                "codedeploy:CreateDeployment", "codedeploy:Get*",
                "codedeploy:RegisterApplicationRevision", "s3:Get*",
                "s3:List*", "s3:PutObject"
            ],
            resources=["*"])

        codePipelineServiceRole.add_to_policy(inlinePolicyForCodePipeline)

        sourceArtifact = codepipeline.Artifact('sourceArtifact')
        buildArtifact = codepipeline.Artifact('buildArtifact')

        # S3 bucket for storing the code pipeline artifacts
        NginxAppArtifactsBucket = s3.Bucket(
            self,
            "NginxAppArtifactsBucket",
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL)

        # S3 bucket policy for the code pipeline artifacts
        denyUnEncryptedObjectUploads = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.DENY,
            actions=["s3:PutObject"],
            principals=[aws_iam.AnyPrincipal()],
            resources=[NginxAppArtifactsBucket.bucket_arn + "/*"],
            conditions={
                "StringNotEquals": {
                    "s3:x-amz-server-side-encryption": "aws:kms"
                }
            })

        denyInsecureConnections = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.DENY,
            actions=["s3:*"],
            principals=[aws_iam.AnyPrincipal()],
            resources=[NginxAppArtifactsBucket.bucket_arn + "/*"],
            conditions={"Bool": {
                "aws:SecureTransport": "false"
            }})

        NginxAppArtifactsBucket.add_to_resource_policy(
            denyUnEncryptedObjectUploads)
        NginxAppArtifactsBucket.add_to_resource_policy(denyInsecureConnections)

        # Code Pipeline - CloudWatch trigger event is created by CDK
        codepipeline.Pipeline(
            self,
            "ecsBlueGreen",
            role=codePipelineServiceRole,
            artifact_bucket=NginxAppArtifactsBucket,
            stages=[
                codepipeline.StageProps(
                    stage_name='Source',
                    actions=[
                        aws_codepipeline_actions.CodeCommitSourceAction(
                            action_name='Source',
                            repository=NginxCodeCommitrepo,
                            output=sourceArtifact,
                        )
                    ]),
                codepipeline.StageProps(
                    stage_name='Build',
                    actions=[
                        aws_codepipeline_actions.CodeBuildAction(
                            action_name='Build',
                            project=NginxAppcodebuild,
                            input=sourceArtifact,
                            outputs=[buildArtifact])
                    ]),
                codepipeline.StageProps(
                    stage_name='Deploy',
                    actions=[
                        aws_codepipeline_actions.CodeDeployEcsDeployAction(
                            action_name='Deploy',
                            deployment_group=ecsDeploymentGroup,
                            app_spec_template_input=buildArtifact,
                            task_definition_template_input=buildArtifact,
                        )
                    ])
            ])

        # =============================================================================
        # Export the outputs
        # =============================================================================
        core.CfnOutput(self,
                       "ecsBlueGreenCodeRepo",
                       description="Demo app code commit repository",
                       export_name="ecsBlueGreenDemoAppRepo",
                       value=NginxCodeCommitrepo.repository_clone_url_http)

        core.CfnOutput(self,
                       "ecsBlueGreenLBDns",
                       description="Load balancer DNS",
                       export_name="ecsBlueGreenLBDns",
                       value=alb.load_balancer_dns_name)
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        ca: CodeArtifactStack,
        s3: S3Stack,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        ca_sts_policy = iam.PolicyStatement(
            sid="sts",
            effect=iam.Effect.ALLOW,
            actions=["sts:GetServiceBearerToken"],
            resources=["*"],
            conditions={
                "StringEquals": {"sts:AWSServiceName": "codeartifact.amazonaws.com"},
            },
        )
        ca_token_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "codeartifact:GetAuthorizationToken",
                "codeartifact:ReadFromRepository",
                "codeartifact:GetRepositoryEndpoint",
            ],
            resources=["*"],
        )

        with open("lambda/lambda_handler.py", encoding="utf8") as fp:
            handler_code = fp.read()

        # Create Lambda
        lambda_fn = lambda_.Function(
            self,
            "MWAA-UpdateCodeArtifactIndexURL",
            code=lambda_.InlineCode(handler_code),
            handler="index.handler",
            timeout=core.Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_7,
            environment={
                "CA_DOMAIN": ca.repo.domain_name,
                "CA_DOMAIN_OWNER": self.account,
                "CA_REPOSITORY_NAME": ca.repo.repository_name,
                "BUCKET_NAME": s3.instance.bucket_name,
            },
            initial_policy=[ca_token_policy, ca_sts_policy],
        )

        # Give permission to read/write from the S3 bucket
        s3.instance.grant_read_write(lambda_fn)

        # Run Lambda every 10 hours
        rule = events.Rule(
            self,
            "Rule",
            schedule=events.Schedule.rate(core.Duration.hours(10)),
            enabled=True,
        )
        rule.add_target(targets.LambdaFunction(lambda_fn))

        # Invoke Lambda once after cdk deploy
        core.CustomResource(self, "InvokeLambda", service_token=lambda_fn.function_arn)
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        branch: str,
        sandbox_account: str,
        **kwargs
    ) -> None:
        """Init the Construct fore creating hd-auto-service-catalog.

        Args:
            scope: CDK Parent Stack app.py
            id: Name of the stack: "hd-auto-service-catalog"
            branch: string for A/B Deployment
            sandbox_account: Sandbox account id
            **kwargs:
        """
        super().__init__(scope, id, **kwargs)

        # # The code that defines your stack goes here
        # def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
        #     string = "".join(random.choice(chars) for _ in range(size)).lower()
        #     return string
        #
        # branch = branch

        # ##############################################################
        # Tagging List
        # ##############################################################

        tagging_list = []

        # ##############################################################
        # Account List
        # ##############################################################

        # account_list = ["431892011317"]

        # ##############################################################
        # Parameters
        # ##############################################################

        # ===============================
        # App name
        app_name = core.CfnParameter(
            self,
            id="AppName-{}".format(branch),
            description="Name of the app",
            type="String",
            default="hd-auto-cicd-service-catalog",
        )

        # ===============================
        # Environment name
        env_name = core.CfnParameter(
            self,
            id="EnvName-{}".format(branch),
            description="Name of the environment",
            type="String",
            default="auto",
        )

        # ===============================
        # IAM Role and Policy parameter
        role_name = core.CfnParameter(
            self,
            id="ConstraintRoleName-{}".format(branch),
            description="Name of the launch constraint role",
            type="String",
            default="CrossAccountAdmin",
        )

        # ===============================
        # Principal management lambdas
        unassign_lambda = core.CfnParameter(
            self,
            id="UnassignPrincipalLambdaName-{}".format(branch),
            description="Name of the unassign principal management Lambda",
            type="String",
            default="UnassignPrincipalFromServiceCatalog",
        )

        assign_lambda = core.CfnParameter(
            self,
            id="AssignPrincipalLambdaName-{}".format(branch),
            description="Name of the assign principal management Lambda",
            type="String",
            default="AssignPrincipalToServiceCatalog",
        )

        # ===============================
        # Branch name
        if branch == "master":
            branch_name = "master"
        elif branch == "dmz":
            branch_name = "dmz"
        else:
            branch_name = "feature/{}".format(branch.split("-")[1])

        # ===============================
        # Path name
        path_name = core.CfnParameter(
            self,
            id="Path-{}".format(branch),
            description="CodeCommit repository folder for Service Catalogs Products",
            type="String",
            default="service_catalog/products/",
        )

        # ===============================
        # Path for the configuration INI
        path_ini = core.CfnParameter(
            self,
            id="ConfigINI-{}".format(branch),
            description="Configuration file path",
            type="String",
            default="service_catalog/config/config_{}.ini".format(branch.split("-")[0]),
        )

        # ===============================
        # Path for the template store
        template_store = core.CfnParameter(
            self,
            id="TemplateStore-{}".format(branch),
            description="S3 Bucket and Folder evaluated CloudFormation Templates",
            type="String",
            default="template-store/",
        )

        # ##############################################################
        # Artifacts Bucket
        # ##############################################################

        artifact_bucket = _s3.Bucket(
            self,
            id="ArtifactsBucket-{}".format(branch),
            bucket_name="my-sandbox-cicd-build-artifacts-{}".format(
                branch.split("-")[0]
            ),
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        empty_s3_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                "s3:DeleteBucket",
                "s3:ListBucket",
                "s3:DeleteObjects",
                "s3:DeleteObject",
            ],
            resources=[artifact_bucket.bucket_arn, artifact_bucket.bucket_arn + "/*",],
        )

        empty_bucket_lambda = Lambda.create_lambda(
            self,
            name="EmptyArtifactsBucket-{}".format(branch),
            function_name="EmptyArtifactsBucket-{}".format(branch),
            handler="empty_bucket.empty_bucket",
            code_injection_method=_lambda.Code.asset(path="./src/lambda/empty_bucket/"),
            lambda_runtime=_lambda.Runtime.PYTHON_3_7,
            amount_of_memory=128,
            timeout=30,
            amount_of_retries=0,
            rules_to_invoke=None,
            events_to_invoke=None,
            lambda_layers_to_use=None,
            policy_statements=[empty_s3_policy,],
            log_retention=None,
            environment_vars=[],
        )

        cr_empty_bucket = core.CustomResource(
            self,
            id="CR-EmptyBucket-{}".format(branch),
            service_token=empty_bucket_lambda.lambda_function_object.function_arn,
            properties={"BUCKET_NAME": artifact_bucket.bucket_name,},
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        cr_empty_bucket.node.add_dependency(artifact_bucket)

        tagging_list.append(cr_empty_bucket)
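        # A rough sketch of what the empty_bucket handler is expected to do on
        # stack deletion (an assumption; the real code lives under
        # ./src/lambda/empty_bucket/):
        #
        #   import boto3
        #
        #   def empty_bucket(event, context):
        #       if event["RequestType"] == "Delete":
        #           bucket_name = event["ResourceProperties"]["BUCKET_NAME"]
        #           boto3.resource("s3").Bucket(bucket_name).objects.all().delete()
        #       # ...then signal success back to CloudFormation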

        artifact_bucket.add_to_resource_policy(
            permission=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["s3:GetObject"],
                resources=[artifact_bucket.bucket_arn + "/template-store/*",],
                principals=[_iam.ServicePrincipal("servicecatalog.amazonaws.com"),],
            )
        )

        tagging_list.append(artifact_bucket)

        # ##############################################################
        # Code repo
        # ##############################################################

        if branch == "master":
            service_catalog_git = _code.Repository(
                self,
                id="ServiceCatalogGit",
                repository_name="hd-auto-service-catalog",
                description="This git hosts all templates for the ServiceCatalog and the CICD itself.",
            )
            tagging_list.append(service_catalog_git)
        else:
            service_catalog_git = _code.Repository.from_repository_name(
                self, id="ServiceCatalogGit", repository_name="hd-auto-service-catalog",
            )
            tagging_list.append(service_catalog_git)

        # ##############################################################
        # Lambda Layer
        # ##############################################################

        source_code = _lambda.Code.from_asset("./src/lambda_layer/")

        layer = _lambda.LayerVersion(
            self,
            id="Python3_7_Layer-{}".format(branch),
            code=source_code,
            compatible_runtimes=[_lambda.Runtime.PYTHON_3_7],
        )

        tagging_list.append(layer)

        # ##############################################################
        # CodeBuild Project
        # ##############################################################

        build_project = _codebuild.PipelineProject(
            self,
            id="BuildProject-{}".format(branch),
            project_name="hd-auto-cicd-service-catalog-{}".format(branch),
            description="Build project for the Service Catalog pipeline",
            environment=_codebuild.BuildEnvironment(
                build_image=_codebuild.LinuxBuildImage.STANDARD_4_0, privileged=True
            ),
            cache=_codebuild.Cache.bucket(artifact_bucket, prefix="codebuild-cache"),
            build_spec=_codebuild.BuildSpec.from_source_filename("./buildspec.yaml"),
        )

        tagging_list.append(build_project)

        # CodeBuild IAM permissions to read write to s3
        artifact_bucket.grant_read_write(build_project)
        # Build and create test runs for templates
        build_project.add_to_role_policy(
            statement=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                not_actions=["aws-portal:*", "organizations:*"],
                resources=["*"],  # No further restriction due to IAM!
            )
        )

        # ##############################################################
        # Service Catalog
        # ##############################################################

        portfolio = _servicecatalog.CfnPortfolio(
            self,
            id="BasicPortfolio-{}".format(branch),
            display_name="hd-mdp-portfolio-{}".format(branch),
            provider_name="MDP-Team",
            accept_language="en",
            description="""
                This portfolio contains AWS Services combined into technical and functional approved architectures.
                You don't need IAM permissions to run those products. You will use them.
                """,
        )

        remove_portfolio_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                "servicecatalog:SearchProductsAsAdmin",
                "servicecatalog:DeleteProduct",
                "servicecatalog:DeleteConstraint",
                "servicecatalog:ListConstraintsForPortfolio",
                "servicecatalog:DisassociatePrincipalFromPortfolio",
                "servicecatalog:DisassociateProductFromPortfolio",
            ],
            resources=["*",],
        )

        iam_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                "iam:GetRole",
                "iam:PassRole",
                "iam:CreateRole",
                "iam:DeleteRole",
                "iam:ListRoles",
                "iam:PutRolePolicy",
                "iam:DeleteRolePolicy",
                "iam:DeletePolicy",
            ],
            resources=[
                "arn:aws:iam::{}:role/{}".format(
                    core.Aws.ACCOUNT_ID, role_name.value_as_string
                ),
            ],
        )

        remove_products_lambda = Lambda.create_lambda(
            self,
            name="RemoveProductsFromPortfolio-{}".format(branch),
            function_name="RemoveProductsFromPortfolio-{}".format(branch),
            handler="remove_portfolio.remove_portfolio",
            code_injection_method=_lambda.Code.asset(
                path="./src/lambda/remove_portfolio/"
            ),
            lambda_runtime=_lambda.Runtime.PYTHON_3_7,
            amount_of_memory=128,
            timeout=30,
            amount_of_retries=0,
            rules_to_invoke=None,
            events_to_invoke=None,
            lambda_layers_to_use=None,
            policy_statements=[remove_portfolio_policy, iam_policy],
            log_retention=None,
            environment_vars=[
                {"Key": "SANDBOX_ACCOUNT_ID", "Value": "{}".format(sandbox_account),}
            ],
        )

        cr_remove_products = core.CustomResource(
            self,
            id="CR-RemoveProductsFromPortfolio-{}".format(branch),
            service_token=remove_products_lambda.lambda_function_object.function_arn,
            properties={"PORTFOLIO_ID": portfolio.ref,},
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        cr_remove_products.node.add_dependency(portfolio)

        iam_role_list = [role_name.value_as_string]
        if branch == "master":
            # TODO: Accept Portfolio share principal management
            #     for idx, account in enumerate(account_list):
            #         _servicecatalog.CfnPortfolioShare(
            #             self,
            #             id="PortfolioSharing-{}-{}".format(branch, idx),
            #             account_id=account,
            #             portfolio_id=portfolio.ref,
            #             accept_language="en",
            #         )
            for idx, role in enumerate(iam_role_list):
                _servicecatalog.CfnPortfolioPrincipalAssociation(
                    self,
                    id="PrincipalAssociation-{}-{}".format(branch, idx),
                    portfolio_id=portfolio.ref,
                    principal_arn="arn:aws:iam::{}:role/{}".format(
                        core.Aws.ACCOUNT_ID, role
                    ),
                    principal_type="IAM",
                    accept_language="en",
                )
            core.CfnOutput(
                self, id="PortfolioId-{}".format(branch), value=portfolio.ref
            )
            tagging_list.append(portfolio)
        else:
            for idx, role in enumerate(iam_role_list):
                _servicecatalog.CfnPortfolioPrincipalAssociation(
                    self,
                    id="PrincipalAssociation-{}-{}".format(branch, idx),
                    portfolio_id=portfolio.ref,
                    principal_arn="arn:aws:iam::{}:role/{}".format(
                        core.Aws.ACCOUNT_ID, role
                    ),
                    principal_type="IAM",
                    accept_language="en",
                )
            core.CfnOutput(
                self, id="PortfolioId-{}".format(branch), value=portfolio.ref
            )
            tagging_list.append(portfolio)

        # ##############################################################
        # Lambda Permissions
        # ##############################################################

        s3_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                "s3:GetObject*",
                "s3:GetBucket*",
                "s3:List*",
                "s3:DeleteObject*",
                "s3:PutObject*",
                "s3:Abort*",
            ],
            resources=[artifact_bucket.bucket_arn, artifact_bucket.bucket_arn + "/*"],
        )

        codecommit_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                "codecommit:GetDifferences",
                "codecommit:GetBranch",
                "codecommit:GetCommit",
            ],
            resources=[service_catalog_git.repository_arn],
            conditions={"StringEquals": {"aws:RequestedRegion": "eu-central-1"}},
        )

        codebuild_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=["codebuild:StartBuild", "codebuild:UpdateProject*"],
            resources=[build_project.project_arn],
            conditions={"StringEquals": {"aws:RequestedRegion": "eu-central-1"}},
        )

        service_catalog_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                "servicecatalog:CreateProduct",
                "servicecatalog:CreateProvisioningArtifact",
                "servicecatalog:UpdateProvisioningArtifact",
                "servicecatalog:DeleteProvisioningArtifact",
                "servicecatalog:ListProvisioningArtifacts",
                "servicecatalog:ListPortfolios",
                "servicecatalog:SearchProductsAsAdmin",
                "servicecatalog:AssociateProductWithPortfolio",
                "servicecatalog:AssociatePrincipalWithPortfolio",
                "servicecatalog:DisassociatePrincipalFromPortfolio",
                "servicecatalog:DisassociateProductFromPortfolio",
                "servicecatalog:DeleteProduct",
                "servicecatalog:CreatePortfolioShare",
                "servicecatalog:AcceptPortfolioShare",
                "servicecatalog:CreateConstraint",
                "servicecatalog:DeleteConstraint",
                "servicecatalog:ListConstraintsForPortfolio",
            ],
            resources=["*"],
        )

        sts_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=["sts:AssumeRole"],
            resources=[
                "arn:aws:iam::{}:role/{}".format(
                    sandbox_account, role_name.value_as_string
                ),
            ],
        )

        codepipeline_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                "codepipeline:PutJobFailureResult",  # Supports only a wildcard (*) in the policy Resource element.
                "codepipeline:PutJobSuccessResult",  # Supports only a wildcard (*) in the policy Resource element.
            ],  # https://docs.aws.amazon.com/codepipeline/latest/userguide/permissions-reference.html
            resources=["*"],
            conditions={"StringEquals": {"aws:RequestedRegion": "eu-central-1"}},
        )
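
        # Hedged sketch (not from this repo): the Lambdas invoked by the
        # pipeline below must report their outcome back to CodePipeline,
        # which is what the two wildcard actions above are for. Assuming the
        # standard "CodePipeline.job" invoke event, a minimal handler would
        # look roughly like this:
        #
        #   import boto3
        #
        #   def handler(event, context):
        #       job_id = event["CodePipeline.job"]["id"]
        #       codepipeline = boto3.client("codepipeline")
        #       try:
        #           ...  # do the actual work
        #           codepipeline.put_job_success_result(jobId=job_id)
        #       except Exception as exc:
        #           codepipeline.put_job_failure_result(
        #               jobId=job_id,
        #               failureDetails={"type": "JobFailed", "message": str(exc)},
        #           )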

        lambda_policy = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=[
                "lambda:GetFunction",
                "lambda:CreateFunction",
                "lambda:DeleteFunction",
                "lambda:AddPermission",
                "lambda:RemovePermission",
                "lambda:CreateEventSourceMapping",
                "lambda:DeleteEventSourceMapping",
                "lambda:InvokeFunction",
                "lambda:UpdateFunctionCode",
                "lambda:UpdateFunctionConfiguration",
            ],
            resources=[
                "arn:aws:lambda:{}:{}:function:{}-{}".format(
                    core.Aws.REGION,
                    sandbox_account,
                    unassign_lambda.value_as_string,
                    sandbox_account,
                ),
                "arn:aws:lambda:{}:{}:function:{}-{}".format(
                    core.Aws.REGION,
                    sandbox_account,
                    assign_lambda.value_as_string,
                    sandbox_account,
                ),
            ],
            conditions={"StringEquals": {"aws:RequestedRegion": "eu-central-1"}},
        )

        # ##############################################################
        # CICD Lambdas
        # ##############################################################

        # ==========================
        # Get Latest Git Meta Data
        git_metadata = Lambda.create_lambda(
            self,
            name="GetLastGitChanges-{}".format(branch),
            function_name="GetLastGitChanges-{}".format(branch,),
            handler="git_metadata.get_changes",
            code_injection_method=_lambda.Code.asset(path="./src/lambda/git_metadata/"),
            lambda_runtime=_lambda.Runtime.PYTHON_3_7,
            amount_of_memory=128,
            timeout=30,
            amount_of_retries=0,
            rules_to_invoke=None,
            events_to_invoke=None,
            lambda_layers_to_use=[layer],
            policy_statements=[
                codecommit_policy,
                codebuild_policy,
                codepipeline_policy,
                service_catalog_policy,
            ],
            log_retention=None,
            environment_vars=[
                {
                    "Key": "REPOSITORY_NAME",
                    "Value": "{}".format(service_catalog_git.repository_name),
                },
            ],
        )

        # ==========================
        # Principal Management Lambda
        principal_management = Lambda.create_lambda(
            self,
            name="PrincipalManagement-{}".format(branch),
            function_name="PrincipalManagement-{}".format(branch),
            handler="principal_management.principal_management",
            code_injection_method=_lambda.Code.asset(
                path="./src/lambda/principal_management/"
            ),
            lambda_runtime=_lambda.Runtime.PYTHON_3_7,
            amount_of_memory=1024,
            timeout=120,
            amount_of_retries=0,
            rules_to_invoke=None,
            events_to_invoke=None,
            lambda_layers_to_use=[layer],
            policy_statements=[
                iam_policy,
                lambda_policy,
                sts_policy,
                service_catalog_policy,
                codepipeline_policy,
                codecommit_policy,
            ],
            log_retention=None,
            environment_vars=[
                {"Key": "SANDBOX_ACCOUNT_ID", "Value": "{}".format(sandbox_account),}
            ],
        )

        # ==========================
        # Sync Service Catalog Lambda

        service_catalog_synchronisation = Lambda.create_lambda(
            self,
            name="UpdateServiceCatalog-{}".format(branch),
            function_name="UpdateServiceCatalog-{}".format(branch),
            handler="sync_catalog.service_catalog_janitor",
            code_injection_method=_lambda.Code.asset(
                path="./src/lambda/update_servicecatalog/"
            ),
            lambda_runtime=_lambda.Runtime.PYTHON_3_7,
            amount_of_memory=1024,
            timeout=120,
            amount_of_retries=0,
            rules_to_invoke=None,
            events_to_invoke=None,
            lambda_layers_to_use=[layer],
            policy_statements=[
                sts_policy,
                service_catalog_policy,
                codepipeline_policy,
                codecommit_policy,
                iam_policy,
                s3_policy,
            ],
            log_retention=None,
            environment_vars=[
                {
                    "Key": "LOCAL_ROLE_NAME_SC",
                    "Value": "{}".format(role_name.value_as_string),
                },
                {"Key": "SANDBOX_ACCOUNT_ID", "Value": "{}".format(sandbox_account),},
                {
                    "Key": "REPOSITORY_NAME",
                    "Value": "{}".format(service_catalog_git.repository_name),
                },
                {"Key": "PATH_INI", "Value": "{}".format(path_ini.value_as_string)},
                {"Key": "PATH", "Value": "{}".format(path_name.value_as_string)},
                {"Key": "BUCKET", "Value": "{}".format(artifact_bucket.bucket_name)},
                {
                    "Key": "S3_PATH",
                    "Value": "{}".format(template_store.value_as_string),
                },
            ],
        )
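
        # Hedged note: assuming the Lambda.create_lambda wrapper (not shown
        # here) maps the "Key"/"Value" pairs above onto the function's
        # environment, the handler side would read them the usual way, e.g.:
        #
        #   import os
        #
        #   REPOSITORY_NAME = os.environ["REPOSITORY_NAME"]
        #   SANDBOX_ACCOUNT_ID = os.environ["SANDBOX_ACCOUNT_ID"]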

        # ##############################################################
        # CodePipeline
        # ##############################################################

        # General output
        source_output = _codepipeline.Artifact("git-change")
        tested_source_files = _codepipeline.Artifact("tested-cfn")

        cicd_pipeline = _codepipeline.Pipeline(
            self,
            id="ServiceCatalogPipeline-{}".format(branch),
            pipeline_name="ServiceCatalog-CICD-{}".format(branch),
            artifact_bucket=artifact_bucket,
            stages=[
                _codepipeline.StageProps(
                    stage_name="Source_CFN-Templates",
                    actions=[
                        _codepipeline_actions.CodeCommitSourceAction(
                            action_name="SourceControlCFNTemplates",
                            output=source_output,
                            repository=service_catalog_git,
                            variables_namespace="source",
                            branch=branch_name,
                        ),
                    ],
                ),
                _codepipeline.StageProps(
                    stage_name="Getting_CFN-Template",
                    actions=[
                        _codepipeline_actions.LambdaInvokeAction(
                            action_name="GettingCFNTemplate",
                            lambda_=git_metadata.lambda_function_object,
                            user_parameters={
                                "before_commit": "",
                                "after_commit": "#{source.CommitId}",
                            },
                            variables_namespace="filtered_source",
                        )
                    ],
                ),
                _codepipeline.StageProps(
                    stage_name="Testing_CFN-Template",
                    actions=[
                        _codepipeline_actions.CodeBuildAction(
                            type=_codepipeline_actions.CodeBuildActionType.BUILD,
                            action_name="TestingCFNTemplates",
                            project=build_project,
                            input=source_output,
                            outputs=[tested_source_files],
                            environment_variables={
                                "PIPELINE_NAME": _codebuild.BuildEnvironmentVariable(
                                    value="ServiceCatalog-CICD-{}".format(branch),
                                    type=_codebuild.BuildEnvironmentVariableType.PLAINTEXT,
                                ),
                                "FILES_ADDED": _codebuild.BuildEnvironmentVariable(
                                    value="#{filtered_source.added_files}",
                                    type=_codebuild.BuildEnvironmentVariableType.PLAINTEXT,
                                ),
                                "FILES_MODIFIED": _codebuild.BuildEnvironmentVariable(
                                    value="#{filtered_source.modified_files}",
                                    type=_codebuild.BuildEnvironmentVariableType.PLAINTEXT,
                                ),
                                "FILES_DELETED": _codebuild.BuildEnvironmentVariable(
                                    value="#{filtered_source.deleted_files}",
                                    type=_codebuild.BuildEnvironmentVariableType.PLAINTEXT,
                                ),
                                "JOB_ID": _codebuild.BuildEnvironmentVariable(
                                    value="#{filtered_source.job_id}",
                                    type=_codebuild.BuildEnvironmentVariableType.PLAINTEXT,
                                ),
                                "REPOSITORY_BRANCH": _codebuild.BuildEnvironmentVariable(
                                    value="#{source.BranchName}",
                                    type=_codebuild.BuildEnvironmentVariableType.PLAINTEXT,
                                ),
                                "REPOSITORY_NAME": _codebuild.BuildEnvironmentVariable(
                                    value="#{source.RepositoryName}",
                                    type=_codebuild.BuildEnvironmentVariableType.PLAINTEXT,
                                ),
                            },
                        )
                    ],
                ),
                _codepipeline.StageProps(
                    stage_name="Principal_Management",
                    actions=[
                        _codepipeline_actions.LambdaInvokeAction(
                            action_name="PrincipalManagement",
                            lambda_=principal_management.lambda_function_object,
                            user_parameters={
                                "job_id": "#{filtered_source.job_id}",
                                "commit_id": "#{filtered_source.commit_id}",
                                "portfolio_id": portfolio.ref,
                            },
                        )
                    ],
                ),
                _codepipeline.StageProps(
                    stage_name="Update_Servicecatalog",
                    actions=[
                        _codepipeline_actions.LambdaInvokeAction(
                            action_name="UpdateServiceCatalog",
                            lambda_=service_catalog_synchronisation.lambda_function_object,
                            inputs=[source_output],
                            user_parameters={
                                "modified_files": "#{filtered_source.modified_files}",
                                "added_files": "#{filtered_source.added_files}",
                                "deleted_files": "#{filtered_source.deleted_files}",
                                "job_id": "#{filtered_source.job_id}",
                                "commit_id": "#{filtered_source.commit_id}",
                                "portfolio_id": portfolio.ref,
                            },
                        )
                    ],
                ),
            ],
        )
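
        # Hedged note on the "#{namespace.variable}" strings above:
        # CodePipeline resolves them at execution time from the namespaces
        # declared via variables_namespace, and user_parameters reach the
        # Lambda as a JSON string. A handler would typically unpack them
        # like this (sketch, not this repo's code):
        #
        #   import json
        #
        #   def handler(event, context):
        #       job = event["CodePipeline.job"]
        #       params = json.loads(
        #           job["data"]["actionConfiguration"]["configuration"]["UserParameters"]
        #       )
        #       after_commit = params["after_commit"]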

        cicd_pipeline.add_to_role_policy(
            statement=_iam.PolicyStatement(
                effect=_iam.Effect.ALLOW,
                actions=["codecommit:GetBranch", "codecommit:GetCommit"],
                resources=[service_catalog_git.repository_arn],
            )
        )

        tagging_list.append(cicd_pipeline)

        # ##############################################################
        # Tag resources
        # ##############################################################

        Tags.tag_resources(
            resources_list=tagging_list,
            keys_list=["app", "env"],
            values_list=[app_name.value_as_string, env_name.value_as_string],
        )

        _ssm.StringParameter(
            self,
            id="LambdaLayerExport-{}".format(branch),
            parameter_name="/hd/mdp/{}/lambda/layer-pandas-numpy-servicecatalog".format(
                branch
            ),
            description="Lambda Layer ARN",
            string_value=layer.layer_version_arn,
        )
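
        # Hedged usage note: a consumer stack could resolve this parameter
        # back into a layer object, e.g. (sketch):
        #
        #   layer_arn = _ssm.StringParameter.value_for_string_parameter(
        #       self, "/hd/mdp/{}/lambda/layer-pandas-numpy-servicecatalog".format(branch))
        #   layer = _lambda.LayerVersion.from_layer_version_arn(self, "SharedLayer", layer_arn)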
    def __init__(self, scope: core.Construct, id: str,
                 log_bucket: _s3.Bucket,
                 config_table: _dynamodb.Table,
                 tshirt_size: str,
                 sink_bucket: _s3.Bucket,
                 vpc: _ec2.Vpc,
                 **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        service_role = _iam.Role(
            self, 'BatchEmrServiceRole',
            assumed_by=_iam.ServicePrincipal('elasticmapreduce.amazonaws.com')
        )

        service_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonElasticMapReduceRole'))

        cluster_role = _iam.Role(
            self, 'BatchEmrClusterRole',
            assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com")
        )

        _iam.Policy(
            self, 'BatchEmrClusterPolicy',
            statements=[
                _iam.PolicyStatement(
                    actions=[
                        "glue:CreateDatabase",
                        "glue:UpdateDatabase",
                        "glue:DeleteDatabase",
                        "glue:GetDatabase",
                        "glue:GetDatabases",
                        "glue:CreateTable",
                        "glue:UpdateTable",
                        "glue:DeleteTable",
                        "glue:GetTable",
                        "glue:GetTables",
                        "glue:GetTableVersions",
                        "glue:CreatePartition",
                        "glue:BatchCreatePartition",
                        "glue:UpdatePartition",
                        "glue:DeletePartition",
                        "glue:BatchDeletePartition",
                        "glue:GetPartition",
                        "glue:GetPartitions",
                        "glue:BatchGetPartition",
                        "glue:CreateUserDefinedFunction",
                        "glue:UpdateUserDefinedFunction",
                        "glue:DeleteUserDefinedFunction",
                        "glue:GetUserDefinedFunction",
                        "glue:GetUserDefinedFunctions",
                        "cloudwatch:PutMetricData",
                        "dynamodb:ListTables",
                        "s3:HeadBucket",
                        "ec2:Describe*",
                    ],
                    resources=['*']
                ),
                _iam.PolicyStatement(
                    actions=['s3:GetObject'],
                    resources=[
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES + DataGenConfig.DSDGEN_INSTALL_SCRIPT,
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES + DataGenConfig.JAR_FILE
                    ]
                ),
                _iam.PolicyStatement(
                    actions=['s3:PutObject'],
                    resources=[log_bucket.bucket_arn + "/data-generator/*"]
                ),
                _iam.PolicyStatement(
                    actions=[
                        "s3:AbortMultipartUpload",
                        "s3:CreateBucket",
                        "s3:DeleteObject",
                        "s3:GetBucketVersioning",
                        "s3:GetObject",
                        "s3:GetObjectTagging",
                        "s3:GetObjectVersion",
                        "s3:ListBucket",
                        "s3:ListBucketMultipartUploads",
                        "s3:ListBucketVersions",
                        "s3:ListMultipartUploadParts",
                        "s3:PutBucketVersioning",
                        "s3:PutObject",
                        "s3:PutObjectTagging"
                    ],
                    resources=[
                        sink_bucket.bucket_arn + '/*',
                        sink_bucket.bucket_arn
                    ]
                )
            ],
            roles=[cluster_role]
        )

        cluster_role.add_managed_policy(_iam.ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'))

        _iam.CfnInstanceProfile(
            self, 'BatchEmrClusterInstanceProfile',
            roles=[cluster_role.role_name],
            instance_profile_name=cluster_role.role_name
        )

        # Security Groups for the EMR cluster (private subnet)
        # https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-man-sec-groups.html#emr-sg-elasticmapreduce-master-private
        master_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-Master-Private', vpc=vpc)
        slave_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-Slave-Private', vpc=vpc)
        service_sg = _ec2.SecurityGroup(self, 'ElasticMapReduce-ServiceAccess', vpc=vpc, allow_all_outbound=False)

        # Service SG used by the proxy instance
        service_sg.add_ingress_rule(master_sg, _ec2.Port.tcp(9443))
        service_sg.add_egress_rule(master_sg, _ec2.Port.tcp(8443))
        service_sg.add_egress_rule(slave_sg, _ec2.Port.tcp(8443))

        # EMR Master
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        # EMR Slave
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        with open('common/common_cdk/lambda/datagen_config.py', 'r') as f:
            lambda_source = f.read()

        configure_datagen_function = _lambda.SingletonFunction(
            self, 'BatchConfigureDatagenLambda',
            uuid="58a9a222-ff07-11ea-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='datagen-config',
            environment={
                'TABLE_NAME': config_table.table_name,
                'JAR_LOCATION': BINARIES_LOCATION + DataGenConfig.JAR_FILE,
            },
            timeout=core.Duration.seconds(10)
        )

        configure_datagen_function.role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'dynamodb:GetItem',
                    'dynamodb:PutItem',
                ],
                resources=[config_table.table_arn]
            )
        )
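
        # Hedged sketch: datagen_config.py is not shown here. Given the
        # TABLE_NAME/JAR_LOCATION environment and the GetItem/PutItem grant
        # above, a handler of roughly this shape would fit (hypothetical):
        #
        #   import os
        #   import boto3
        #
        #   def handler(event, context):
        #       table = boto3.resource("dynamodb").Table(os.environ["TABLE_NAME"])
        #       item = table.get_item(Key={"param": event["Param"]}).get("Item", {})
        #       iterator = int(item.get("iterator", 0))
        #       return {"StepParam": ["spark-submit", os.environ["JAR_LOCATION"], str(iterator)]}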

        terminate_cluster = _sfn_tasks.EmrTerminateCluster(
            self, 'BatchDeleteCluster',
            cluster_id=_sfn.TaskInput.from_data_at("$.Emr.Cluster.Id").value,
            integration_pattern=_sfn.IntegrationPattern.RUN_JOB,
        )

        terminate_cluster_error = _sfn_tasks.EmrTerminateCluster(
            self, 'BatchDeleteClusterError',
            cluster_id=_sfn.TaskInput.from_data_at("$.Emr.Cluster.Id").value,
            integration_pattern=_sfn.IntegrationPattern.RUN_JOB,
        ).next(_sfn.Fail(self, 'StepFailure'))

        create_cluster = _sfn_tasks.EmrCreateCluster(
            self, "BatchCreateEMRCluster",
            name="BatchDatagenCluster",
            result_path="$.Emr",
            release_label='emr-5.30.1',
            log_uri=log_bucket.s3_url_for_object() + "/data-generator",
            cluster_role=cluster_role,
            service_role=service_role,
            bootstrap_actions=[
                _sfn_tasks.EmrCreateCluster.BootstrapActionConfigProperty(
                    name="dsdgen-install",
                    script_bootstrap_action=_sfn_tasks.EmrCreateCluster.ScriptBootstrapActionConfigProperty(
                        path=BINARIES_LOCATION + DataGenConfig.DSDGEN_INSTALL_SCRIPT,
                    )
                )
            ],
            applications=[
                _sfn_tasks.EmrCreateCluster.ApplicationConfigProperty(
                    name="spark"
                ),
                _sfn_tasks.EmrCreateCluster.ApplicationConfigProperty(
                    name="hadoop"
                )
            ],
            instances=_sfn_tasks.EmrCreateCluster.InstancesConfigProperty(
                emr_managed_master_security_group=master_sg.security_group_id,
                emr_managed_slave_security_group=slave_sg.security_group_id,
                service_access_security_group=service_sg.security_group_id,
                ec2_subnet_ids=vpc.select_subnets().subnet_ids,
                instance_fleets=[
                    _sfn_tasks.EmrCreateCluster.InstanceFleetConfigProperty(
                        instance_fleet_type=_sfn_tasks.EmrCreateCluster.InstanceRoleType.MASTER,
                        instance_type_configs=[
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m4.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5d.xlarge',
                                weighted_capacity=1
                            ),
                        ],
                        launch_specifications=_sfn_tasks.EmrCreateCluster.InstanceFleetProvisioningSpecificationsProperty(
                            spot_specification=_sfn_tasks.EmrCreateCluster.SpotProvisioningSpecificationProperty(
                                timeout_action=_sfn_tasks.EmrCreateCluster.SpotTimeoutAction.SWITCH_TO_ON_DEMAND,
                                timeout_duration_minutes=5
                            )
                        ),
                        target_on_demand_capacity=0,
                        target_spot_capacity=1
                    ),
                    _sfn_tasks.EmrCreateCluster.InstanceFleetConfigProperty(
                        instance_fleet_type=_sfn_tasks.EmrCreateCluster.InstanceRoleType.CORE,
                        instance_type_configs=[
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5.2xlarge',
                                weighted_capacity=2
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.xlarge',
                                weighted_capacity=1
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m5a.2xlarge',
                                weighted_capacity=2
                            ),
                            _sfn_tasks.EmrCreateCluster.InstanceTypeConfigProperty(
                                instance_type='m4.xlarge',
                                weighted_capacity=1
                            )
                        ],
                        launch_specifications=_sfn_tasks.EmrCreateCluster.InstanceFleetProvisioningSpecificationsProperty(
                            spot_specification=_sfn_tasks.EmrCreateCluster.SpotProvisioningSpecificationProperty(
                                timeout_action=_sfn_tasks.EmrCreateCluster.SpotTimeoutAction.SWITCH_TO_ON_DEMAND,
                                timeout_duration_minutes=5
                            )
                        ),
                        target_on_demand_capacity=0,
                        target_spot_capacity=DataGenConfig.BATCH_CLUSTER_SIZE[tshirt_size]
                    )
                ]
            )
        ).add_catch(handler=terminate_cluster_error, result_path="$.error")

        configure_datagen = _sfn_tasks.LambdaInvoke(
            self, "BatchConfigureDatagenTask",
            lambda_function=configure_datagen_function,
            payload=_sfn.TaskInput.from_text('{'
                                             '"Param": "batch_iterator",'
                                             '"Module": "batch",'
                                             '"SinkBucket": "'+sink_bucket.s3_url_for_object()+'",'
                                             '"Parallelism": "'+str(int(DataGenConfig.BATCH_DATA_SIZE[tshirt_size])*2)+'",'
                                             '"DataSize": "'+DataGenConfig.BATCH_DATA_SIZE[tshirt_size]+'",'
                                             '"TmpBucket": "fake-bucket"'
                                             '}'),
            result_path='$.Config'
        ).add_catch(handler=terminate_cluster_error, result_path="$.error")

        add_datagen_step = _sfn.CustomState(
            self, 'BatchAddDataGenStep',
            state_json={
                "Type": "Task",
                "Resource": "arn:aws:states:::elasticmapreduce:addStep.sync",
                "Parameters": {
                    "ClusterId.$": "$.Emr.Cluster.Id",
                    "Step": {
                        "Name": "DatagenStep",
                        "ActionOnFailure": "CONTINUE",
                        "HadoopJarStep": {
                            "Jar": "command-runner.jar",
                            "Args.$": "$.Config.Payload.StepParam"
                        }
                    }
                },
                "ResultPath": "$.Step",
                "Next": "BatchUpdateIterator",
                "Catch": [
                    {
                        "ErrorEquals": ["States.ALL"],
                        "Next": "BatchDeleteClusterError",
                        "ResultPath": "$.error"
                    }
                ]
            }
        )
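
        # Hedged note: a raw CustomState is used here, presumably because the
        # high-level EmrAddStep task cannot take its step arguments from the
        # state input ("Args.$": "$.Config.Payload.StepParam"). The "Next" and
        # "Catch" targets must match the construct ids of states defined in
        # this stack ("BatchUpdateIterator", "BatchDeleteClusterError").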

        update_iterator = _sfn_tasks.DynamoUpdateItem(
            self, 'BatchUpdateIterator',
            table=config_table,
            key={
                'param': _sfn_tasks.DynamoAttributeValue.from_string('batch_iterator')
            },
            update_expression='SET iterator = if_not_exists(iterator, :start) + :inc',
            expression_attribute_values={
                ":inc": _sfn_tasks.DynamoAttributeValue.from_number(1),
                ":start": _sfn_tasks.DynamoAttributeValue.from_number(0)
            },
            result_path=_sfn.JsonPath.DISCARD
        )
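
        # Worked example of the update expression: on the first execution
        # if_not_exists(iterator, :start) + :inc evaluates 0 + 1 = 1; every
        # later execution adds 1, so the item doubles as a run counter.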

        definition = configure_datagen \
            .next(create_cluster) \
            .next(add_datagen_step) \
            .next(update_iterator) \
            .next(terminate_cluster)

        datagen_stepfunctions = _sfn.StateMachine(
            self, "BatchDataGenStepFunctions",
            definition=definition,
            timeout=core.Duration.minutes(30)
        )

        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    'elasticmapreduce:AddJobFlowSteps',
                    'elasticmapreduce:DescribeStep'
                ],
                resources=['*']
            )
        )
        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(
                actions= [
                    "iam:CreateServiceLinkedRole",
                    "iam:PutRolePolicy"
                ],
                resources=["arn:aws:iam::*:role/aws-service-role/elasticmapreduce.amazonaws.com*/AWSServiceRoleForEMRCleanup*"],
                conditions= {
                    "StringLike": {
                        "iam:AWSServiceName": [
                            "elasticmapreduce.amazonaws.com",
                            "elasticmapreduce.amazonaws.com.cn"
                        ]
                    }
                }
            )
        )

        step_trigger = _events.Rule(
            self, 'BatchSteptrigger',
            schedule=_events.Schedule.cron(minute='0/30',
                                           hour='*',
                                           month='*',
                                           week_day='*',
                                           year='*')
        )

        step_trigger.add_target(_events_targets.SfnStateMachine(machine=datagen_stepfunctions))

        with open('common/common_cdk/lambda/stepfunctions_trigger.py', 'r') as f:
            lambda_source = f.read()

        stepfunctions_trigger_lambda = _lambda.SingletonFunction(
            self, 'BatchStepFunctionsTriggerLambda',
            uuid="9597f6f2-f840-11ea-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='stepfunctions-batch-datagen-trigger'
        )

        stepfunctions_trigger_lambda.role.add_to_policy(
            _iam.PolicyStatement(
                actions=["states:StartExecution"],
                resources=['*']
            )
        )

        trigger_step_lambda_provider = _custom_resources.Provider(
            self, 'StepFunctionsTriggerLambdaProvider',
            on_event_handler=stepfunctions_trigger_lambda
        )

        core.CustomResource(
            self, 'StepFunctionsTrigger',
            service_token=trigger_step_lambda_provider.service_token,
            properties={
                "stepArn": datagen_stepfunctions.state_machine_arn
            }
        )
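
        # Hedged sketch: stepfunctions_trigger.py is not shown. Handlers for
        # the Provider framework receive a RequestType and should return a
        # PhysicalResourceId; here Create would plausibly start an execution:
        #
        #   import boto3
        #
        #   def handler(event, context):
        #       if event["RequestType"] == "Create":
        #           boto3.client("stepfunctions").start_execution(
        #               stateMachineArn=event["ResourceProperties"]["stepArn"])
        #       return {"PhysicalResourceId": event["ResourceProperties"]["stepArn"]}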

        # Terminate running datagen clusters and stop in-flight executions when the stack is deleted
        with open('common/common_cdk/lambda/stepfunctions_terminate_emr.py', 'r') as f:
            lambda_source = f.read()

        sfn_terminate = _lambda.SingletonFunction(
            self, 'StepFuncTerminateBatch',
            uuid='58a9a422-ff07-11ea-adc1-0242ac120002',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            timeout=core.Duration.minutes(5)
        )

        sfn_terminate.role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'elasticmapreduce:ListClusters',
                    'elasticmapreduce:TerminateJobFlows',
                    'states:ListStateMachines',
                    'states:ListExecutions',
                    'states:StopExecution'
                ],
                resources=['*']
            )
        )

        sfn_terminate_provider = _custom_resources.Provider(
            self, 'StepFuncTerminateBatchLambdaProvider',
            on_event_handler=sfn_terminate
        )

        core.CustomResource(
            self, 'StepFuncTerminateBatchCustomResource',
            service_token=sfn_terminate_provider.service_token,
            properties={
                "state_machine": 'BatchDatagen'
            })
    def setup_custom_authorizer_user_pass(self):
        custom_authorizer_name = self.custom_auth_user_pass_default_authorizer_name
        self._parameters_to_save[
            "custom_authorizer_user_pass_name"] = custom_authorizer_name
        token_key_name = "IoTTokenKeyName"
        self._parameters_to_save[
            "custom_authorizer_user_pass_token_key_name"] = token_key_name
        token_value = "allow"
        self._parameters_to_save[
            "custom_authorizer_user_pass_token_value"] = token_value
        self._parameters_to_save[
            "custom_authorizer_user_pass_username"] = self.custom_auth_user_pass_username
        self._parameters_to_save[
            "custom_authorizer_user_pass_password"] = self.custom_auth_user_pass_password

        iot_custom_authorizer_key_resource = self.create_custom_authorizer_signing_key_generic(
            "2",
            "Manages an asymmetric CMK and token signature for iot custom authorizer with username and password.",
            token_value,
        )

        custom_authorizer_token_signature = iot_custom_authorizer_key_resource.get_att(
            "custom_authorizer_token_signature").to_string()
        self._parameters_to_save[
            "custom_authorizer_user_pass_token_signature"] = custom_authorizer_token_signature

        # TODO: remove the forced us-east-1 region once enhanced custom authorizers are available in all regions
        # (they are currently available only there, hence the hard-coded region below)
        authorizer_function_arn = self.setup_custom_authorizer_function(
            "2",
            "custom_resources/iot_custom_authorizer_user_pass_function",
            "iot_custom_authorizer_user_pass.handler",
            "Sample custom authorizer that allows or denies based on username and password",
            {
                "custom_auth_user_pass_username":
                self.custom_auth_user_pass_username,
                "custom_auth_user_pass_password":
                self.custom_auth_user_pass_password
            },
            "us-east-1",
        )
        create_authorizer_policy = aws_iam.PolicyStatement(
            effect=aws_iam.Effect.ALLOW,
            actions=[
                "iot:CreateAuthorizer",
                "iot:UpdateAuthorizer",
                "iot:DeleteAuthorizer",
                "iot:UpdateDomainConfiguration",
                "iot:CreateDomainConfiguration",
                "iot:DescribeDomainConfiguration",
                "iot:DeleteDomainConfiguration",
            ],
            resources=["*"],
        )
        provider_lambda = aws_lambda.SingletonFunction(
            self,
            "iot_custom_authorizer_user_pass_provider_lambda",
            uuid="iot_custom_authorizer_user_pass_provider_lambda_20200727123737",
            runtime=aws_lambda.Runtime.PYTHON_3_7,
            code=aws_lambda.Code.asset(
                "custom_resources/iot_custom_authorizer_user_pass_provider"),
            handler="iot_custom_authorizer_user_pass_provider.on_event",
            description="Sets up an IoT custom authorizer for username/password auth "
            "and the domain configuration it requires while the feature is in beta",
            environment={
                "custom_auth_user_pass_uuid":
                self.custom_auth_user_pass_uuid,
                "custom_auth_user_pass_default_authorizer_name":
                self.custom_auth_user_pass_default_authorizer_name,
                "custom_auth_user_pass_domain_configuration_name":
                self.custom_auth_user_pass_domain_configuration_name
            },
            current_version_options=aws_lambda.VersionOptions(
                removal_policy=core.RemovalPolicy.DESTROY),
            initial_policy=[create_authorizer_policy],
        )

        provider = custom_resources.Provider(
            self,
            "iot_custom_authorizer_user_pass_provider",
            on_event_handler=provider_lambda)

        public_key = iot_custom_authorizer_key_resource.get_att(
            "custom_authorizer_public_key").to_string()

        iot_endpoint = core.CustomResource(
            self,
            "iot_custom_authorizer_user_pass",
            resource_type="Custom::IoTCustomAuthorizer",
            service_token=provider.service_token,
            properties={
                "authorizer_function_arn": authorizer_function_arn,
                "authorizer_name": custom_authorizer_name,
                "public_key": public_key,
                "token_key_name": token_key_name,
            },
        )
        endpoint_address = iot_endpoint.get_att(
            "BetaEndpointAddress").to_string()
        self._parameters_to_save[
            "iot_beta_endpoint_address"] = endpoint_address
    def __init__(self, scope: core.Construct, id: str, cluster: eks.Cluster,
                 kafka: msk.CfnCluster, vpc: ec2.Vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

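        # Vendors the kafka-python client into the custom resource asset at
        # synth time so the config handler below can reach the MSK brokers.
        # (Calling pip.main() from a CDK app is a convenience of this example,
        # not a general recommendation.)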
        pip.main([
            "install", "--system", "--target", "custom_resources/kafka/lib",
            "kafka-python"
        ])
        arn = cr.AwsCustomResource(
            self,
            'clusterArn',
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=['*']),
            on_create=cr.AwsSdkCall(
                action='listClusters',
                service='Kafka',
                physical_resource_id=cr.PhysicalResourceId.of(
                    "ClusterNameFilter"),
                parameters={
                    "ClusterNameFilter": kafka.cluster_name,
                    "MaxResults": 1
                },
            ),
        )

        bootstraps = cr.AwsCustomResource(
            self,
            'clusterBootstraps',
            policy=cr.AwsCustomResourcePolicy.from_sdk_calls(resources=["*"]),
            on_create=cr.AwsSdkCall(
                action='getBootstrapBrokers',
                service='Kafka',
                physical_resource_id=cr.PhysicalResourceId.of("ClusterArn"),
                parameters={
                    "ClusterArn":
                    arn.get_response_field("ClusterInfoList.0.ClusterArn")
                },
            ),
        )
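
        # Hedged note: get_response_field takes a dotted path into the SDK
        # response (here "ClusterInfoList.0.ClusterArn"), resolved at deploy
        # time, which is how the listClusters result feeds getBootstrapBrokers.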

        manifests = []
        for namespace in self.node.try_get_context("kubernetes")['namespaces']:
            manifests.append({
                "apiVersion": "v1",
                "kind": "ConfigMap",
                "metadata": {
                    "name": "kafka",
                    "namespace": namespace
                },
                "data": {
                    "bootstrap":
                    bootstraps.get_response_field('BootstrapBrokerStringTls'),
                }
            })
        eks.KubernetesManifest(self,
                               "kafka-config",
                               cluster=cluster,
                               manifest=manifests)

        function = lbd.SingletonFunction(
            self,
            "KafkaConfigFunction",
            uuid="b09329a3-5206-46f7-822f-337da714aeac",
            code=lbd.Code.from_asset("custom_resources/kafka/"),
            handler="config.handler",
            runtime=lbd.Runtime.PYTHON_3_7,
            function_name="kafkaConfig",
            log_retention=logs.RetentionDays.ONE_DAY,
            security_group=ec2.SecurityGroup.from_security_group_id(
                self, "lambdaKafkaVPC", vpc.vpc_default_security_group),
            timeout=core.Duration.seconds(30),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(one_per_az=True))

        provider = cr.Provider(self,
                               "KafkaConfigProvider",
                               on_event_handler=function,
                               log_retention=logs.RetentionDays.ONE_DAY)

        core.CustomResource(
            self,
            "KafkaLoadTopic",
            service_token=provider.service_token,
            properties={
                "bootstrap": bootstraps.get_response_field('BootstrapBrokerStringTls'),
                "topic": "load",
                "partitions": 150,
                "replicas": 1
            })

        core.CustomResource(
            self,
            "KafkaGenerateTopic",
            service_token=provider.service_token,
            properties={
                "bootstrap": bootstraps.get_response_field('BootstrapBrokerStringTls'),
                "topic": "generate",
                "partitions": 200,
                "replicas": 1
            })
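
        # Hedged sketch: custom_resources/kafka/config.py is not shown. With
        # kafka-python vendored above, a topic-creating handler could look
        # roughly like this (hypothetical; CloudFormation stringifies the
        # numeric properties, hence the int() casts):
        #
        #   from kafka.admin import KafkaAdminClient, NewTopic
        #
        #   def handler(event, context):
        #       props = event["ResourceProperties"]
        #       if event["RequestType"] == "Create":
        #           admin = KafkaAdminClient(
        #               bootstrap_servers=props["bootstrap"],
        #               security_protocol="SSL")
        #           admin.create_topics([NewTopic(
        #               name=props["topic"],
        #               num_partitions=int(props["partitions"]),
        #               replication_factor=int(props["replicas"]))])
        #       return {"PhysicalResourceId": props["topic"]}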
Example #25
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        policy_name: str,
        policy_document: Any,
        timeout: Duration = None
    ) -> None:
        super().__init__(scope, id)

        if isinstance(policy_document, dict):
            policy_document = json.dumps(policy_document)

        account_id = Stack.of(self).account
        region = Stack.of(self).region

        # IMPORTANT! Scoping resources to the exact policy name is the most restrictive option,
        # but it causes issues when the policy name itself is updated.
        # See this issue for more info: https://github.com/aws/aws-cdk/issues/14037
        # A possible workaround is setting resources to 'arn:aws:iot:{region}:{account_id}:policy/*', which is more permissive.
        lambda_role = iam.Role(
            scope=self,
            id=f'{id}LambdaRole',
            assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
            inline_policies={
                "IotPolicyProvisioningPolicy":
                    iam.PolicyDocument(statements=[
                        iam.PolicyStatement(
                            actions=[
                                "iot:ListPolicyVersions", "iot:CreatePolicy", "iot:CreatePolicyVersion", "iot:DeletePolicy",
                                "iot:DeletePolicyVersion", "iot:GetPolicy"
                            ],
                            resources=[f'arn:aws:iot:{region}:{account_id}:policy/{policy_name}'],
                            effect=iam.Effect.ALLOW,
                        )
                    ])
            },
            managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole")],
        )

        if not timeout:
            timeout = Duration.minutes(5)

        with open(path.join(path.dirname(__file__), 'iot_policy_event_handler.py')) as file:
            code = file.read()

        event_handler = aws_lambda.Function(
            scope=self,
            id=f'{id}EventHandler',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_inline(code),
            handler='index.on_event',
            role=lambda_role,
            timeout=timeout,
        )

        with open(path.join(path.dirname(__file__), 'iot_policy_is_complete_handler.py')) as file:
            is_complete_code = file.read()

        is_complete_handler = aws_lambda.Function(
            scope=self,
            id=f'{id}IsCompleteHandler',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_inline(is_complete_code),
            handler='index.is_complete',
            role=lambda_role,
        )

        provider = Provider(
            scope=self, 
            id=f'{id}Provider', 
            on_event_handler=event_handler,
            is_complete_handler=is_complete_handler, 
            query_interval=Duration.minutes(2),
        )

        core.CustomResource(
            scope=self,
            id=f'{id}IotPolicy',
            service_token=provider.service_token,
            removal_policy=RemovalPolicy.DESTROY,
            resource_type="Custom::IotPolicyAsync",
            properties={
                "policy_name": policy_name,
                "policy_document": policy_document,
            },
        )
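
        # Hedged note on the provider contract: on_event handles
        # Create/Update/Delete and returns a PhysicalResourceId, while the
        # is_complete handler is polled every query_interval and is expected
        # to answer with {"IsComplete": True} (or False to keep waiting), e.g.:
        #
        #   def is_complete(event, context):
        #       done = ...  # e.g. check iot:GetPolicy reflects the desired document
        #       return {"IsComplete": done}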
Example #26
    def __init__(self, scope: core.Construct, id: str, log_bucket: _s3.Bucket,
                 config_table: _dynamodb.Table, tshirt_size: str,
                 sink_bucket: _s3.Bucket, web_sale_stream: str,
                 web_customer_stream: str, web_customer_address_stream: str,
                 kinesis_key: _kms.Key, vpc: _ec2.Vpc, **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        stack = core.Stack.of(self)

        stream_source_bucket = AutoEmptyBucket(
            self,
            'StreamSource',
            bucket_name='ara-stream-source-' + core.Aws.ACCOUNT_ID,
            uuid='95505f50-0276-11eb-adc1-0242ac120002')

        service_role = _iam.Role(
            self,
            'StreamEmrServiceRole',
            assumed_by=_iam.ServicePrincipal('elasticmapreduce.amazonaws.com'))

        service_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonElasticMapReduceRole'))

        cluster_role = _iam.Role(
            self,
            'StreamEmrClusterRole',
            assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com"))

        _iam.Policy(
            self,
            'StreamEmrClusterPolicy',
            statements=[
                _iam.PolicyStatement(actions=[
                    "glue:CreateDatabase",
                    "glue:UpdateDatabase",
                    "glue:DeleteDatabase",
                    "glue:GetDatabase",
                    "glue:GetDatabases",
                    "glue:CreateTable",
                    "glue:UpdateTable",
                    "glue:DeleteTable",
                    "glue:GetTable",
                    "glue:GetTables",
                    "glue:GetTableVersions",
                    "glue:CreatePartition",
                    "glue:BatchCreatePartition",
                    "glue:UpdatePartition",
                    "glue:DeletePartition",
                    "glue:BatchDeletePartition",
                    "glue:GetPartition",
                    "glue:GetPartitions",
                    "glue:BatchGetPartition",
                    "glue:CreateUserDefinedFunction",
                    "glue:UpdateUserDefinedFunction",
                    "glue:DeleteUserDefinedFunction",
                    "glue:GetUserDefinedFunction",
                    "glue:GetUserDefinedFunctions",
                    "cloudwatch:PutMetricData",
                    "dynamodb:ListTables",
                    "s3:HeadBucket",
                    "ec2:Describe*",
                ],
                                     resources=['*']),
                _iam.PolicyStatement(
                    actions=['s3:GetObject'],
                    resources=[
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES + DataGenConfig.DSDGEN_INSTALL_SCRIPT,
                        'arn:aws:s3:::' + ARA_BUCKET_NAME + BINARIES + DataGenConfig.JAR_FILE
                    ]),
                _iam.PolicyStatement(
                    actions=['s3:PutObject'],
                    resources=[log_bucket.bucket_arn + "/data-generator/*"]),
                _iam.PolicyStatement(
                    actions=[
                        "s3:AbortMultipartUpload", "s3:CreateBucket",
                        "s3:DeleteObject", "s3:GetBucketVersioning",
                        "s3:GetObject", "s3:GetObjectTagging",
                        "s3:GetObjectVersion", "s3:ListBucket",
                        "s3:ListBucketMultipartUploads",
                        "s3:ListBucketVersions", "s3:ListMultipartUploadParts",
                        "s3:PutBucketVersioning", "s3:PutObject",
                        "s3:PutObjectTagging"
                    ],
                    resources=[
                        sink_bucket.bucket_arn + '/*', sink_bucket.bucket_arn,
                        stream_source_bucket.bucket.bucket_arn + '/*',
                        stream_source_bucket.bucket.bucket_arn
                    ])
            ],
            roles=[cluster_role])

        cluster_role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'AmazonSSMManagedInstanceCore'))

        _iam.CfnInstanceProfile(self,
                                'StreamEmrClusterInstanceProfile',
                                roles=[cluster_role.role_name],
                                instance_profile_name=cluster_role.role_name)

        # Security Groups for the EMR cluster (private subnet)
        # https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-man-sec-groups.html#emr-sg-elasticmapreduce-master-private
        master_sg = _ec2.SecurityGroup(self,
                                       'ElasticMapReduce-Master-Private',
                                       vpc=vpc)
        slave_sg = _ec2.SecurityGroup(self,
                                      'ElasticMapReduce-Slave-Private',
                                      vpc=vpc)
        service_sg = _ec2.SecurityGroup(self,
                                        'ElasticMapReduce-ServiceAccess',
                                        vpc=vpc,
                                        allow_all_outbound=False)

        # Service SG used by the proxy instance
        service_sg.add_ingress_rule(master_sg, _ec2.Port.tcp(9443))
        service_sg.add_egress_rule(master_sg, _ec2.Port.tcp(8443))
        service_sg.add_egress_rule(slave_sg, _ec2.Port.tcp(8443))

        # EMR Master
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        master_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        master_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        # EMR Slave
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(master_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_icmp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_tcp())
        slave_sg.add_ingress_rule(slave_sg, _ec2.Port.all_udp())
        slave_sg.add_ingress_rule(service_sg, _ec2.Port.tcp(8443))

        with open('common/common_cdk/lambda/datagen_config.py', 'r') as f:
            lambda_source = f.read()

        configure_datagen_function = _lambda.SingletonFunction(
            self,
            'StreamConfigureDatagenLambda',
            uuid="a9904dec-01cf-11eb-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='stream-datagen-config',
            environment={
                'TABLE_NAME': config_table.table_name,
                'JAR_LOCATION': BINARIES_LOCATION + DataGenConfig.JAR_FILE,
            },
            timeout=core.Duration.seconds(10))

        configure_datagen_function.role.add_to_policy(
            _iam.PolicyStatement(
                actions=[
                    'dynamodb:GetItem',
                    'dynamodb:PutItem',
                ],
                resources=[config_table.table_arn]))

        emr_cluster = _emr.CfnCluster(
            self,
            'StreamEmrCluster',
            name="StreamDatagenCluster",
            job_flow_role=cluster_role.role_name,
            service_role=service_role.role_name,
            release_label='emr-5.30.1',
            visible_to_all_users=True,
            log_uri=log_bucket.s3_url_for_object() + "/data-generator",
            applications=[
                _emr.CfnCluster.ApplicationProperty(name='hadoop'),
                _emr.CfnCluster.ApplicationProperty(name='spark')
            ],
            bootstrap_actions=[
                _emr.CfnCluster.BootstrapActionConfigProperty(
                    name="dsdgen-install",
                    script_bootstrap_action=_emr.CfnCluster.ScriptBootstrapActionConfigProperty(
                        path=BINARIES_LOCATION + DataGenConfig.DSDGEN_INSTALL_SCRIPT))
            ],
            instances=_emr.CfnCluster.JobFlowInstancesConfigProperty(
                emr_managed_master_security_group=master_sg.security_group_id,
                emr_managed_slave_security_group=slave_sg.security_group_id,
                service_access_security_group=service_sg.security_group_id,
                ec2_subnet_id=vpc.private_subnets[0].subnet_id,
                core_instance_group=_emr.CfnCluster.InstanceGroupConfigProperty(
                    instance_count=DataGenConfig.BATCH_CLUSTER_SIZE[tshirt_size],
                    instance_type='m5.xlarge'),
                master_instance_group=_emr.CfnCluster.InstanceGroupConfigProperty(
                    instance_count=1,
                    instance_type='m4.large')))

        configure_datagen = _sfn_tasks.LambdaInvoke(
            self,
            "ConfigureDatagenTask",
            lambda_function=configure_datagen_function,
            payload=_sfn.TaskInput.from_text(
                '{'
                '"Param": "stream_iterator",'
                '"Module": "stream",'
                '"SinkBucket": "' + sink_bucket.s3_url_for_object() + '",'
                '"Parallelism": "' +
                str(int(DataGenConfig.STREAM_DATA_SIZE[tshirt_size]) * 2) +
                '",'
                '"DataSize": "' + DataGenConfig.STREAM_DATA_SIZE[tshirt_size] +
                '",'
                '"TmpBucket": "' +
                str(stream_source_bucket.bucket.s3_url_for_object()) + '"'
                '}'),
            result_path='$.Config')

        add_datagen_step = _sfn.CustomState(
            self,
            'StreamAddDataGenStep',
            state_json={
                "Type": "Task",
                "Resource": "arn:aws:states:::elasticmapreduce:addStep.sync",
                "Parameters": {
                    "ClusterId.$": "$.Emr.Cluster.Id",
                    "Step": {
                        "Name": "DatagenStep",
                        "ActionOnFailure": "CONTINUE",
                        "HadoopJarStep": {
                            "Jar": "command-runner.jar",
                            "Args.$": "$.Config.Payload.StepParam"
                        }
                    }
                },
                "ResultPath": "$.Step",
                "Next": "StreamUpdateIterator"
            })

        update_iterator = _sfn_tasks.DynamoUpdateItem(
            self,
            'StreamUpdateIterator',
            table=config_table,
            key={
                'param':
                _sfn_tasks.DynamoAttributeValue.from_string('stream_iterator')
            },
            update_expression='SET iterator = if_not_exists(iterator, :start) + :inc',
            expression_attribute_values={
                ":inc": _sfn_tasks.DynamoAttributeValue.from_number(1),
                ":start": _sfn_tasks.DynamoAttributeValue.from_number(0)
            },
            result_path=_sfn.JsonPath.DISCARD)

        definition = configure_datagen \
            .next(add_datagen_step) \
            .next(update_iterator)

        datagen_stepfunctions = _sfn.StateMachine(
            self,
            "StreamDataGenStepFunctions",
            definition=definition,
            timeout=core.Duration.minutes(30))

        datagen_stepfunctions.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    'elasticmapreduce:AddJobFlowSteps',
                    'elasticmapreduce:DescribeStep'
                ],
                resources=['*']))

        step_trigger = _events.Rule(self,
                                    'StreamStepTrigger',
                                    schedule=_events.Schedule.cron(
                                        minute='0/10',
                                        hour='*',
                                        month='*',
                                        week_day='*',
                                        year='*'))

        step_trigger.add_target(
            _events_targets.SfnStateMachine(
                machine=datagen_stepfunctions,
                input=_events.RuleTargetInput.from_object({
                    "Emr": {
                        "Cluster": {
                            "Id": core.Fn.ref(emr_cluster.logical_id)
                        }
                    }
                })))

        with open('common/common_cdk/lambda/stepfunctions_trigger.py',
                  'r') as f:
            lambda_source = f.read()

        stepfunctions_trigger_lambda = _lambda.SingletonFunction(
            self,
            'StreamStepFunctionsTriggerLambda',
            uuid="cf042246-01d0-11eb-adc1-0242ac120002",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.inline(lambda_source),
            handler='index.handler',
            function_name='stepfunctions-stream-datagen-trigger')
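        # SingletonFunction deduplicates on the uuid, so at most one copy of this
        # trigger Lambda is deployed per stack however often the construct is
        # instantiated.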

        stepfunctions_trigger_lambda.role.add_to_policy(
            _iam.PolicyStatement(actions=["states:StartExecution"],
                                 resources=['*']))

        trigger_step_lambda_provider = _custom_resources.Provider(
            self,
            'StreamStepFunctionsTriggerLambdaProvider',
            on_event_handler=stepfunctions_trigger_lambda)

        core.CustomResource(
            self,
            'StreamStepFunctionsTrigger',
            service_token=trigger_step_lambda_provider.service_token,
            properties={"stepArn": datagen_stepfunctions.state_machine_arn})

        with open('common/common_cdk/lambda/stream_generator.py', 'r') as f:
            lambda_source = f.read()

        sale_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebSaleStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_sale_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(sale_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='sale', suffix='csv'))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=["kinesis:PutRecords"],
                                 resources=[
                                     stack.format_arn(
                                         service='kinesis',
                                         resource='stream',
                                         resource_name=web_sale_stream)
                                 ]))

        sale_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))

        customer_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebCustomerStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_customer_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(
                customer_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='customer', suffix='csv'))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=["kinesis:PutRecords"],
                                 resources=[
                                     stack.format_arn(
                                         service='kinesis',
                                         resource='stream',
                                         resource_name=web_customer_stream)
                                 ]))

        customer_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))

        address_stream_generator_lambda = _lambda.Function(
            scope=self,
            id='WebCustomerAddressStreamGenerator',
            runtime=_lambda.Runtime.PYTHON_3_7,
            memory_size=2048,
            timeout=core.Duration.minutes(15),
            code=_lambda.Code.inline(lambda_source),
            handler='index.lambda_handler',
            environment={
                'REGION': core.Aws.REGION,
                'STREAM_NAME': web_customer_address_stream
            })

        stream_source_bucket.bucket.add_event_notification(
            _s3.EventType.OBJECT_CREATED,
            _s3_notifications.LambdaDestination(
                address_stream_generator_lambda),
            _s3.NotificationKeyFilter(prefix='address', suffix='csv'))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=[
                    "s3:DeleteObject",
                    "s3:GetObject",
                    "s3:ListBucket",
                ],
                resources=[
                    stream_source_bucket.bucket.bucket_arn + '/*',
                    stream_source_bucket.bucket.bucket_arn
                ]))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(
                actions=["kinesis:PutRecords"],
                resources=[
                    stack.format_arn(service='kinesis',
                                     resource='stream',
                                     resource_name=web_customer_address_stream)
                ]))

        address_stream_generator_lambda.add_to_role_policy(
            _iam.PolicyStatement(actions=['kms:GenerateDataKey'],
                                 resources=[
                                     stack.format_arn(
                                         service='kms',
                                         resource='key',
                                         sep='/',
                                         resource_name=kinesis_key.key_id)
                                 ]))
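        # The sale, customer, and address generators above differ only in
        # construct id, S3 prefix filter, and target stream. A hypothetical
        # helper could remove the triplication, e.g.:
        #
        # def add_stream_generator(self, gen_id, prefix, stream_name):
        #     fn = _lambda.Function(self, gen_id,
        #                           runtime=_lambda.Runtime.PYTHON_3_7,
        #                           memory_size=2048,
        #                           timeout=core.Duration.minutes(15),
        #                           code=_lambda.Code.inline(lambda_source),
        #                           handler='index.lambda_handler',
        #                           environment={'REGION': core.Aws.REGION,
        #                                        'STREAM_NAME': stream_name})
        #     stream_source_bucket.bucket.add_event_notification(
        #         _s3.EventType.OBJECT_CREATED,
        #         _s3_notifications.LambdaDestination(fn),
        #         _s3.NotificationKeyFilter(prefix=prefix, suffix='csv'))
        #     return fn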
	def __init__(self, scope: core.Construct, id: str, elastic: Elastic, vpc: ec2.Vpc, roles: list, cluster: eks.Cluster, **kwargs) -> None:
		super().__init__(scope, id, **kwargs)

		sm_policy = iam.PolicyStatement(
			actions=["secretsmanager:GetSecretValue"], 
			effect=iam.Effect.ALLOW, 
			resources=[elastic.secret.secret_arn]
		)

		es_policy = iam.PolicyStatement(
			actions=["es:DescribeElasticsearchDomain"], 
			effect=iam.Effect.ALLOW, 
			resources=[elastic.domain.domain_arn]
		)

		function = lbd.SingletonFunction(
			self,
			"ElasticsearchConfigFunction",
			uuid="e579d5f9-1709-43ea-b75f-9d1452ca7690",
			code=lbd.Code.from_asset(
				"custom_resources/elasticsearch/"
			),
			handler="config.handler",
			runtime=lbd.Runtime.PYTHON_3_7,
			function_name="elasticsearchConfig",
			initial_policy=[sm_policy, es_policy],
			log_retention=logs.RetentionDays.ONE_DAY,
			security_group=ec2.SecurityGroup.from_security_group_id(self, "lambdaVPC", vpc.vpc_default_security_group),
			timeout=core.Duration.seconds(30),
			vpc=vpc,
			vpc_subnets=ec2.SubnetSelection(
				one_per_az=True
			)
		)

		provider = cr.Provider(
			self, "ElasticsearchConfigProvider",
			on_event_handler=function,
			log_retention=logs.RetentionDays.ONE_DAY
		)

		core.CustomResource(
			self, "ElasticSearchConfig", 
			service_token=provider.service_token,
			properties={
				"domain": elastic.domain.domain_name,
				"secret": elastic.secret.secret_arn,
				"roles": [role.role_arn for role in roles],
				"shards": self.node.try_get_context("elastic")['shards'],
				"user": boto3.client('sts').get_caller_identity().get('Arn'),
				"replicas": self.node.try_get_context("elastic")['replicas']
			}
		)
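		# Note: the "user" property is resolved with boto3 at synth time, so the
		# identity of whoever synthesizes the template is baked into it; this
		# assumes synth and deploy run under the same credentials.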

		manifests = []
		for namespace in self.node.try_get_context("kubernetes")['namespaces']:
			manifests.append({
				"apiVersion": "v1",
				"kind": "ConfigMap",
				"metadata": {
					"name": "elasticsearch",
					"namespace": namespace
				},
				"data": {
					"url": elastic.domain.domain_endpoint
				}
			})
		eks.KubernetesManifest(
			self, 
			"elastic-search-cm", 
			cluster=cluster,
			manifest=manifests
		)		
	def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
		super().__init__(scope, id, **kwargs)

	# CloudFormation Parameters

		glue_db_name = core.CfnParameter(self, "GlueDatabaseNameNycTlc", 
				type="String",
				description="Name of Glue Database to be created for NYC TLC.",
				allowed_pattern="[\w-]+",
				default = "nyc_tlc_db"
			)

		glue_table_name = core.CfnParameter(self, "GlueTableNameNycTlc", 
				type="String",
				description="Name of Glue Table to be created for NYC TLC.",
				allowed_pattern="[\w-]+",
				default = "nyc_tlc_table"
			)

		self.template_options.description = "\
This template deploys the dataset containing New York City Taxi and Limousine Commission (TLC) Trip Record Data.\n \
Sample data is copied from the public dataset into a local S3 bucket, a database and table are created in AWS Glue, \
and the S3 location is registered with AWS Lake Formation."

		self.template_options.metadata = {
			"AWS::CloudFormation::Interface": {
				"License": "MIT-0"
			}
		}
	# Create S3 bucket for storing a copy of the Dataset locally in the AWS Account

		local_dataset_bucket = s3.Bucket(self, "LocalNycTlcBucket",
			block_public_access = s3.BlockPublicAccess(
				block_public_acls=True, 
				block_public_policy=True, 
				ignore_public_acls=True, 
				restrict_public_buckets=True),
			removal_policy = core.RemovalPolicy.DESTROY)

		public_dataset_bucket = s3.Bucket.from_bucket_arn(self, "PublicDatasetBucket", BUCKET_ARN)

		with open("lambda/s3_copy.py", encoding="utf8") as fp:
			s3_copy_code = fp.read()

		s3_copy_execution_role = iam.Role(self, "S3CopyHandlerServiceRole",
			assumed_by = iam.ServicePrincipal('lambda.amazonaws.com'),
			managed_policies = [
				iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
			],
			inline_policies = { "S3CopyHandlerRoleInlinePolicy" : iam.PolicyDocument( 
				statements = [
					iam.PolicyStatement(
						effect=iam.Effect.ALLOW,
						actions=[
							"s3:Get*"
						],
						resources=[
							public_dataset_bucket.bucket_arn,
							public_dataset_bucket.arn_for_objects("*")
						]),
					iam.PolicyStatement(
						effect=iam.Effect.ALLOW,
						actions=[
							"s3:PutObject",
							"s3:GetObject",
							"s3:DeleteObject"
						],
						resources=[local_dataset_bucket.arn_for_objects("*")]
						)
					]
				) }
			)

		s3_copy_fn = _lambda.Function(self, "S3CopyHandler", 
			runtime = _lambda.Runtime.PYTHON_3_7,
			code = _lambda.Code.from_inline(s3_copy_code),
			handler = "index.handler",
			role =  s3_copy_execution_role,
			timeout = core.Duration.seconds(600)
		)

		s3_copy = core.CustomResource(self, "S3Copy", 
			service_token = s3_copy_fn.function_arn,
			resource_type = "Custom::S3Copy",
			properties = {
				"PublicDatasetBucket": public_dataset_bucket.bucket_name,
				"LocalDatasetBucket" : local_dataset_bucket.bucket_name,
				"PublicDatasetObject": OBJECT,
				"LocalDatasetPrefix": glue_table_name.value_as_string
			} 
		)	
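		# Unlike the Provider-based custom resources elsewhere, the service_token
		# here points straight at the Lambda, so the handler itself must send the
		# CloudFormation success/failure response (e.g. via the cfnresponse module).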

	# Create Database and Table in AWS Glue for the NYC TLC dataset

		lakeformation_resource = lf.CfnResource(self, "LakeFormationResource", 
			resource_arn = local_dataset_bucket.bucket_arn, 
			use_service_linked_role = True)

		lakeformation_resource.node.add_dependency(s3_copy)
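		# The explicit dependency ensures the dataset copy completes before the
		# bucket is registered with Lake Formation.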

		cfn_glue_db = glue.CfnDatabase(self, "GlueDatabase", 
			catalog_id = core.Aws.ACCOUNT_ID,
			database_input = glue.CfnDatabase.DatabaseInputProperty(
				name = glue_db_name.value_as_string, 
				location_uri=local_dataset_bucket.s3_url_for_object(),
			)
		)

		nyc_tlc_table = glue.CfnTable(self, "GlueTableNycTlc", 
			catalog_id = cfn_glue_db.catalog_id,
			database_name = glue_db_name.value_as_string,
			table_input = glue.CfnTable.TableInputProperty(
				description = "New York City Taxi and Limousine Commission (TLC) Trip Record Data",
				name = glue_table_name.value_as_string,
				parameters = {
					"skip.header.line.count": "1",
					"compressionType": "none",
					"classification": "csv",
					"delimiter": ",",
					"typeOfData": "file"
				},
				storage_descriptor = glue.CfnTable.StorageDescriptorProperty(
					columns = [
						{"name":"vendorid","type":"bigint"},
						{"name":"lpep_pickup_datetime","type":"string"},
						{"name":"lpep_dropoff_datetime","type":"string"},
						{"name":"store_and_fwd_flag","type":"string"},
						{"name":"ratecodeid","type":"bigint"},
						{"name":"pulocationid","type":"bigint"},
						{"name":"dolocationid","type":"bigint"},
						{"name":"passenger_count","type":"bigint"},
						{"name":"trip_distance","type":"double"},
						{"name":"fare_amount","type":"double"},
						{"name":"extra","type":"double"},
						{"name":"mta_tax","type":"double"},
						{"name":"tip_amount","type":"double"},
						{"name":"tolls_amount","type":"double"},
						{"name":"ehail_fee","type":"string"},
						{"name":"improvement_surcharge","type":"double"},
						{"name":"total_amount","type":"double"},
						{"name":"payment_type","type":"bigint"},
						{"name":"trip_type","type":"bigint"},
						{"name":"congestion_surcharge","type":"double"}],
					location = local_dataset_bucket.s3_url_for_object() + "/" + glue_table_name.value_as_string + "/",
					input_format = "org.apache.hadoop.mapred.TextInputFormat",
					output_format = "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",
					compressed = False,
					serde_info = glue.CfnTable.SerdeInfoProperty( 
						serialization_library = "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
						parameters = {
							"field.delim": ","
						}
					)
				),
				table_type = "EXTERNAL_TABLE"
			)
		)

		nyc_tlc_table.node.add_dependency(cfn_glue_db)
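		# CfnDatabase and CfnTable are linked only by name strings, so
		# CloudFormation cannot infer ordering; the explicit dependency makes
		# sure the database exists before the table is created.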

		core.CfnOutput(self, "LocalNycTlcBucketOutput", 
			value=local_dataset_bucket.bucket_name, 
			description="S3 Bucket created to store the dataset")

		core.CfnOutput(self, "GlueDatabaseOutput", 
			value=cfn_glue_db.ref, 
			description="Glue DB created to host the dataset table")

		core.CfnOutput(self, "GlueTableNycTlcOutput", 
			value=nyc_tlc_table.ref, 
			description="Glue Table created to host the dataset")
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # CloudFormation Parameters

        glue_db_name = core.CfnParameter(
            self,
            "GlueDatabaseName",
            type="String",
            description="Glue Database where the Table belongs.",
            allowed_pattern="[\w-]+",
        )

        glue_table_name = core.CfnParameter(
            self,
            "GlueTableName",
            type="String",
            description="Glue Table where access will be granted.",
            allowed_pattern="[\w-]+",
        )

        grantee_role_arn = core.CfnParameter(
            self,
            "GranteeIAMRoleARN",
            type="String",
            description="IAM Role's ARN.",
            allowed_pattern=
            r"arn:(aws[a-zA-Z-]*)?:iam::\d{12}:role\/?[a-zA-Z0-9_+=,.@\-]+")

        grantee_vpc = core.CfnParameter(
            self,
            "GranteeVPC",
            type="String",
            description=
            "VPC ID from where the S3 access point will be accessed.",
            allowed_pattern="vpc-[a-zA-Z0-9]+")

        is_lakeformation = core.CfnParameter(
            self,
            "LakeFormationParam",
            type="String",
            description=
            "If Lake Formation is used, the stack must be deployed using an IAM role with Lake Formation Admin permissions.",
            allowed_values=["Yes", "No"])

        # CloudFormation Parameter Groups

        self.template_options.description = "\
This template deploys an S3 Access Point which provides a given IAM Role \
access to the underlying data location for a given Glue Table.\n\
Main use case for this template is to grant an ETL process in another AWS Account, \
access to the S3 objects (e.g., Parquet files) associated to a Glue Table."

        self.template_options.metadata = {
            "AWS::CloudFormation::Interface": {
                "License":
                "MIT-0",
                "ParameterGroups": [{
                    "Label": {
                        "default": "Lake Formation (Producer Account)"
                    },
                    "Parameters": [is_lakeformation.logical_id]
                }, {
                    "Label": {
                        "default":
                        "Source Data Catalog Resource (Producer Account)"
                    },
                    "Parameters":
                    [glue_db_name.logical_id, glue_table_name.logical_id]
                }, {
                    "Label": {
                        "default": "Grantee IAM Role (Consumer Account)"
                    },
                    "Parameters":
                    [grantee_role_arn.logical_id, grantee_vpc.logical_id]
                }],
                "ParameterLabels": {
                    is_lakeformation.logical_id: {
                        "default":
                        "Are data permissions managed by Lake Formation?"
                    },
                    glue_db_name.logical_id: {
                        "default": "What is the Glue DB Name for the Table?"
                    },
                    glue_table_name.logical_id: {
                        "default": "What is the Glue Table Name?"
                    },
                    grantee_role_arn.logical_id: {
                        "default": "What is the ARN of the IAM Role?"
                    },
                    grantee_vpc.logical_id: {
                        "default":
                        "What VPC will be used to access the S3 Access Point?"
                    }
                }
            }
        }

        is_lakeformation_condition = core.CfnCondition(
            self,
            "IsLakeFormation",
            expression=core.Fn.condition_equals(
                "Yes", is_lakeformation.value_as_string))

        # Create S3 Access Point to share dataset objects

        grantee_role = iam.Role.from_role_arn(self, "GranteeIAMRole",
                                              grantee_role_arn.value_as_string)

        glue_table_arn = f"arn:aws:glue:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:table/{glue_db_name.value_as_string}/{glue_table_name.value_as_string}"

        glue_table = glue.Table.from_table_arn(self,
                                               "GlueTable",
                                               table_arn=glue_table_arn)

        # Invoke Lambda to obtain S3 bucket and S3 prefix from Glue Table

        get_s3_from_table_execution_role = iam.Role(
            self,
            "GetS3FromTableServiceRole",
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSLambdaBasicExecutionRole"),
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "service-role/AWSGlueServiceRole")
            ])

        lf_permission = lf.CfnPermissions(
            self,
            "LFPermissionForLambda",
            data_lake_principal=lf.CfnPermissions.DataLakePrincipalProperty(
                data_lake_principal_identifier=get_s3_from_table_execution_role
                .role_arn),
            resource=lf.CfnPermissions.ResourceProperty(
                table_resource=lf.CfnPermissions.TableResourceProperty(
                    name=glue_table_name.value_as_string,
                    database_name=glue_db_name.value_as_string)),
            permissions=["DESCRIBE"])

        lf_permission.apply_removal_policy(core.RemovalPolicy.DESTROY,
                                           apply_to_update_replace_policy=True)
        lf_permission.node.add_dependency(get_s3_from_table_execution_role)
        lf_permission.cfn_options.condition = is_lakeformation_condition

        lf_wait_condition_handle = cfn.CfnWaitConditionHandle(
            self, "LFWaitConditionHandle")
        lf_wait_condition_handle.add_metadata(
            "WaitForLFPermissionIfExists",
            core.Fn.condition_if(is_lakeformation_condition.logical_id,
                                 lf_permission.logical_id, ""))
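        # Dependency trick: the custom resource below depends on this handle, and
        # the handle's metadata references the Lake Formation permission only when
        # the condition is true. That forces CloudFormation to create the
        # (conditional) permission before the Lambda reads the table, without
        # making the custom resource itself conditional.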

        with open("lambda/get_s3_from_table.py", encoding="utf8") as fp:
            get_s3_from_table_code = fp.read()

        get_s3_from_table_fn = _lambda.Function(
            self,
            "GetS3FromTableHandler",
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.from_inline(get_s3_from_table_code),
            handler="index.handler",
            role=get_s3_from_table_execution_role,
            timeout=core.Duration.seconds(600))

        get_s3_from_table = core.CustomResource(
            self,
            "GetS3FromTable",
            service_token=get_s3_from_table_fn.function_arn,
            resource_type="Custom::GetS3FromTable",
            properties={
                "GlueDatabase": glue_db_name.value_as_string,
                "GlueTable": glue_table_name.value_as_string
            })

        get_s3_from_table.node.add_dependency(lf_wait_condition_handle)

        table_bucket = get_s3_from_table.get_att_string("TableBucket")
        table_prefix = get_s3_from_table.get_att_string("TablePrefix")

        # Create S3 Access Point

        table_name_normalized = core.Fn.join(
            "-", core.Fn.split("_", glue_table_name.value_as_string))
        random_suffix = core.Fn.select(
            0,
            core.Fn.split(
                "-", core.Fn.select(2, core.Fn.split("/", core.Aws.STACK_ID))))

        s3_accesspoint_name = f"{table_name_normalized}-{random_suffix}"

        s3_accesspoint_arn = f"arn:aws:s3:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:accesspoint/{s3_accesspoint_name}"

        glue_table_accesspoint_path = f"{s3_accesspoint_arn}/object/{table_prefix}"

        # s3_accesspoint_block_config = s3.CfnAccessPoint.PublicAccessBlockConfigurationProperty(block_public_acls=True, block_public_policy=True, ignore_public_acls=True, restrict_public_buckets=True)

        s3_accesspoint_policy = iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                principals=[iam.ArnPrincipal(arn=grantee_role.role_arn)],
                actions=["s3:GetObject*"],
                resources=[f"{glue_table_accesspoint_path}*"]),
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                principals=[iam.ArnPrincipal(arn=grantee_role.role_arn)],
                actions=["s3:ListBucket*"],
                resources=[s3_accesspoint_arn],
                conditions={"StringLike": {
                    "s3:prefix": f"{table_prefix}*"
                }})
        ])
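        # Reminder: requests through an access point are authorized by both this
        # policy and the underlying bucket, so the bucket's own policy may need
        # a clause delegating access to the access point.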

        s3_accesspoint = s3.CfnAccessPoint(
            self,
            "S3AccessPoint",
            bucket=f"{table_bucket}",
            name=s3_accesspoint_name,
            # network_origin = "Internet",
            policy=s3_accesspoint_policy,
            vpc_configuration=s3.CfnAccessPoint.VpcConfigurationProperty(
                vpc_id=grantee_vpc.value_as_string))

        glue_table_accesspoint_path_output = f"arn:aws:s3:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:accesspoint/{s3_accesspoint.name}/object/{table_prefix}"

        # Output

        core.CfnOutput(self,
                       "IAMRoleArnOutput",
                       value=grantee_role.role_arn,
                       description="IAM Role Arn")

        core.CfnOutput(self,
                       "GlueTableOutput",
                       value=glue_table.table_arn,
                       description="Glue Table ARN")

        core.CfnOutput(self,
                       "S3AccessPointPathOutput",
                       value=glue_table_accesspoint_path_output,
                       description="S3 Access Point Path for Glue Table")
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create bucket and upload scripts
        bucket = s3.Bucket(self, "ScriptBucket")

        self.script_bucket = bucket

        s3deploy.BucketDeployment(self, "UploadScriptsToBucket",
            sources=[s3deploy.Source.asset(os.path.join(dirname, "scripts"))],
            destination_bucket=bucket
        )

        # Greengrass Core Thing policy
        greengrass_core_policy = iot.CfnPolicy(self,
            'GreenGrassCorePolicy',
            policy_name='greengrass-demo-policy',
            policy_document={
                "Version": "2012-10-17",
                "Statement": [
                    {
                        "Effect": "Allow",
                        "Action": [
                            "iot:Publish",
                            "iot:Subscribe",
                            "iot:Connect",
                            "iot:Receive"
                        ],
                        "Resource": [
                            "*"
                        ]
                    },
                    {
                        "Effect": "Allow",
                        "Action": [
                            "iot:GetThingShadow",
                            "iot:UpdateThingShadow",
                            "iot:DeleteThingShadow"
                        ],
                        "Resource": [
                            "*"
                        ]
                    },
                    {
                        "Effect": "Allow",
                        "Action": [
                            "greengrass:*"
                        ],
                        "Resource": [
                            "*"
                        ]
                    }
                ]
            }
        )

        self.core_policy_name = greengrass_core_policy.policy_name

        # Create a Greengrass group role
        greengrass_group_role = iam.Role(self, "GroupRole",
            assumed_by=iam.ServicePrincipal("greengrass.amazonaws.com")
        )
        greengrass_group_role.add_to_policy(iam.PolicyStatement(
            resources=["arn:aws:logs:*:*:*"],
            actions=[
                "logs:CreateLogGroup",
                "logs:CreateLogStream",
                "logs:PutLogEvents"
            ]
        ))
        greengrass_group_role.add_to_policy(iam.PolicyStatement(
            resources=["*"],
            actions=["iot:*"]
        ))
     
        self.greengrass_group_role_arn = greengrass_group_role.role_arn
        
        # A custom resource that verifies a Greengrass service role exists on the account
        greengrass_mgmt_function = awslambda.SingletonFunction(
            self,
            "MgmttHandler",
            uuid="58854ea2-0624-4ca5-b600-fa88d4b9164e",
            runtime=awslambda.Runtime.PYTHON_3_7,
            code=awslambda.Code.asset("custom_resources"),
            handler="greengrassmgmt.handler",
        )

        greengrass_mgmt_function.add_to_role_policy(
            iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        'greengrass:*',
                        'iot:*',
                        'iam:CreateRole',
                        'iam:AttachRolePolicy',
                        'iam:PassRole'
                    ],
                    resources=['*']
                )
        )

        greengrass_mgmt_provider = cust_resource.Provider(self, "MgmtProvider",
            on_event_handler=greengrass_mgmt_function
        )

        core.CustomResource(self, "MgmtCustResource", 
            service_token=greengrass_mgmt_provider.service_token
        )