def configure_inference(scope, blueprint_bucket):
    """
    configure_inference updates inference lambda function's environment variables and puts the value
    for Sagemaker endpoint URI as a lambda invoked codepipeline action

    :scope: CDK Construct scope that's needed to create CDK resources
    :blueprint_bucket: CDK object of the blueprint bucket that contains resources for BYOM pipeline
    :is_realtime_inference: a CDK CfnCondition object that says if inference type is realtime or not
    :return: codepipeline action in a form of a CDK object that can be attached to a codepipeline stage
    """
    # provision api gateway and lambda for inference using solution constructs
    inference_api_gateway = aws_apigateway_lambda.ApiGatewayToLambda(
        scope,
        "BYOMInference",
        lambda_function_props={
            "runtime": lambda_.Runtime.PYTHON_3_8,
            "handler": "main.handler",
            "code": lambda_.Code.from_bucket(blueprint_bucket, "blueprints/byom/lambdas/inference.zip"),
        },
        api_gateway_props={
            "defaultMethodOptions": {
                "authorizationType": apigw.AuthorizationType.IAM,
            },
            "restApiName": f"{core.Aws.STACK_NAME}-inference",
            "proxy": False,
        },
    )

    provision_resource = inference_api_gateway.api_gateway.root.add_resource("inference")
    provision_resource.add_method("POST")
    inference_api_gateway.lambda_function.add_to_role_policy(
        iam.PolicyStatement(
            actions=[
                "sagemaker:InvokeEndpoint",
            ],
            resources=[
                f"arn:{core.Aws.PARTITION}:sagemaker:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:endpoint/*",
            ],
        )
    )

    # lambda function that gets invoked from codepipeline
    configure_inference_lambda = lambda_.Function(
        scope,
        "configure_inference_lambda",
        runtime=lambda_.Runtime.PYTHON_3_8,
        handler="main.handler",
        code=lambda_.Code.from_bucket(blueprint_bucket, "blueprints/byom/lambdas/configure_inference_lambda.zip"),
        environment={
            "inference_lambda_arn": inference_api_gateway.lambda_function.function_arn,
            "LOG_LEVEL": "INFO",
        },
    )
    configure_inference_lambda.node.default_child.cfn_options.metadata = suppress_cloudwatch_policy()
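    # suppress_cloudwatch_policy is defined elsewhere in this solution; it presumably
    # returns cfn_nag suppression metadata (e.g., for W58), analogous to the inline
    # suppressions used elsewhere in this file.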
    # iam permissions to respond to codepipeline and update inference lambda
    configure_inference_lambda.add_to_role_policy(
        iam.PolicyStatement(
            actions=[
                "lambda:UpdateFunctionConfiguration",
            ],
            resources=[inference_api_gateway.lambda_function.function_arn],
        )
    )
    configure_inference_lambda.add_to_role_policy(codepipeline_policy())

    role_child_nodes = configure_inference_lambda.role.node.find_all()
    role_child_nodes[2].node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {
                    "id": "W12",
                    "reason": (
                        "The codepipeline permissions PutJobSuccessResult and PutJobFailureResult "
                        "are not able to be bound to resources."
                    ),
                }
            ]
        }
    }
    # configuring codepipeline action to invoke the lambda
    configure_inference_action = codepipeline_actions.LambdaInvokeAction(
        action_name="configure_inference_lambda",
        inputs=[],
        outputs=[],
        # the endpoint name is resolved at pipeline execution time from the
        # variable namespace of the preceding SageMaker deploy stage
        user_parameters=[{"endpointName": "#{sagemaker_endpoint.endpointName}"}],
        lambda_=configure_inference_lambda,
    )

    return (configure_inference_lambda.function_arn, configure_inference_action)
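
# Illustrative usage sketch (assumed caller code; "pipeline" and the stage
# naming below are not part of this module):
#
#   configure_arn, configure_action = configure_inference(self, blueprint_bucket)
#   pipeline.add_stage(
#       stage_name="ConfigureInference",
#       actions=[configure_action],
#   )
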
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Get stack parameters: email and repo address
        notification_email = core.CfnParameter(
            self,
            "Email Address",
            type="String",
            description="Specify an email to receive notifications about pipeline outcomes.",
            allowed_pattern="^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
            min_length=5,
            max_length=320,
            constraint_description="Please enter an email address with correct format (example@example.com)",
        )
        git_address = core.CfnParameter(
            self,
            "CodeCommit Repo Address",
            type="String",
            description="AWS CodeCommit repository clone URL to connect to the framework.",
            allowed_pattern=(
                "^(((https:\/\/|ssh:\/\/)(git\-codecommit)\.[a-zA-Z0-9_.+-]+(amazonaws\.com\/)"
                "[a-zA-Z0-9-.]+(\/)[a-zA-Z0-9-.]+(\/)[a-zA-Z0-9-.]+$)|)"
            ),
            min_length=0,
            max_length=320,
            constraint_description=(
                "CodeCommit address must follow the pattern: ssh or "
                "https://git-codecommit.REGION.amazonaws.com/version/repos/REPONAME"
            ),
        )

        # Conditions
        git_address_provided = core.CfnCondition(
            self,
            "GitAddressProvided",
            expression=core.Fn.condition_not(
                core.Fn.condition_equals(git_address, "")),
        )

        # Constants
        pipeline_stack_name = "MLOps-pipeline"

        # CDK Resources setup
        access_logs_bucket = s3.Bucket(
            self,
            "accessLogs",
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL)
        access_logs_bucket.node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W35",
                        "reason": "This is the access bucket."
                    },
                    {
                        "id": "W51",
                        "reason":
                        "This S3 bucket does not need a bucket policy.",
                    },
                ]
            }
        }
        source_bucket = s3.Bucket.from_bucket_name(self, "BucketByName",
                                                   "%%BUCKET_NAME%%")

        blueprints_bucket_name = "blueprint-repository-" + str(uuid.uuid4())
        blueprint_repository_bucket = s3.Bucket(
            self,
            blueprints_bucket_name,
            encryption=s3.BucketEncryption.S3_MANAGED,
            server_access_logs_bucket=access_logs_bucket,
            server_access_logs_prefix=blueprints_bucket_name,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL)
        blueprint_repository_bucket.node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W51",
                        "reason": (
                            "This S3 bucket does not need a bucket policy. All access to this bucket "
                            "is restricted by IAM (CDK grant_read method)"
                        ),
                    }
                ]
            }
        }

        # Custom resource to copy source bucket content to blueprints bucket
        custom_resource_lambda_fn = lambda_.Function(
            self,
            "CustomResourceLambda",
            code=lambda_.Code.from_asset("lambdas/custom_resource"),
            handler="index.on_event",
            runtime=lambda_.Runtime.PYTHON_3_8,
            environment={
                "source_bucket": "https://%%BUCKET_NAME%%-" + core.Aws.REGION +
                ".s3.amazonaws.com/%%SOLUTION_NAME%%/%%VERSION%%",
                "destination_bucket": blueprint_repository_bucket.bucket_name,
                "LOG_LEVEL": "INFO",
            },
            timeout=core.Duration.seconds(60),
        )
        custom_resource_lambda_fn.node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W58",
                        "reason": "The lambda function's role already has permissions to write cloudwatch logs",
                    }
                ]
            }
        }
        blueprint_repository_bucket.grant_write(custom_resource_lambda_fn)
        custom_resource = core.CustomResource(
            self,
            "CustomResourceCopyAssets",
            service_token=custom_resource_lambda_fn.function_arn,
        )
        custom_resource.node.add_dependency(blueprint_repository_bucket)
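        # The explicit dependency ensures the blueprints bucket exists before
        # the copy custom resource runs.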
        ### IAM policies setup ###
        cloudformation_role = iam.Role(
            self,
            "mlopscloudformationrole",
            assumed_by=iam.ServicePrincipal("cloudformation.amazonaws.com"),
        )
        # Cloudformation policy setup
        orchestrator_policy = iam.Policy(
            self,
            "lambdaOrchestratorPolicy",
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "cloudformation:CreateStack",
                        "cloudformation:DeleteStack",
                        "cloudformation:UpdateStack",
                        "cloudformation:ListStackResources",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:cloudformation:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:stack/{pipeline_stack_name}*/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "iam:CreateRole",
                        "iam:DeleteRole",
                        "iam:DeleteRolePolicy",
                        "iam:GetRole",
                        "iam:GetRolePolicy",
                        "iam:PassRole",
                        "iam:PutRolePolicy",
                        "iam:AttachRolePolicy",
                        "iam:DetachRolePolicy",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:iam::{core.Aws.ACCOUNT_ID}:role/{pipeline_stack_name}*"
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "ecr:CreateRepository",
                        "ecr:DeleteRepository",
                        "ecr:DescribeRepositories",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:ecr:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:repository/awsmlopsmodels*"
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "codebuild:CreateProject",
                        "codebuild:DeleteProject",
                        "codebuild:BatchGetProjects",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:project/ContainerFactory*",
                        f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:project/VerifySagemaker*",
                        f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:report-group/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "lambda:CreateFunction",
                        "lambda:DeleteFunction",
                        "lambda:InvokeFunction",
                        "lambda:PublishLayerVersion",
                        "lambda:DeleteLayerVersion",
                        "lambda:GetLayerVersion",
                        "lambda:GetFunctionConfiguration",
                        "lambda:GetFunction",
                        "lambda:AddPermission",
                        "lambda:RemovePermission",
                        "lambda:UpdateFunctionConfiguration",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:layer:*",
                        f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:function:*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=["s3:GetObject"],
                    resources=[
                        blueprint_repository_bucket.bucket_arn,
                        blueprint_repository_bucket.arn_for_objects("*"),
                        f"arn:{core.Aws.PARTITION}:s3:::pipeline-assets-*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "codepipeline:CreatePipeline",
                        "codepipeline:DeletePipeline",
                        "codepipeline:GetPipeline",
                        "codepipeline:GetPipelineState",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:codepipeline:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*"
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "apigateway:POST",
                        "apigateway:PATCH",
                        "apigateway:DELETE",
                        "apigateway:GET",
                        "apigateway:PUT",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis/*",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/account",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "logs:CreateLogGroup",
                        "logs:DescribeLogGroups",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "s3:CreateBucket",
                        "s3:PutEncryptionConfiguration",
                        "s3:PutBucketVersioning",
                        "s3:PutBucketPublicAccessBlock",
                        "s3:PutBucketLogging",
                    ],
                    resources=["arn:" + core.Aws.PARTITION + ":s3:::*"],
                ),
                iam.PolicyStatement(
                    actions=[
                        "sns:CreateTopic",
                        "sns:DeleteTopic",
                        "sns:Subscribe",
                        "sns:Unsubscribe",
                        "sns:GetTopicAttributes",
                        "sns:SetTopicAttributes",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*-PipelineNotification*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "events:PutRule",
                        "events:DescribeRule",
                        "events:PutTargets",
                        "events:RemoveTargets",
                        "events:DeleteRule",
                        "events:PutEvents",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:rule/*",
                        f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                    ],
                ),
            ],
        )
        orchestrator_policy.attach_to_role(cloudformation_role)

        # Lambda function IAM setup
        lambda_passrole_policy = iam.PolicyStatement(
            actions=["iam:passrole"], resources=[cloudformation_role.role_arn])
        # API Gateway and lambda setup to enable provisioning pipelines through API calls
        provisioner_apigw_lambda = aws_apigateway_lambda.ApiGatewayToLambda(
            self,
            "PipelineOrchestration",
            lambda_function_props={
                "runtime": lambda_.Runtime.PYTHON_3_8,
                "handler": "index.handler",
                "code":
                lambda_.Code.from_asset("lambdas/pipeline_orchestration"),
            },
            api_gateway_props={
                "defaultMethodOptions": {
                    "authorizationType": apigw.AuthorizationType.IAM,
                },
                "restApiName": f"{core.Aws.STACK_NAME}-orchestrator",
                "proxy": False
            },
        )
        provision_resource = provisioner_apigw_lambda.api_gateway.root.add_resource(
            'provisionpipeline')
        provision_resource.add_method('POST')
        status_resource = provisioner_apigw_lambda.api_gateway.root.add_resource(
            'pipelinestatus')
        status_resource.add_method('POST')
        blueprint_repository_bucket.grant_read(
            provisioner_apigw_lambda.lambda_function)
        provisioner_apigw_lambda.lambda_function.add_to_role_policy(
            lambda_passrole_policy)
        orchestrator_policy.attach_to_role(
            provisioner_apigw_lambda.lambda_function.role)
        provisioner_apigw_lambda.lambda_function.add_to_role_policy(
            iam.PolicyStatement(actions=["xray:PutTraceSegments"],
                                resources=["*"]))
        lambda_node = provisioner_apigw_lambda.lambda_function.node.default_child
        lambda_node.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W12",
                        "reason": "The X-Ray permission PutTraceSegments cannot be bound to specific resources.",
                    }
                ]
            }
        }
        # Environment variables setup
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="BLUEPRINT_BUCKET_URL",
            value=str(blueprint_repository_bucket.bucket_regional_domain_name),
        )
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="BLUEPRINT_BUCKET",
            value=str(blueprint_repository_bucket.bucket_name))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="ACCESS_BUCKET", value=str(access_logs_bucket.bucket_name))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="CFN_ROLE_ARN", value=str(cloudformation_role.role_arn))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="PIPELINE_STACK_NAME", value=pipeline_stack_name)
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="NOTIFICATION_EMAIL", value=notification_email.value_as_string)
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="LOG_LEVEL", value="DEBUG")
        cfn_policy_for_lambda = orchestrator_policy.node.default_child
        cfn_policy_for_lambda.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W76",
                        "reason": "A complex IAM policy is required for this resource.",
                    }
                ]
            }
        }

        ### Codepipeline with Git source definitions ###
        source_output = codepipeline.Artifact()
        # processing git_address to retrieve repo name
        repo_name_split = core.Fn.split("/", git_address.value_as_string)
        repo_name = core.Fn.select(5, repo_name_split)
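        # For example, "https://git-codecommit.us-east-1.amazonaws.com/v1/repos/myrepo"
        # splits into ["https:", "", "git-codecommit.us-east-1.amazonaws.com", "v1",
        # "repos", "myrepo"], so index 5 is the repository name.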
        # getting codecommit repo cdk object using 'from_repository_name'
        repo = codecommit.Repository.from_repository_name(
            self, "AWSMLOpsFrameworkRepository", repo_name)
        codebuild_project = codebuild.PipelineProject(
            self,
            "Take config file",
            build_spec=codebuild.BuildSpec.from_object({
                "version": "0.2",
                "phases": {
                    "build": {
                        "commands": [
                            "ls -a",
                            "aws lambda invoke --function-name " +
                            provisioner_apigw_lambda.lambda_function.
                            function_name +
                            " --payload fileb://mlops-config.json response.json"
                            + " --invocation-type RequestResponse",
                        ]
                    }
                },
            }),
        )
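        # Note: "fileb://" passes the raw bytes of mlops-config.json as the invocation
        # payload; with AWS CLI v2 a plain "file://" payload would be interpreted as
        # base64 unless --cli-binary-format raw-in-base64-out is set.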
        # Defining a Codepipeline project with CodeCommit as source
        codecommit_pipeline = codepipeline.Pipeline(
            self,
            "MLOpsCodeCommitPipeline",
            stages=[
                codepipeline.StageProps(
                    stage_name="Source",
                    actions=[
                        codepipeline_actions.CodeCommitSourceAction(
                            action_name="CodeCommit",
                            repository=repo,
                            output=source_output,
                        )
                    ],
                ),
                codepipeline.StageProps(
                    stage_name="TakeConfig",
                    actions=[
                        codepipeline_actions.CodeBuildAction(
                            action_name="provision_pipeline",
                            input=source_output,
                            outputs=[],
                            project=codebuild_project,
                        )
                    ],
                ),
            ],
            cross_account_keys=False,
        )
        codecommit_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["lambda:InvokeFunction"],
                resources=[
                    provisioner_apigw_lambda.lambda_function.function_arn
                ],
            ))
        codebuild_project.add_to_role_policy(
            iam.PolicyStatement(
                actions=["lambda:InvokeFunction"],
                resources=[
                    provisioner_apigw_lambda.lambda_function.function_arn
                ],
            ))
        pipeline_child_nodes = codecommit_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W35",
                        "reason": "This is a managed bucket generated by CDK for codepipeline.",
                    },
                    {
                        "id": "W51",
                        "reason": "This is a managed bucket generated by CDK for codepipeline.",
                    },
                ]
            }
        }

        ### custom resource for operational metrics ###
        metricsMapping = core.CfnMapping(
            self,
            'AnonymousData',
            mapping={'SendAnonymousData': {
                'Data': 'Yes'
            }})
        metrics_condition = core.CfnCondition(
            self,
            'AnonymousDatatoAWS',
            expression=core.Fn.condition_equals(
                metricsMapping.find_in_map('SendAnonymousData', 'Data'),
                'Yes'))

        helper_function = lambda_.Function(
            self,
            "SolutionHelper",
            code=lambda_.Code.from_asset("lambdas/solution_helper"),
            handler="lambda_function.handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(60),
        )

        createIdFunction = core.CustomResource(
            self,
            'CreateUniqueID',
            service_token=helper_function.function_arn,
            properties={'Resource': 'UUID'},
            resource_type='Custom::CreateUUID')

        sendDataFunction = core.CustomResource(
            self,
            'SendAnonymousData',
            service_token=helper_function.function_arn,
            properties={
                'Resource': 'AnonymousMetric',
                'UUID': createIdFunction.get_att_string('UUID'),
                'gitSelected': git_address.value_as_string,
                'Region': core.Aws.REGION,
                'SolutionId': 'SO0136',
                'Version': '%%VERSION%%',
            },
            resource_type='Custom::AnonymousData')

        core.Aspects.of(helper_function).add(
            ConditionalResources(metrics_condition))
        core.Aspects.of(createIdFunction).add(
            ConditionalResources(metrics_condition))
        core.Aspects.of(sendDataFunction).add(
            ConditionalResources(metrics_condition))
        helper_function.node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W58",
                        "reason": "The lambda function's role already has permissions to write cloudwatch logs",
                    }
                ]
            }
        }

        # If user chooses Git as pipeline provision type, create codepipeline with Git repo as source
        core.Aspects.of(repo).add(ConditionalResources(git_address_provided))
        core.Aspects.of(codecommit_pipeline).add(
            ConditionalResources(git_address_provided))
        core.Aspects.of(codebuild_project).add(
            ConditionalResources(git_address_provided))
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Get stack parameters: email and repo address
        notification_email = core.CfnParameter(
            self,
            "Email Address",
            type="String",
            description="Specify an email to receive notifications about pipeline outcomes.",
            allowed_pattern="^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
            min_length=5,
            max_length=320,
            constraint_description="Please enter an email address with correct format ([email protected])",
        )
        git_address = core.CfnParameter(
            self,
            "CodeCommit Repo Address",
            type="String",
            description="AWS CodeCommit repository clone URL to connect to the framework.",
            allowed_pattern=(
                "^(((https:\/\/|ssh:\/\/)(git\-codecommit)\.[a-zA-Z0-9_.+-]+(amazonaws\.com\/)[a-zA-Z0-9-.]"
                "+(\/)[a-zA-Z0-9-.]+(\/)[a-zA-Z0-9-.]+$)|^$)"
            ),
            min_length=0,
            max_length=320,
            constraint_description=(
                "CodeCommit address must follow the pattern: ssh or "
                "https://git-codecommit.REGION.amazonaws.com/version/repos/REPONAME"
            ),
        )

        # Get the optional S3 assets bucket to use
        existing_bucket = core.CfnParameter(
            self,
            "ExistingS3Bucket",
            type="String",
            description="Name of existing S3 bucket to be used for ML assests. S3 Bucket must be in the same region as the deployed stack, and has versioning enabled. If not provided, a new S3 bucket will be created.",
            allowed_pattern="((?=^.{3,63}$)(?!^(\d+\.)+\d+$)(^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])$)|^$)",
            min_length=0,
            max_length=63,
        )

        # Conditions
        git_address_provided = core.CfnCondition(
            self,
            "GitAddressProvided",
            expression=core.Fn.condition_not(core.Fn.condition_equals(git_address, "")),
        )

        # client provided an existing S3 bucket name, to be used for assets
        existing_bucket_provided = core.CfnCondition(
            self,
            "S3BucketProvided",
            expression=core.Fn.condition_not(core.Fn.condition_equals(existing_bucket.value_as_string.strip(), "")),
        )

        # S3 bucket needs to be created for assets
        create_new_bucket = core.CfnCondition(
            self,
            "CreateS3Bucket",
            expression=core.Fn.condition_equals(existing_bucket.value_as_string.strip(), ""),
        )
        # Constants
        pipeline_stack_name = "MLOps-pipeline"

        # CDK Resources setup
        access_logs_bucket = s3.Bucket(
            self,
            "accessLogs",
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )

        # Apply secure transfer bucket policy
        apply_secure_bucket_policy(access_logs_bucket)
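        # apply_secure_bucket_policy is defined elsewhere in this solution; a minimal
        # sketch of what such a helper is assumed to do (deny non-TLS access):
        #
        #   bucket.add_to_resource_policy(
        #       iam.PolicyStatement(
        #           sid="HttpsOnly",
        #           effect=iam.Effect.DENY,
        #           principals=[iam.AnyPrincipal()],
        #           actions=["*"],
        #           resources=[bucket.arn_for_objects("*")],
        #           conditions={"Bool": {"aws:SecureTransport": "false"}},
        #       )
        #   )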

        # This is a logging bucket.
        access_logs_bucket.node.default_child.cfn_options.metadata = suppress_s3_access_policy()

        # Import the user-provided S3 bucket, if any. s3.Bucket.from_bucket_arn is used instead of
        # s3.Bucket.from_bucket_name to allow a cross-account bucket.
        client_existing_bucket = s3.Bucket.from_bucket_arn(
            self,
            "ClientExistingBucket",
            f"arn:aws:s3:::{existing_bucket.value_as_string.strip()}",
        )

        # Create the resource if existing_bucket_provided condition is True
        core.Aspects.of(client_existing_bucket).add(ConditionalResources(existing_bucket_provided))

        # Creating assets bucket so that users can upload ML Models to it.
        assets_bucket = s3.Bucket(
            self,
            "pipeline-assets-" + str(uuid.uuid4()),
            versioned=True,
            encryption=s3.BucketEncryption.S3_MANAGED,
            server_access_logs_bucket=access_logs_bucket,
            server_access_logs_prefix="assets_bucket_access_logs",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )

        # Apply secure transport bucket policy
        apply_secure_bucket_policy(assets_bucket)

        # Create the resource if create_new_bucket condition is True
        core.Aspects.of(assets_bucket).add(ConditionalResources(create_new_bucket))

        # Get assets S3 bucket's name/arn, based on the condition
        assets_s3_bucket_name = core.Fn.condition_if(
            existing_bucket_provided.logical_id,
            client_existing_bucket.bucket_name,
            assets_bucket.bucket_name,
        ).to_string()
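        # Fn.condition_if resolves at deploy time: the client-provided bucket name
        # when S3BucketProvided is true, otherwise the newly created assets bucket.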

        blueprints_bucket_name = "blueprint-repository-" + str(uuid.uuid4())
        blueprint_repository_bucket = s3.Bucket(
            self,
            blueprints_bucket_name,
            encryption=s3.BucketEncryption.S3_MANAGED,
            server_access_logs_bucket=access_logs_bucket,
            server_access_logs_prefix=blueprints_bucket_name,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )
        # Apply secure transport bucket policy
        apply_secure_bucket_policy(blueprint_repository_bucket)

        # Custom resource to copy source bucket content to blueprints bucket
        custom_resource_lambda_fn = lambda_.Function(
            self,
            "CustomResourceLambda",
            code=lambda_.Code.from_asset("lambdas/custom_resource"),
            handler="index.on_event",
            runtime=lambda_.Runtime.PYTHON_3_8,
            environment={
                "source_bucket": "https://%%BUCKET_NAME%%-"
                + core.Aws.REGION
                + ".s3.amazonaws.com/%%SOLUTION_NAME%%/%%VERSION%%",
                "destination_bucket": blueprint_repository_bucket.bucket_name,
                "LOG_LEVEL": "INFO",
            },
            timeout=core.Duration.seconds(60),
        )
        custom_resource_lambda_fn.node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W58",
                        "reason": "The lambda functions role already has permissions to write cloudwatch logs",
                    }
                ]
            }
        }
        blueprint_repository_bucket.grant_write(custom_resource_lambda_fn)
        custom_resource = core.CustomResource(
            self,
            "CustomResourceCopyAssets",
            service_token=custom_resource_lambda_fn.function_arn,
        )
        custom_resource.node.add_dependency(blueprint_repository_bucket)
        ### IAM policies setup ###
        cloudformation_role = iam.Role(
            self,
            "mlopscloudformationrole",
            assumed_by=iam.ServicePrincipal("cloudformation.amazonaws.com"),
        )
        # Cloudformation policy setup
        orchestrator_policy = iam.Policy(
            self,
            "lambdaOrchestratorPolicy",
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "cloudformation:CreateStack",
                        "cloudformation:DeleteStack",
                        "cloudformation:UpdateStack",
                        "cloudformation:ListStackResources",
                    ],
                    resources=[
                        (
                            f"arn:{core.Aws.PARTITION}:cloudformation:{core.Aws.REGION}:"
                            f"{core.Aws.ACCOUNT_ID}:stack/{pipeline_stack_name}*/*"
                        ),
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "iam:CreateRole",
                        "iam:DeleteRole",
                        "iam:DeleteRolePolicy",
                        "iam:GetRole",
                        "iam:GetRolePolicy",
                        "iam:PassRole",
                        "iam:PutRolePolicy",
                        "iam:AttachRolePolicy",
                        "iam:DetachRolePolicy",
                    ],
                    resources=[f"arn:{core.Aws.PARTITION}:iam::{core.Aws.ACCOUNT_ID}:role/{pipeline_stack_name}*"],
                ),
                iam.PolicyStatement(
                    actions=[
                        "ecr:CreateRepository",
                        "ecr:DeleteRepository",
                        "ecr:DescribeRepositories",
                    ],
                    resources=[
                        (
                            f"arn:{core.Aws.PARTITION}:ecr:{core.Aws.REGION}:"
                            f"{core.Aws.ACCOUNT_ID}:repository/awsmlopsmodels*"
                        )
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "codebuild:CreateProject",
                        "codebuild:DeleteProject",
                        "codebuild:BatchGetProjects",
                    ],
                    resources=[
                        (
                            f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                            f"{core.Aws.ACCOUNT_ID}:project/ContainerFactory*"
                        ),
                        (
                            f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                            f"{core.Aws.ACCOUNT_ID}:project/VerifySagemaker*"
                        ),
                        (
                            f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                            f"{core.Aws.ACCOUNT_ID}:report-group/*"
                        ),
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "lambda:CreateFunction",
                        "lambda:DeleteFunction",
                        "lambda:InvokeFunction",
                        "lambda:PublishLayerVersion",
                        "lambda:DeleteLayerVersion",
                        "lambda:GetLayerVersion",
                        "lambda:GetFunctionConfiguration",
                        "lambda:GetFunction",
                        "lambda:AddPermission",
                        "lambda:RemovePermission",
                        "lambda:UpdateFunctionConfiguration",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:layer:*",
                        f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:function:*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=["s3:GetObject"],
                    resources=[
                        blueprint_repository_bucket.bucket_arn,
                        blueprint_repository_bucket.arn_for_objects("*"),
                        f"arn:{core.Aws.PARTITION}:s3:::{assets_s3_bucket_name}/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "codepipeline:CreatePipeline",
                        "codepipeline:DeletePipeline",
                        "codepipeline:GetPipeline",
                        "codepipeline:GetPipelineState",
                    ],
                    resources=[
                        (
                            f"arn:{core.Aws.PARTITION}:codepipeline:{core.Aws.REGION}:"
                            f"{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*"
                        )
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "apigateway:POST",
                        "apigateway:PATCH",
                        "apigateway:DELETE",
                        "apigateway:GET",
                        "apigateway:PUT",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis/*",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/account",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "logs:CreateLogGroup",
                        "logs:DescribeLogGroups",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "s3:CreateBucket",
                        "s3:PutEncryptionConfiguration",
                        "s3:PutBucketVersioning",
                        "s3:PutBucketPublicAccessBlock",
                        "s3:PutBucketLogging",
                    ],
                    resources=["arn:" + core.Aws.PARTITION + ":s3:::*"],
                ),
                iam.PolicyStatement(
                    actions=[
                        "sns:CreateTopic",
                        "sns:DeleteTopic",
                        "sns:Subscribe",
                        "sns:Unsubscribe",
                        "sns:GetTopicAttributes",
                        "sns:SetTopicAttributes",
                    ],
                    resources=[
                        (
                            f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:"
                            f"{pipeline_stack_name}*-PipelineNotification*"
                        ),
                        (
                            f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:"
                            f"{pipeline_stack_name}*-ModelMonitorPipelineNotification*"
                        ),
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "events:PutRule",
                        "events:DescribeRule",
                        "events:PutTargets",
                        "events:RemoveTargets",
                        "events:DeleteRule",
                        "events:PutEvents",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:rule/*",
                        f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                    ],
                ),
            ],
        )
        orchestrator_policy.attach_to_role(cloudformation_role)

        # Lambda function IAM setup
        lambda_passrole_policy = iam.PolicyStatement(actions=["iam:passrole"], resources=[cloudformation_role.role_arn])
        # API Gateway and lambda setup to enable provisioning pipelines through API calls
        provisioner_apigw_lambda = aws_apigateway_lambda.ApiGatewayToLambda(
            self,
            "PipelineOrchestration",
            lambda_function_props={
                "runtime": lambda_.Runtime.PYTHON_3_8,
                "handler": "index.handler",
                "code": lambda_.Code.from_asset("lambdas/pipeline_orchestration"),
            },
            api_gateway_props={
                "defaultMethodOptions": {
                    "authorizationType": apigw.AuthorizationType.IAM,
                },
                "restApiName": f"{core.Aws.STACK_NAME}-orchestrator",
                "proxy": False,
                "dataTraceEnabled": True,
            },
        )

        provision_resource = provisioner_apigw_lambda.api_gateway.root.add_resource("provisionpipeline")
        provision_resource.add_method("POST")
        status_resource = provisioner_apigw_lambda.api_gateway.root.add_resource("pipelinestatus")
        status_resource.add_method("POST")
        blueprint_repository_bucket.grant_read(provisioner_apigw_lambda.lambda_function)
        provisioner_apigw_lambda.lambda_function.add_to_role_policy(lambda_passrole_policy)
        orchestrator_policy.attach_to_role(provisioner_apigw_lambda.lambda_function.role)
        provisioner_apigw_lambda.lambda_function.add_to_role_policy(
            iam.PolicyStatement(actions=["xray:PutTraceSegments"], resources=["*"])
        )
        lambda_node = provisioner_apigw_lambda.lambda_function.node.default_child
        lambda_node.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W12",
                        "reason": "The xray permissions PutTraceSegments is not able to be bound to resources.",
                    }
                ]
            }
        }
        # Environment variables setup
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="BLUEPRINT_BUCKET_URL",
            value=str(blueprint_repository_bucket.bucket_regional_domain_name),
        )
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="BLUEPRINT_BUCKET", value=str(blueprint_repository_bucket.bucket_name)
        )
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="ACCESS_BUCKET", value=str(access_logs_bucket.bucket_name)
        )
        provisioner_apigw_lambda.lambda_function.add_environment(key="ASSETS_BUCKET", value=str(assets_s3_bucket_name))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="CFN_ROLE_ARN", value=str(cloudformation_role.role_arn)
        )
        provisioner_apigw_lambda.lambda_function.add_environment(key="PIPELINE_STACK_NAME", value=pipeline_stack_name)
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="NOTIFICATION_EMAIL", value=notification_email.value_as_string
        )
        provisioner_apigw_lambda.lambda_function.add_environment(key="LOG_LEVEL", value="DEBUG")
        cfn_policy_for_lambda = orchestrator_policy.node.default_child
        cfn_policy_for_lambda.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W76",
                        "reason": "A complex IAM policy is required for this resource.",
                    }
                ]
            }
        }

        ### Codepipeline with Git source definitions ###
        source_output = codepipeline.Artifact()
        # processing git_address to retrieve repo name
        repo_name_split = core.Fn.split("/", git_address.value_as_string)
        repo_name = core.Fn.select(5, repo_name_split)
        # getting codecommit repo cdk object using 'from_repository_name'
        repo = codecommit.Repository.from_repository_name(self, "AWSMLOpsFrameworkRepository", repo_name)
        codebuild_project = codebuild.PipelineProject(
            self,
            "Take config file",
            build_spec=codebuild.BuildSpec.from_object(
                {
                    "version": "0.2",
                    "phases": {
                        "build": {
                            "commands": [
                                "ls -a",
                                "aws lambda invoke --function-name "
                                + provisioner_apigw_lambda.lambda_function.function_name
                                + " --payload fileb://mlops-config.json response.json"
                                + " --invocation-type RequestResponse",
                            ]
                        }
                    },
                }
            ),
        )
        # Defining a Codepipeline project with CodeCommit as source
        codecommit_pipeline = codepipeline.Pipeline(
            self,
            "MLOpsCodeCommitPipeline",
            stages=[
                codepipeline.StageProps(
                    stage_name="Source",
                    actions=[
                        codepipeline_actions.CodeCommitSourceAction(
                            action_name="CodeCommit",
                            repository=repo,
                            branch="main",
                            output=source_output,
                        )
                    ],
                ),
                codepipeline.StageProps(
                    stage_name="TakeConfig",
                    actions=[
                        codepipeline_actions.CodeBuildAction(
                            action_name="provision_pipeline",
                            input=source_output,
                            outputs=[],
                            project=codebuild_project,
                        )
                    ],
                ),
            ],
            cross_account_keys=False,
        )
        codecommit_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=["lambda:InvokeFunction"],
                resources=[provisioner_apigw_lambda.lambda_function.function_arn],
            )
        )
        codebuild_project.add_to_role_policy(
            iam.PolicyStatement(
                actions=["lambda:InvokeFunction"],
                resources=[provisioner_apigw_lambda.lambda_function.function_arn],
            )
        )
        pipeline_child_nodes = codecommit_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W35",
                        "reason": "This is a managed bucket generated by CDK for codepipeline.",
                    },
                    {
                        "id": "W51",
                        "reason": "This is a managed bucket generated by CDK for codepipeline.",
                    },
                ]
            }
        }

        ### custom resource for operational metrics ###
        metricsMapping = core.CfnMapping(self, "AnonymousData", mapping={"SendAnonymousData": {"Data": "Yes"}})
        metrics_condition = core.CfnCondition(
            self,
            "AnonymousDatatoAWS",
            expression=core.Fn.condition_equals(metricsMapping.find_in_map("SendAnonymousData", "Data"), "Yes"),
        )

        helper_function = lambda_.Function(
            self,
            "SolutionHelper",
            code=lambda_.Code.from_asset("lambdas/solution_helper"),
            handler="lambda_function.handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(60),
        )

        createIdFunction = core.CustomResource(
            self,
            "CreateUniqueID",
            service_token=helper_function.function_arn,
            properties={"Resource": "UUID"},
            resource_type="Custom::CreateUUID",
        )

        sendDataFunction = core.CustomResource(
            self,
            "SendAnonymousData",
            service_token=helper_function.function_arn,
            properties={
                "Resource": "AnonymousMetric",
                "UUID": createIdFunction.get_att_string("UUID"),
                "gitSelected": git_address.value_as_string,
                "Region": core.Aws.REGION,
                "SolutionId": "SO0136",
                "Version": "%%VERSION%%",
            },
            resource_type="Custom::AnonymousData",
        )

        core.Aspects.of(helper_function).add(ConditionalResources(metrics_condition))
        core.Aspects.of(createIdFunction).add(ConditionalResources(metrics_condition))
        core.Aspects.of(sendDataFunction).add(ConditionalResources(metrics_condition))
        helper_function.node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W58",
                        "reason": "The lambda functions role already has permissions to write cloudwatch logs",
                    }
                ]
            }
        }

        # If user chooses Git as pipeline provision type, create codepipeline with Git repo as source
        core.Aspects.of(repo).add(ConditionalResources(git_address_provided))
        core.Aspects.of(codecommit_pipeline).add(ConditionalResources(git_address_provided))
        core.Aspects.of(codebuild_project).add(ConditionalResources(git_address_provided))

        # Create Template Interface
        self.template_options.metadata = {
            "AWS::CloudFormation::Interface": {
                "ParameterGroups": [
                    {
                        "Label": {"default": "MLOps Framework Settings"},
                        "Parameters": [
                            notification_email.logical_id,
                            git_address.logical_id,
                            existing_bucket.logical_id,
                        ],
                    }
                ],
                "ParameterLabels": {
                    f"{notification_email.logical_id}": {"default": "Notification Email (Required)"},
                    f"{git_address.logical_id}": {"default": "CodeCommit Repo URL Address (Optional)"},
                    f"{existing_bucket.logical_id}": {"default": "Name of an Existing S3 Bucket (Optional)"},
                },
            }
        }
        # Outputs #
        core.CfnOutput(
            self,
            id="BlueprintsBucket",
            value=f"https://s3.console.aws.amazon.com/s3/buckets/{blueprint_repository_bucket.bucket_name}",
            description="S3 Bucket to upload MLOps Framework Blueprints",
        )
        core.CfnOutput(
            self,
            id="AssetsBucket",
            value=f"https://s3.console.aws.amazon.com/s3/buckets/{assets_s3_bucket_name}",
            description="S3 Bucket to upload model artifact",
        )
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # API Gateway needs a resource policy granting the FHIR Works on AWS Lambda
        # execute permissions. The Lambda role ARN is passed at deployment as a CDK
        # context variable, and the FHIR Works Lambda needs a policy attached to its
        # execution role allowing it to invoke the API.
        # From --context resource-router-lambda-role="arn:aws:iam::123456789012:role/rolename"
        imported_resource_router_lambda_role = self.node.try_get_context(
            "resource-router-lambda-role"
        )
        # The Amazon ECS on AWS Fargate container implementing the connection manager
        # will be launched into a VPC that needs private and public subnets
        # and a NAT gateway or instance.
        # From --context vpc-id="vpc-123456"
        vpc_id = self.node.try_get_context("vpc-id")

        # The following parameters specify name of the HL7 server
        # that will be receiving transformed HL7v2 messages and TCP port
        # that it will be listening on
        # From --context hl7-server-name="hl7.example.com"
        # From --context hl7-port="2575"
        hl7_server_name = self.node.try_get_context("hl7-server-name")
        hl7_port = self.node.try_get_context("hl7-port")

        # In this proof of concept, the source of data for read interactions
        # is the S3 bucket where the mock HL7 server stores processed HL7 messages.
        # From --context test-server-output-bucket-name="DOC-EXAMPLE-BUCKET"
        test_server_output_bucket_name = self.node.try_get_context(
            "test-server-output-bucket-name"
        )

        # SQS queue
        # Custom transform lambda communicates with Connectivity Manager using this SQS queue
        queue = sqs.Queue(
            self, f"{COMPONENT_PREFIX}Queue", encryption=sqs.QueueEncryption.KMS_MANAGED
        )

        # S3 Bucket to retrieve HL7v2 messages in proof of concept deployment
        test_server_output_bucket = s3.Bucket.from_bucket_name(
            self, f"{COMPONENT_PREFIX}OutputBucket", test_server_output_bucket_name
        )

        # Transform Lambda
        # Reference implementation of Custom Transform component of Transform Execution Environment

        transform_lambda = lambda_.Function(
            self,
            f"{COMPONENT_PREFIX}TransformLambda",
            handler="transform.handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset(
                path.join(dirname, "../../lambda"),
                bundling={
                    "image": lambda_.Runtime.PYTHON_3_8.bundling_docker_image,
                    "command": [
                        "bash",
                        "-c",
                        " && ".join(
                            [
                                "pip install --no-cache-dir -r requirements.txt -t /asset-output",
                                "(tar -c --exclude-from=exclude.lst -f - .)|(cd /asset-output; tar -xf -)",
                            ]
                        ),
                    ],
                },
            ),
            timeout=core.Duration.seconds(60),
            environment=dict(
                SQS_QUEUE=queue.queue_url,
                # The following parameter is optional
                S3_BUCKET_NAME=test_server_output_bucket_name,
            ),
        )
        queue.grant_send_messages(transform_lambda)
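        # Inside the transform Lambda, the queue is assumed to be used roughly like
        # this (illustrative sketch; the real handler lives in ../../lambda):
        #
        #   import os
        #   import boto3
        #
        #   sqs_client = boto3.client("sqs")
        #   sqs_client.send_message(
        #       QueueUrl=os.environ["SQS_QUEUE"],
        #       MessageBody=hl7v2_message,
        #   )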

        # API Gateway with Lambda construct (using https://aws.amazon.com/solutions/constructs/patterns)
        # Reference implementation of Custom Transform component of Transform Execution Environment

        api_lambda = apigw_lambda.ApiGatewayToLambda(
            self,
            "ApiGw",
            existing_lambda_obj=transform_lambda,
            api_gateway_props=apigw.LambdaRestApiProps(
                handler=transform_lambda,
                proxy=False,
                rest_api_name=f"{COMPONENT_PREFIX_DASHES}-api",
                endpoint_export_name=f"{COMPONENT_PREFIX}ApiEndPoint",
                description=f"{COMPONENT_PREFIX} APIGW with Transform Lambda (FHIR to HL7v2)",
                default_method_options=apigw.MethodOptions(
                    authorization_type=apigw.AuthorizationType.IAM,
                ),
                policy=iam.PolicyDocument(
                    statements=[
                        iam.PolicyStatement(
                            actions=["execute-api:Invoke"],
                            effect=iam.Effect.ALLOW,
                            principals=[
                                iam.ArnPrincipal(imported_resource_router_lambda_role),
                            ],
                            resources=["execute-api:/*/*/*"],
                        )
                    ]
                ),
            ),
        )
        rest_api = api_lambda.api_gateway
        persistence = rest_api.root.add_resource("persistence")
        resource_type = persistence.add_resource("{resource_type}")
        resource_type.add_method("POST")
        resource_id = resource_type.add_resource("{id}")
        resource_id.add_method("GET")
        resource_id.add_method("PUT")
        resource_id.add_method("DELETE")

        # ECS Fargate Container (HL7v2 sender)
        # This container implements Connectivity Manager component
        # of Transform Execution Environment

        vpc = ec2.Vpc.from_lookup(self, "DefaultVpc", vpc_id=vpc_id)

        cluster = ecs.Cluster(self, f"{COMPONENT_PREFIX}Cluster", vpc=vpc)

        ecs_patterns.QueueProcessingFargateService(
            self,
            f"{COMPONENT_PREFIX}Service",
            cluster=cluster,
            image=ecs.ContainerImage.from_asset(path.join(dirname, "../../container")),
            queue=queue,
            desired_task_count=1,
            log_driver=ecs.LogDriver.aws_logs(
                stream_prefix=f"{COMPONENT_PREFIX}HL7Client",
                log_retention=logs.RetentionDays.ONE_DAY,
            ),
            environment=dict(
                SERVER_NAME=hl7_server_name,
                PORT_NUMBER=hl7_port,
            ),
        )
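
        # A hedged sketch of the container's main loop (not the shipped container
        # code): receive a message from the queue and forward it over MLLP, whose
        # framing is 0x0b <message> 0x1c 0x0d. QUEUE_NAME is the variable
        # QueueProcessingFargateService injects by default; SERVER_NAME and
        # PORT_NUMBER mirror the environment set above.
        def _example_hl7_sender_loop():
            import os
            import socket

            import boto3

            sqs_client = boto3.client("sqs")
            queue_url = sqs_client.get_queue_url(
                QueueName=os.environ["QUEUE_NAME"])["QueueUrl"]
            while True:
                messages = sqs_client.receive_message(
                    QueueUrl=queue_url, WaitTimeSeconds=20).get("Messages", [])
                for message in messages:
                    with socket.create_connection(
                            (os.environ["SERVER_NAME"],
                             int(os.environ["PORT_NUMBER"]))) as sock:
                        sock.sendall(b"\x0b" + message["Body"].encode() + b"\x1c\x0d")
                    sqs_client.delete_message(
                        QueueUrl=queue_url,
                        ReceiptHandle=message["ReceiptHandle"])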

        # The following permission grants are needed to support
        # read interactions with the integration transform
        test_server_output_bucket.grant_read(transform_lambda)

        transform_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["s3:ListBucket"],
                effect=iam.Effect.ALLOW,
                resources=[test_server_output_bucket.bucket_arn],
            )
        )
        transform_lambda.add_to_role_policy(
            iam.PolicyStatement(
                actions=["s3:GetObject"],
                effect=iam.Effect.ALLOW,
                resources=[test_server_output_bucket.arn_for_objects("*")],
            )
        )

        # CloudFormation Stack outputs
        # The following outputs are needed to configure the FHIR Works on AWS API interface
        core.CfnOutput(
            self,
            "TransformApiRootUrl",
            value=rest_api.url,
            export_name="TransformApiRootUrl",
        )
        core.CfnOutput(
            self,
            "TransformApiRegion",
            value=self.region,
            export_name="TransformApiRegion",
        )
        core.CfnOutput(
            self,
            "TransformApiAccountId",
            value=self.account,
            export_name="TransformApiAccountId",
        )

    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 *,
                 multi_account=False,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Get stack parameters:
        notification_email = create_notification_email_parameter(self)
        git_address = create_git_address_parameter(self)
        # Get the optional S3 assets bucket to use
        existing_bucket = create_existing_bucket_parameter(self)
        # Get the optional Amazon ECR repository to use
        existing_ecr_repo = create_existing_ecr_repo_parameter(self)
        # create only if multi_account template
        if multi_account:
            # create development parameters
            account_type = "development"
            dev_account_id = create_account_id_parameter(
                self, "DEV_ACCOUNT_ID", account_type)
            dev_org_id = create_org_id_parameter(self, "DEV_ORG_ID",
                                                 account_type)
            # create staging parameters
            account_type = "staging"
            staging_account_id = create_account_id_parameter(
                self, "STAGING_ACCOUNT_ID", account_type)
            staging_org_id = create_org_id_parameter(self, "STAGING_ORG_ID",
                                                     account_type)
            # create production parameters
            account_type = "production"
            prod_account_id = create_account_id_parameter(
                self, "PROD_ACCOUNT_ID", account_type)
            prod_org_id = create_org_id_parameter(self, "PROD_ORG_ID",
                                                  account_type)

        # Conditions
        git_address_provided = create_git_address_provided_condition(
            self, git_address)

        # client provided an existing S3 bucket name, to be used for assets
        existing_bucket_provided = create_existing_bucket_provided_condition(
            self, existing_bucket)

        # client provided an existing Amazon ECR name
        existing_ecr_provided = create_existing_ecr_provided_condition(
            self, existing_ecr_repo)

        # S3 bucket needs to be created for assets
        create_new_bucket = create_new_bucket_condition(self, existing_bucket)

        # Amazon ECR repo needs to be created for custom algorithms
        create_new_ecr_repo = create_new_ecr_repo_condition(
            self, existing_ecr_repo)

        # Constants
        pipeline_stack_name = "mlops-pipeline"

        # CDK Resources setup
        access_logs_bucket = s3.Bucket(
            self,
            "accessLogs",
            encryption=s3.BucketEncryption.S3_MANAGED,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )

        # Apply secure transfer bucket policy
        apply_secure_bucket_policy(access_logs_bucket)

        # This is a logging bucket.
        access_logs_bucket.node.default_child.cfn_options.metadata = suppress_s3_access_policy(
        )

        # Import user-provided S3 bucket, if any. s3.Bucket.from_bucket_arn is used instead of
        # s3.Bucket.from_bucket_name to allow a cross-account bucket.
        client_existing_bucket = s3.Bucket.from_bucket_arn(
            self,
            "ClientExistingBucket",
            f"arn:aws:s3:::{existing_bucket.value_as_string.strip()}",
        )

        # Create the resource if existing_bucket_provided condition is True
        core.Aspects.of(client_existing_bucket).add(
            ConditionalResources(existing_bucket_provided))

        # Import user provided Amazon ECR repository

        client_ecr_repo = ecr.Repository.from_repository_name(
            self, "ClientExistingECRRepo", existing_ecr_repo.value_as_string)
        # Create the resource if existing_ecr_provided condition is True
        core.Aspects.of(client_ecr_repo).add(
            ConditionalResources(existing_ecr_provided))

        # Creating assets bucket so that users can upload ML Models to it.
        assets_bucket = s3.Bucket(
            self,
            "pipeline-assets-" + str(uuid.uuid4()),
            versioned=True,
            encryption=s3.BucketEncryption.S3_MANAGED,
            server_access_logs_bucket=access_logs_bucket,
            server_access_logs_prefix="assets_bucket_access_logs",
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )

        # Apply secure transport bucket policy
        apply_secure_bucket_policy(assets_bucket)
        s3_actions = ["s3:GetObject", "s3:ListBucket"]
        # if multi account
        if multi_account:
            # add permissions for other accounts to access the assets bucket

            assets_bucket.add_to_resource_policy(
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=s3_actions,
                    principals=[
                        iam.AccountPrincipal(dev_account_id.value_as_string),
                        iam.AccountPrincipal(
                            staging_account_id.value_as_string),
                        iam.AccountPrincipal(prod_account_id.value_as_string),
                    ],
                    resources=[
                        assets_bucket.bucket_arn,
                        f"{assets_bucket.bucket_arn}/*"
                    ],
                ))

        # Create the resource if create_new_bucket condition is True
        core.Aspects.of(assets_bucket).add(
            ConditionalResources(create_new_bucket))

        # Get assets S3 bucket's name/arn, based on the condition
        assets_s3_bucket_name = core.Fn.condition_if(
            existing_bucket_provided.logical_id,
            client_existing_bucket.bucket_name,
            assets_bucket.bucket_name,
        ).to_string()
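
        # Note: condition_if synthesizes to a CloudFormation Fn::If that is resolved
        # at deploy time rather than synth time, roughly:
        #   {"Fn::If": [<existing_bucket_provided>, <client bucket name>, <new bucket name>]}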

        # Creating Amazon ECR repository
        ecr_repo = ecr.Repository(self, "ECRRepo", image_scan_on_push=True)

        # if multi account
        if multi_account:
            # add permissions for other accounts to pull images
            ecr_repo.add_to_resource_policy(
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=[
                        "ecr:DescribeImages",
                        "ecr:DescribeRepositories",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                    ],
                    principals=[
                        iam.AccountPrincipal(dev_account_id.value_as_string),
                        iam.AccountPrincipal(
                            staging_account_id.value_as_string),
                        iam.AccountPrincipal(prod_account_id.value_as_string),
                    ],
                ))
        # Create the resource if create_new_ecr condition is True
        core.Aspects.of(ecr_repo).add(
            ConditionalResources(create_new_ecr_repo))

        # Get ECR repo's name based on the condition
        ecr_repo_name = core.Fn.condition_if(
            existing_ecr_provided.logical_id,
            client_ecr_repo.repository_name,
            ecr_repo.repository_name,
        ).to_string()

        # Get ECR repo's arn based on the condition
        ecr_repo_arn = core.Fn.condition_if(
            existing_ecr_provided.logical_id,
            client_ecr_repo.repository_arn,
            ecr_repo.repository_arn,
        ).to_string()

        blueprints_bucket_name = "blueprint-repository-" + str(uuid.uuid4())
        blueprint_repository_bucket = s3.Bucket(
            self,
            blueprints_bucket_name,
            encryption=s3.BucketEncryption.S3_MANAGED,
            server_access_logs_bucket=access_logs_bucket,
            server_access_logs_prefix=blueprints_bucket_name,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
        )
        # Apply secure transport bucket policy
        apply_secure_bucket_policy(blueprint_repository_bucket)

        # if multi account
        if multi_account:
            # add permissions for other accounts to access the blueprint bucket
            blueprint_repository_bucket.add_to_resource_policy(
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=s3_actions,
                    principals=[
                        iam.AccountPrincipal(dev_account_id.value_as_string),
                        iam.AccountPrincipal(
                            staging_account_id.value_as_string),
                        iam.AccountPrincipal(prod_account_id.value_as_string),
                    ],
                    resources=[
                        blueprint_repository_bucket.bucket_arn,
                        f"{blueprint_repository_bucket.bucket_arn}/*"
                    ],
                ))

        # Custom resource to copy source bucket content to blueprints bucket
        custom_resource_lambda_fn = lambda_.Function(
            self,
            "CustomResourceLambda",
            code=lambda_.Code.from_asset("lambdas/custom_resource"),
            handler="index.on_event",
            runtime=lambda_.Runtime.PYTHON_3_8,
            environment={
                "source_bucket": "https://%%BUCKET_NAME%%-" + core.Aws.REGION +
                ".s3.amazonaws.com/%%SOLUTION_NAME%%/%%VERSION%%",
                "destination_bucket": blueprint_repository_bucket.bucket_name,
                "LOG_LEVEL": "INFO",
            },
            timeout=core.Duration.seconds(60),
        )

        custom_resource_lambda_fn.node.default_child.cfn_options.metadata = suppress_lambda_policies(
        )
        blueprint_repository_bucket.grant_write(custom_resource_lambda_fn)
        custom_resource = core.CustomResource(
            self,
            "CustomResourceCopyAssets",
            service_token=custom_resource_lambda_fn.function_arn,
        )
        custom_resource.node.add_dependency(blueprint_repository_bucket)
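
        # A hedged sketch of the contract index.on_event must satisfy (the shipped
        # handler lives in lambdas/custom_resource and may differ): CloudFormation
        # sends Create/Update/Delete lifecycle events to the service token.
        def _example_on_event(event, context):
            if event["RequestType"] in ("Create", "Update"):
                pass  # copy source_bucket contents to destination_bucket here
            # Delete is typically a no-op so stack deletion is not blocked
            return {"PhysicalResourceId": "CopyBlueprintAssets"}
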
        # IAM policies setup ###
        cloudformation_role = iam.Role(
            self,
            "mlopscloudformationrole",
            assumed_by=iam.ServicePrincipal("cloudformation.amazonaws.com"),
        )
        lambda_invoke_action = "lambda:InvokeFunction"
        # Cloudformation policy setup
        orchestrator_policy = iam.Policy(
            self,
            "lambdaOrchestratorPolicy",
            statements=[
                iam.PolicyStatement(
                    actions=[
                        "cloudformation:CreateStack",
                        "cloudformation:DeleteStack",
                        "cloudformation:UpdateStack",
                        "cloudformation:ListStackResources",
                    ],
                    resources=[
                        (f"arn:{core.Aws.PARTITION}:cloudformation:{core.Aws.REGION}:"
                         f"{core.Aws.ACCOUNT_ID}:stack/{pipeline_stack_name}*/*"
                         ),
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "iam:CreateRole",
                        "iam:DeleteRole",
                        "iam:DeleteRolePolicy",
                        "iam:GetRole",
                        "iam:GetRolePolicy",
                        "iam:PassRole",
                        "iam:PutRolePolicy",
                        "iam:AttachRolePolicy",
                        "iam:DetachRolePolicy",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:iam::{core.Aws.ACCOUNT_ID}:role/{pipeline_stack_name}*"
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "ecr:CreateRepository",
                        "ecr:DescribeRepositories",
                    ],
                    resources=[
                        (f"arn:{core.Aws.PARTITION}:ecr:{core.Aws.REGION}:"
                         f"{core.Aws.ACCOUNT_ID}:repository/{ecr_repo_name}")
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "codebuild:CreateProject",
                        "codebuild:DeleteProject",
                        "codebuild:BatchGetProjects",
                    ],
                    resources=[
                        (f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                         f"{core.Aws.ACCOUNT_ID}:project/ContainerFactory*"),
                        (f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                         f"{core.Aws.ACCOUNT_ID}:project/VerifySagemaker*"),
                        (f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                         f"{core.Aws.ACCOUNT_ID}:report-group/*"),
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "lambda:CreateFunction",
                        "lambda:DeleteFunction",
                        lambda_invoke_action,
                        "lambda:PublishLayerVersion",
                        "lambda:DeleteLayerVersion",
                        "lambda:GetLayerVersion",
                        "lambda:GetFunctionConfiguration",
                        "lambda:GetFunction",
                        "lambda:AddPermission",
                        "lambda:RemovePermission",
                        "lambda:UpdateFunctionConfiguration",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:layer:*",
                        f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:function:*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=s3_actions,
                    resources=[
                        blueprint_repository_bucket.bucket_arn,
                        blueprint_repository_bucket.arn_for_objects("*"),
                        f"arn:{core.Aws.PARTITION}:s3:::{assets_s3_bucket_name}/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "codepipeline:CreatePipeline",
                        "codepipeline:UpdatePipeline",
                        "codepipeline:DeletePipeline",
                        "codepipeline:GetPipeline",
                        "codepipeline:GetPipelineState",
                    ],
                    resources=[
                        (f"arn:{core.Aws.PARTITION}:codepipeline:{core.Aws.REGION}:"
                         f"{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*")
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "apigateway:POST",
                        "apigateway:PATCH",
                        "apigateway:DELETE",
                        "apigateway:GET",
                        "apigateway:PUT",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis/*",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/account",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans",
                        f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans/*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "logs:CreateLogGroup",
                        "logs:DescribeLogGroups",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:*",
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "s3:CreateBucket",
                        "s3:PutEncryptionConfiguration",
                        "s3:PutBucketVersioning",
                        "s3:PutBucketPublicAccessBlock",
                        "s3:PutBucketLogging",
                    ],
                    resources=[f"arn:{core.Aws.PARTITION}:s3:::*"],
                ),
                iam.PolicyStatement(
                    actions=[
                        "s3:PutObject",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:s3:::{assets_s3_bucket_name}/*"
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "sns:CreateTopic",
                        "sns:DeleteTopic",
                        "sns:Subscribe",
                        "sns:Unsubscribe",
                        "sns:GetTopicAttributes",
                        "sns:SetTopicAttributes",
                    ],
                    resources=[
                        (f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:"
                         f"{pipeline_stack_name}*-*PipelineNotification*")
                    ],
                ),
                iam.PolicyStatement(
                    actions=[
                        "events:PutRule",
                        "events:DescribeRule",
                        "events:PutTargets",
                        "events:RemoveTargets",
                        "events:DeleteRule",
                        "events:PutEvents",
                    ],
                    resources=[
                        f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:rule/*",
                        f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                    ],
                ),
            ],
        )
        orchestrator_policy.attach_to_role(cloudformation_role)

        # Lambda function IAM setup
        lambda_passrole_policy = iam.PolicyStatement(
            actions=["iam:PassRole"], resources=[cloudformation_role.role_arn])
        # create sagemaker layer
        sm_layer = sagemaker_layer(self, blueprint_repository_bucket)
        # make sure the sagemaker code is uploaded first to the blueprints bucket
        sm_layer.node.add_dependency(custom_resource)
        # API Gateway and lambda setup to enable provisioning pipelines through API calls
        provisioner_apigw_lambda = aws_apigateway_lambda.ApiGatewayToLambda(
            self,
            "PipelineOrchestration",
            lambda_function_props={
                "runtime": lambda_.Runtime.PYTHON_3_8,
                "handler": "index.handler",
                "code":
                lambda_.Code.from_asset("lambdas/pipeline_orchestration"),
                "layers": [sm_layer],
                "timeout": core.Duration.minutes(10),
            },
            api_gateway_props={
                "defaultMethodOptions": {
                    "authorizationType": apigw.AuthorizationType.IAM,
                },
                "restApiName": f"{core.Aws.STACK_NAME}-orchestrator",
                "proxy": False,
                "dataTraceEnabled": True,
            },
        )

        # add lambda suppressions
        provisioner_apigw_lambda.lambda_function.node.default_child.cfn_options.metadata = suppress_lambda_policies(
        )

        provision_resource = provisioner_apigw_lambda.api_gateway.root.add_resource(
            "provisionpipeline")
        provision_resource.add_method("POST")
        status_resource = provisioner_apigw_lambda.api_gateway.root.add_resource(
            "pipelinestatus")
        status_resource.add_method("POST")
        blueprint_repository_bucket.grant_read(
            provisioner_apigw_lambda.lambda_function)
        provisioner_apigw_lambda.lambda_function.add_to_role_policy(
            lambda_passrole_policy)
        orchestrator_policy.attach_to_role(
            provisioner_apigw_lambda.lambda_function.role)

        # Environment variables setup
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="BLUEPRINT_BUCKET_URL",
            value=str(blueprint_repository_bucket.bucket_regional_domain_name),
        )
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="BLUEPRINT_BUCKET",
            value=str(blueprint_repository_bucket.bucket_name))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="ACCESS_BUCKET", value=str(access_logs_bucket.bucket_name))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="ASSETS_BUCKET", value=str(assets_s3_bucket_name))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="CFN_ROLE_ARN", value=str(cloudformation_role.role_arn))
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="PIPELINE_STACK_NAME", value=pipeline_stack_name)
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="NOTIFICATION_EMAIL", value=notification_email.value_as_string)
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="REGION", value=core.Aws.REGION)
        provisioner_apigw_lambda.lambda_function.add_environment(
            key="IS_MULTI_ACCOUNT", value=str(multi_account))

        # if multi account
        if multi_account:
            provisioner_apigw_lambda.lambda_function.add_environment(
                key="DEV_ACCOUNT_ID", value=dev_account_id.value_as_string)
            provisioner_apigw_lambda.lambda_function.add_environment(
                key="DEV_ORG_ID", value=dev_org_id.value_as_string)

            provisioner_apigw_lambda.lambda_function.add_environment(
                key="STAGING_ACCOUNT_ID",
                value=staging_account_id.value_as_string)
            provisioner_apigw_lambda.lambda_function.add_environment(
                key="STAGING_ORG_ID", value=staging_org_id.value_as_string)

            provisioner_apigw_lambda.lambda_function.add_environment(
                key="PROD_ACCOUNT_ID", value=prod_account_id.value_as_string)
            provisioner_apigw_lambda.lambda_function.add_environment(
                key="PROD_ORG_ID", value=prod_org_id.value_as_string)

        provisioner_apigw_lambda.lambda_function.add_environment(
            key="ECR_REPO_NAME", value=ecr_repo_name)

        provisioner_apigw_lambda.lambda_function.add_environment(
            key="ECR_REPO_ARN", value=ecr_repo_arn)

        provisioner_apigw_lambda.lambda_function.add_environment(
            key="LOG_LEVEL", value="DEBUG")
        cfn_policy_for_lambda = orchestrator_policy.node.default_child
        cfn_policy_for_lambda.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W76",
                        "reason": "A complex IAM policy is required for this resource.",
                    }
                ]
            }
        }

        # Codepipeline with Git source definitions ###
        source_output = codepipeline.Artifact()
        # processing git_address to retrieve repo name
        repo_name_split = core.Fn.split("/", git_address.value_as_string)
        repo_name = core.Fn.select(5, repo_name_split)
        # getting codecommit repo cdk object using 'from_repository_name'
        repo = codecommit.Repository.from_repository_name(
            self, "AWSMLOpsFrameworkRepository", repo_name)
        codebuild_project = codebuild.PipelineProject(
            self,
            "Take config file",
            build_spec=codebuild.BuildSpec.from_object({
                "version": "0.2",
                "phases": {
                    "build": {
                        "commands": [
                            "ls -a",
                            "aws lambda invoke --function-name " +
                            provisioner_apigw_lambda.lambda_function.
                            function_name +
                            " --payload fileb://mlops-config.json response.json"
                            + " --invocation-type RequestResponse",
                        ]
                    }
                },
            }),
        )
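
        # The buildspec above expects an mlops-config.json at the repository root.
        # An illustrative (assumed) shape, mirroring the provisioner's API payload:
        #   {
        #       "pipeline_type": "byom_realtime_builtin",
        #       "model_name": "my-model",
        #       "model_artifact_location": "model.tar.gz",
        #       "inference_instance": "ml.m5.large"
        #   }
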
        # Defining a Codepipeline project with CodeCommit as source
        codecommit_pipeline = codepipeline.Pipeline(
            self,
            "MLOpsCodeCommitPipeline",
            stages=[
                codepipeline.StageProps(
                    stage_name="Source",
                    actions=[
                        codepipeline_actions.CodeCommitSourceAction(
                            action_name="CodeCommit",
                            repository=repo,
                            branch="main",
                            output=source_output,
                        )
                    ],
                ),
                codepipeline.StageProps(
                    stage_name="TakeConfig",
                    actions=[
                        codepipeline_actions.CodeBuildAction(
                            action_name="provision_pipeline",
                            input=source_output,
                            outputs=[],
                            project=codebuild_project,
                        )
                    ],
                ),
            ],
            cross_account_keys=False,
        )
        codecommit_pipeline.add_to_role_policy(
            iam.PolicyStatement(
                actions=[lambda_invoke_action],
                resources=[
                    provisioner_apigw_lambda.lambda_function.function_arn
                ],
            ))
        codebuild_project.add_to_role_policy(
            iam.PolicyStatement(
                actions=[lambda_invoke_action],
                resources=[
                    provisioner_apigw_lambda.lambda_function.function_arn
                ],
            ))
        pipeline_child_nodes = codecommit_pipeline.node.find_all()
        pipeline_child_nodes[1].node.default_child.cfn_options.metadata = {
            "cfn_nag": {
                "rules_to_suppress": [
                    {
                        "id": "W35",
                        "reason": "This is a managed bucket generated by CDK for codepipeline.",
                    },
                    {
                        "id": "W51",
                        "reason": "This is a managed bucket generated by CDK for codepipeline.",
                    },
                ]
            }
        }

        # Custom resource for operational metrics ###
        metrics_mapping = core.CfnMapping(
            self,
            "AnonymousData",
            mapping={"SendAnonymousData": {"Data": "Yes"}},
        )
        metrics_condition = core.CfnCondition(
            self,
            "AnonymousDatatoAWS",
            expression=core.Fn.condition_equals(
                metrics_mapping.find_in_map("SendAnonymousData", "Data"),
                "Yes"),
        )

        helper_function = lambda_.Function(
            self,
            "SolutionHelper",
            code=lambda_.Code.from_asset("lambdas/solution_helper"),
            handler="lambda_function.handler",
            runtime=lambda_.Runtime.PYTHON_3_8,
            timeout=core.Duration.seconds(60),
        )

        helper_function.node.default_child.cfn_options.metadata = suppress_lambda_policies(
        )
        create_id_function = core.CustomResource(
            self,
            "CreateUniqueID",
            service_token=helper_function.function_arn,
            properties={"Resource": "UUID"},
            resource_type="Custom::CreateUUID",
        )

        send_data_function = core.CustomResource(
            self,
            "SendAnonymousData",
            service_token=helper_function.function_arn,
            properties={
                "Resource": "AnonymousMetric",
                "UUID": create_id_function.get_att_string("UUID"),
                "gitSelected": git_address.value_as_string,
                "Region": core.Aws.REGION,
                "SolutionId": "SO0136",
                "Version": "%%VERSION%%",
            },
            resource_type="Custom::AnonymousData",
        )

        core.Aspects.of(helper_function).add(
            ConditionalResources(metrics_condition))
        core.Aspects.of(create_id_function).add(
            ConditionalResources(metrics_condition))
        core.Aspects.of(send_data_function).add(
            ConditionalResources(metrics_condition))

        # If user chooses Git as pipeline provision type, create codepipeline with Git repo as source
        core.Aspects.of(repo).add(ConditionalResources(git_address_provided))
        core.Aspects.of(codecommit_pipeline).add(
            ConditionalResources(git_address_provided))
        core.Aspects.of(codebuild_project).add(
            ConditionalResources(git_address_provided))

        # Create Template Interface
        parameters_list = [
            notification_email.logical_id,
            git_address.logical_id,
            existing_bucket.logical_id,
            existing_ecr_repo.logical_id,
        ]

        # if multi account
        if multi_account:
            parameters_list.extend([
                dev_account_id.logical_id,
                dev_org_id.logical_id,
                staging_account_id.logical_id,
                staging_org_id.logical_id,
                prod_account_id.logical_id,
                prod_org_id.logical_id,
            ])

        parameters_labels = {
            f"{notification_email.logical_id}": {
                "default": "Notification Email (Required)"
            },
            f"{git_address.logical_id}": {
                "default": "CodeCommit Repo URL Address (Optional)"
            },
            f"{existing_bucket.logical_id}": {
                "default": "Name of an Existing S3 Bucket (Optional)"
            },
            f"{existing_ecr_repo.logical_id}": {
                "default":
                "Name of an Existing Amazon ECR repository (Optional)"
            },
        }

        if multi_account:
            parameters_labels.update({
                f"{dev_account_id.logical_id}": {
                    "default": "Development Account ID (Required)"
                },
                f"{dev_org_id.logical_id}": {
                    "default":
                    "Development Account Organizational Unit ID (Required)"
                },
                f"{staging_account_id.logical_id}": {
                    "default": "Staging Account ID (Required)"
                },
                f"{staging_org_id.logical_id}": {
                    "default":
                    "Staging Account Organizational Unit ID (Required)"
                },
                f"{prod_account_id.logical_id}": {
                    "default": "Production Account ID (Required)"
                },
                f"{prod_org_id.logical_id}": {
                    "default":
                    "Production Account Organizational Unit ID (Required)"
                },
            })
        self.template_options.metadata = {
            "AWS::CloudFormation::Interface": {
                "ParameterGroups": [{
                    "Label": {
                        "default": "MLOps Framework Settings"
                    },
                    "Parameters": parameters_list,
                }],
                "ParameterLabels": parameters_labels,
            }
        }
        # Outputs #
        core.CfnOutput(
            self,
            id="BlueprintsBucket",
            value=f"https://s3.console.aws.amazon.com/s3/buckets/{blueprint_repository_bucket.bucket_name}",
            description="S3 Bucket to upload MLOps Framework Blueprints",
        )
        core.CfnOutput(
            self,
            id="AssetsBucket",
            value=f"https://s3.console.aws.amazon.com/s3/buckets/{assets_s3_bucket_name}",
            description="S3 Bucket to upload model artifact",
        )
        core.CfnOutput(
            self,
            id="ECRRepoName",
            value=ecr_repo_name,
            description="Amazon ECR repository's name",
        )
        core.CfnOutput(
            self,
            id="ECRRepoArn",
            value=ecr_repo_arn,
            description="Amazon ECR repository's arn",
        )
Example #6

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Parameters #
        assets_bucket_name = create_assets_bucket_name_parameter(self)
        blueprint_bucket_name = create_blueprint_bucket_name_parameter(self)
        custom_algorithms_ecr_repo_arn = create_custom_algorithms_ecr_repo_arn_parameter(
            self)
        kms_key_arn = create_kms_key_arn_parameter(self)
        algorithm_image_uri = create_algorithm_image_uri_parameter(self)
        model_name = create_model_name_parameter(self)
        model_artifact_location = create_model_artifact_location_parameter(
            self)
        data_capture_location = create_data_capture_location_parameter(self)
        inference_instance = create_inference_instance_parameter(self)

        # Conditions
        custom_algorithms_ecr_repo_arn_provided = create_custom_algorithms_ecr_repo_arn_provided_condition(
            self, custom_algorithms_ecr_repo_arn)
        kms_key_arn_provided = create_kms_key_arn_provided_condition(
            self, kms_key_arn)

        # Resources #
        # getting blueprint bucket object from its name - will be used later in the stack
        blueprint_bucket = s3.Bucket.from_bucket_name(
            self, "BlueprintBucket", blueprint_bucket_name.value_as_string)

        # provision api gateway and lambda for inference using solution constructs
        inference_api_gateway = aws_apigateway_lambda.ApiGatewayToLambda(
            self,
            "BYOMInference",
            lambda_function_props={
                "runtime": lambda_.Runtime.PYTHON_3_8,
                "handler": "main.handler",
                "code": lambda_.Code.from_bucket(
                    blueprint_bucket, "blueprints/byom/lambdas/inference.zip"),
            },
            api_gateway_props={
                "defaultMethodOptions": {
                    "authorizationType": apigw.AuthorizationType.IAM,
                },
                "restApiName": f"{core.Aws.STACK_NAME}-inference",
                "proxy": False,
            },
        )
        # add suppressions
        inference_api_gateway.lambda_function.node.default_child.cfn_options.metadata = suppress_lambda_policies(
        )
        provision_resource = inference_api_gateway.api_gateway.root.add_resource(
            "inference")
        provision_resource.add_method("POST")
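
        # An illustrative (assumed) request against the resource registered above;
        # the inference Lambda forwards the body to the SageMaker endpoint:
        #   POST https://<api-id>.execute-api.<region>.amazonaws.com/prod/inference
        #   {"payload": "1.0,2.0,3.0", "content_type": "text/csv"}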

        # create Sagemaker role
        sagemaker_role = create_sagemaker_role(
            self,
            "MLOpsRealtimeSagemakerRole",
            custom_algorithms_ecr_arn=custom_algorithms_ecr_repo_arn.value_as_string,
            kms_key_arn=kms_key_arn.value_as_string,
            assets_bucket_name=assets_bucket_name.value_as_string,
            input_bucket_name=assets_bucket_name.value_as_string,
            input_s3_location=assets_bucket_name.value_as_string,
            output_s3_location=data_capture_location.value_as_string,
            ecr_repo_arn_provided_condition=custom_algorithms_ecr_repo_arn_provided,
            kms_key_arn_provided_condition=kms_key_arn_provided,
        )

        # create sagemaker model
        sagemaker_model = create_sagemaker_model(
            self,
            "MLOpsSagemakerModel",
            execution_role=sagemaker_role,
            primary_container={
                "image": algorithm_image_uri.value_as_string,
                "modelDataUrl": f"s3://{assets_bucket_name.value_as_string}/{model_artifact_location.value_as_string}",
            },
            tags=[{
                "key": "model_name",
                "value": model_name.value_as_string
            }],
        )

        # Create Sagemaker EndpointConfig
        sagemaker_endpoint_config = create_sagemaker_endpoint_config(
            self,
            "MLOpsSagemakerEndpointConfig",
            sagemaker_model.attr_model_name,
            model_name.value_as_string,
            inference_instance.value_as_string,
            data_capture_location.value_as_string,
            core.Fn.condition_if(kms_key_arn_provided.logical_id,
                                 kms_key_arn.value_as_string,
                                 core.Aws.NO_VALUE).to_string(),
        )

        # create a dependency on the model
        sagemaker_endpoint_config.add_depends_on(sagemaker_model)

        # create Sagemaker endpoint
        sagemaker_endpoint = create_sagemaker_endpoint(
            self,
            "MLOpsSagemakerEndpoint",
            sagemaker_endpoint_config.attr_endpoint_config_name,
            model_name.value_as_string,
        )

        # add dependency on endpoint config
        sagemaker_endpoint.add_depends_on(sagemaker_endpoint_config)

        # Create Lambda - sagemakerendpoint
        LambdaToSagemakerEndpoint(
            self,
            "LambdaSagmakerEndpoint",
            existing_sagemaker_endpoint_obj=sagemaker_endpoint,
            existing_lambda_obj=inference_api_gateway.lambda_function,
        )
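
        # LambdaToSagemakerEndpoint passes the endpoint name to the inference Lambda
        # as an environment variable. A hedged sketch of the handler side (the
        # shipped inference.zip may differ, and the variable name is an assumption):
        def _example_inference_handler(event, context):
            import os

            import boto3

            response = boto3.client("sagemaker-runtime").invoke_endpoint(
                EndpointName=os.environ["SAGEMAKER_ENDPOINT_NAME"],
                ContentType=event.get("content_type", "text/csv"),
                Body=event["payload"],
            )
            return response["Body"].read().decode("utf-8")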

        # Outputs #
        core.CfnOutput(
            self,
            id="SageMakerModelName",
            value=sagemaker_model.attr_model_name,
        )
        core.CfnOutput(
            self,
            id="SageMakerEndpointConfigName",
            value=sagemaker_endpoint_config.attr_endpoint_config_name,
        )
        core.CfnOutput(
            self,
            id="SageMakerEndpointName",
            value=sagemaker_endpoint.attr_endpoint_name,
        )
        core.CfnOutput(
            self,
            id="EndpointDataCaptureLocation",
            value=f"https://s3.console.aws.amazon.com/s3/buckets/{data_capture_location.value_as_string}/",
            description="Endpoint data capture location (to be used by Model Monitor)",
        )
Example #7

    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        bundling_options = core.BundlingOptions(
            image=_lambda.Runtime.PYTHON_3_7.bundling_docker_image,
            command=[
                'bash',
                '-c',
                'pip install -r requirements.txt -t /asset-output && rsync -r . /asset-output',
            ])
        self.hello_lambda_source_code = _lambda.Code.from_asset(
            'lambda', bundling=bundling_options)

        self.hit_lambda_source_code = _lambda.Code.from_asset(
            'hit_lambda', bundling=bundling_options)

        self.hello_func = _lambda.Function(
            self,
            'HelloHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler='hello.handler',
            code=self.hello_lambda_source_code,
            environment={'testkey': 'testvalue'})

        self.hit_func = _lambda.Function(
            self,
            'HitHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            handler='hitcounter.handler',
            code=self.hit_lambda_source_code,
            environment={
                'DOWNSTREAM_FUNCTION_NAME': self.hello_func.function_name
            },
            initial_policy=[
                iam.PolicyStatement(
                    actions=[
                        "dynamodb:PutItem", "dynamodb:DescribeTable",
                        "dynamodb:UpdateItem"
                    ],
                    resources=["arn:aws:dynamodb:*:*:table/Hits"])
            ])
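
        # A hedged sketch of hitcounter.handler wired above (the actual hit_lambda
        # source is not shown in this example): count the hit in the Hits table,
        # then proxy the event to the downstream function.
        def _example_hitcounter_handler(event, context):
            import json
            import os

            import boto3

            boto3.resource("dynamodb").Table("Hits").update_item(
                Key={"path": event["path"]},
                UpdateExpression="ADD hits :incr",
                ExpressionAttributeValues={":incr": 1},
            )
            invoked = boto3.client("lambda").invoke(
                FunctionName=os.environ["DOWNSTREAM_FUNCTION_NAME"],
                Payload=json.dumps(event),
            )
            return json.loads(invoked["Payload"].read())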

        self.hit_counter = lambda_ddb.LambdaToDynamoDB(
            self,
            'LambdaToDynamoDB',
            deploy_lambda=False,
            existing_lambda_obj=self.hit_func,
            dynamo_table_props=ddb.TableProps(
                table_name='Hits',
                partition_key={
                    'name': 'path',
                    'type': ddb.AttributeType.STRING
                },
                removal_policy=core.RemovalPolicy.DESTROY))

        self.hello_func.grant_invoke(self.hit_counter.lambda_function)

        # API Gateway (no auth) fronting the hit counter Lambda
        apigw_lambda.ApiGatewayToLambda(
            self,
            'ApiGatewayToLambda',
            deploy_lambda=False,
            existing_lambda_obj=self.hit_counter.lambda_function,
            api_gateway_props=apigw.RestApiProps(
                default_method_options=apigw.MethodOptions(
                    authorization_type=apigw.AuthorizationType.NONE)))