def CdkDeploySimplePipeline(self, name: str, repo, branch: str,
                            src: codepipeline.Artifact,
                            output: codepipeline.Artifact):
    cdk_deploy = self.CdkDeployProject(f"{name}-CDKDeploy", stage=branch)
    cdk_deploy.role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            resources=["*"],
            actions=["cloudformation:*", "ec2:*", "s3:*"]))
    return codepipeline.Pipeline(
        self, name,
        stages=[
            codepipeline.StageProps(
                stage_name="Source",
                actions=[
                    codepipeline_actions.CodeCommitSourceAction(
                        action_name="CodeCommit_Source",
                        repository=repo,
                        branch=branch,
                        output=src)
                ]),
            codepipeline.StageProps(
                stage_name="Deploy",
                actions=[
                    codepipeline_actions.CodeBuildAction(
                        action_name="CdkDeploy",
                        project=cdk_deploy,
                        input=src,
                        outputs=[output])
                ]),
        ])
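# A minimal usage sketch, assuming this method lives on a Stack that also
# defines CdkDeployProject and that `repo` is an existing CodeCommit
# repository; the artifact and pipeline names below are illustrative, not
# from the source.
source_artifact = codepipeline.Artifact("Source")
deploy_output = codepipeline.Artifact("DeployOutput")
pipeline = self.CdkDeploySimplePipeline(
    "my-service-pipeline",   # hypothetical name
    repo=repo,
    branch="main",
    src=source_artifact,
    output=deploy_output,
)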
def __init__(self, app: core.App, id: str, props, **kwargs) -> None:
    super().__init__(app, id, **kwargs)

    # define the s3 artifact
    source_output = aws_codepipeline.Artifact(artifact_name='source')

    # define the pipeline
    pipeline = aws_codepipeline.Pipeline(
        self, "Pipeline",
        pipeline_name=f"{props['namespace']}",
        artifact_bucket=props['bucket'],
        stages=[
            aws_codepipeline.StageProps(
                stage_name='Source',
                actions=[
                    aws_codepipeline_actions.S3SourceAction(
                        bucket=props['bucket'],
                        bucket_key='source.zip',
                        action_name='S3Source',
                        run_order=1,
                        output=source_output,
                        trigger=aws_codepipeline_actions.S3Trigger.POLL),
                ]),
            aws_codepipeline.StageProps(
                stage_name='Build',
                actions=[
                    aws_codepipeline_actions.CodeBuildAction(
                        action_name='DockerBuildImages',
                        input=source_output,
                        project=props['cb_docker_build'],
                        run_order=1,
                    )
                ])
        ])

    # give the pipeline role read/write access to the bucket
    props['bucket'].grant_read_write(pipeline.role)

    # SSM parameter so other stacks can look up the pipeline name
    pipeline_param = aws_ssm.StringParameter(
        self, "PPipeline",
        parameter_name=f"{props['namespace']}-pipeline",
        string_value=pipeline.pipeline_name,
        description='IoT playground pipeline name')

    # cfn output
    core.CfnOutput(
        self, "PipelineOut",
        description="Pipeline",
        value=pipeline.pipeline_name)
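# Hedged sketch of the `props` dict this stack expects; the keys are inferred
# from the usage above and the values are illustrative placeholders defined
# elsewhere, not taken from the source.
props = {
    'namespace': 'iot-playground',            # hypothetical prefix for names
    'bucket': artifact_bucket,                # an aws_s3.Bucket (S3 source actions need versioning)
    'cb_docker_build': docker_build_project,  # an aws_codebuild.PipelineProject
}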
def __init__(self, scope: core.Construct, id: str, code_commit_repo: str,
             default_branch: str = 'mainline', **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    code = codecommit.Repository.from_repository_name(
        self, "codecommitrepo", code_commit_repo)

    # CloudFormation permission for project builds.
    # Right now this grants admin permissions on the policy;
    # modify it to load a custom policy per pipeline from a policy statement document:
    # iam_cfn_admin_json = Policies.get_iam_cfn_admin_access_policy()
    policy_statement = iam.PolicyStatement()
    policy_statement.add_actions("*")
    policy_statement.add_resources("*")
    policy_statement.effect = iam.Effect.ALLOW

    serverless_build = codebuild.PipelineProject(self, "buildpipeline")
    # add cfn iam statements to build project
    serverless_build.add_to_role_policy(policy_statement)

    build_output = codepipeline.Artifact("BuildOutput")
    codepipeline.Pipeline(
        self, "imageBuilderDeploymentPipeline",
        pipeline_name="ImageBuilderDeploymentPipeline",
        stages=[
            codepipeline.StageProps(
                stage_name="Source",
                actions=[
                    codepipeline_actions.CodeCommitSourceAction(
                        action_name="SourceCode",
                        branch=default_branch,
                        repository=code,
                        output=build_output)
                ]),
            codepipeline.StageProps(
                stage_name="Deploy",
                actions=[
                    codepipeline_actions.CodeBuildAction(
                        action_name="CodeDeploy",
                        project=serverless_build,
                        input=build_output)
                ])
        ])
def setup_api_pipeline(self):
    """Set up the build pipeline for the API.

    Uses codepipeline to create a Pipeline with 3 stages:
    * Source: CodeCommitSourceAction
    * Build : CodeBuildAction
    * Deploy: EcsDeployAction, deploying to the ECS service

    Returns
    -------
    aws_codepipeline.Pipeline
    """
    source_output = cp.Artifact()
    build_output = cp.Artifact(self.config.build_output)
    return cp.Pipeline(
        self, 'ApiPipeline',
        pipeline_name=self.config.api.pipeline,
        stages=[
            cp.StageProps(
                stage_name='Source',
                actions=[
                    cp_actions.CodeCommitSourceAction(
                        action_name='Source',
                        repository=self.api_source,
                        branch='master',
                        output=source_output,
                    )
                ]),
            cp.StageProps(
                stage_name='Build',
                actions=[
                    cp_actions.CodeBuildAction(
                        action_name='Build',
                        project=self.api_build_project,
                        input=source_output,
                        outputs=[build_output])
                ]),
            cp.StageProps(
                stage_name='Deploy',
                actions=[
                    cp_actions.EcsDeployAction(
                        action_name='Deploy',
                        service=self.service.service,
                        input=build_output,
                        # by default the action reads imagedefinitions.json
                        # from the root of the input artifact:
                        # image_file=build_output.at_path('imagedefinitions.json')
                    )
                ])
        ])
def setup_web_pipeline(self):
    """Set up the build pipeline for the web app.

    Uses codepipeline to create a Web Pipeline with 3 stages:
    * Source: CodeCommitSourceAction
    * Build : CodeBuildAction
    * Deploy: S3DeployAction

    Returns
    -------
    aws_codepipeline.Pipeline
    """
    source_output = cp.Artifact()
    build_output = cp.Artifact(self.config.web.build_output)
    return cp.Pipeline(
        self, 'WebPipeline',
        pipeline_name=self.config.web.pipeline,
        stages=[
            cp.StageProps(
                stage_name='Source',
                actions=[
                    cp_actions.CodeCommitSourceAction(
                        action_name='Source',
                        repository=self.web_source,
                        branch='master',
                        output=source_output,
                    )
                ]),
            cp.StageProps(
                stage_name='Build',
                actions=[
                    cp_actions.CodeBuildAction(
                        action_name='Build',
                        project=self.web_build_project,
                        input=source_output,
                        outputs=[build_output])
                ]),
            cp.StageProps(
                stage_name='Deploy',
                actions=[
                    cp_actions.S3DeployAction(
                        action_name='Deploy',
                        bucket=self.web_bucket,
                        input=build_output,
                        access_control=s3.BucketAccessControl.PUBLIC_READ)
                ])
        ])
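# Hedged sketch of the `web_bucket` the deploy stage above assumes: a
# static-site bucket whose real definition lives elsewhere in the stack.
# Names and settings here are illustrative assumptions, not from the source.
self.web_bucket = s3.Bucket(
    self, 'WebBucket',
    website_index_document='index.html',   # serve the build output as a static website
    public_read_access=True,               # matches the PUBLIC_READ ACL used above
)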
def create_deploy_stage(self, source_build_output):
    deploy_stage = _cp.StageProps(
        stage_name='Deploy',
        actions=[
            _cpa.CodeDeployEcsDeployAction(
                action_name='Deploy',
                container_image_inputs=[
                    _cpa.CodeDeployEcsContainerImageInput(
                        input=source_build_output,
                        task_definition_placeholder='IMAGE_NAME'
                    )
                ],
                run_order=1,
                deployment_group=_cd.EcsDeploymentGroup.from_ecs_deployment_group_attributes(
                    self, 'DeploymentGroupAttributes',
                    application=_cd.EcsApplication.from_ecs_application_name(
                        self, 'ApplicationName',
                        'AppECS-DEMO-CLUSTER-DEMO-SERVICE'
                    ),
                    deployment_group_name='DgpECS-DEMO-CLUSTER-DEMO-SERVICE'
                ),
                app_spec_template_file=_cp.ArtifactPath(
                    source_build_output, 'appspec.yml'
                ),
                task_definition_template_file=_cp.ArtifactPath(
                    source_build_output, 'taskdef.json'
                )
            )
        ]
    )
    return deploy_stage
def __init__(self, app: core.App, id: str, props, repo_name: str = None, **kwargs) -> None:
    super().__init__(app, id, **kwargs)

    source_output = aws_codepipeline.Artifact(artifact_name='source')
    code = aws_codecommit.Repository.from_repository_name(
        self, "ImportedRepo", repo_name)

    codepipeline = aws_codepipeline.Pipeline(
        self, "CodePipeline",
        pipeline_name="flask-pipeline",
        artifact_bucket=props['bucket'],
        stages=[
            aws_codepipeline.StageProps(
                stage_name='Source',
                actions=[
                    aws_codepipeline_actions.CodeCommitSourceAction(
                        action_name="CodeCommit",
                        repository=code,
                        output=source_output,
                        run_order=1,
                    ),
                ]
            ),
            aws_codepipeline.StageProps(
                stage_name='Build',
                actions=[
                    aws_codepipeline_actions.CodeBuildAction(
                        action_name='DockerBuildImages',
                        input=source_output,
                        project=props['ecr_build'],
                        run_order=1,
                    )
                ]
            ),
            aws_codepipeline.StageProps(
                stage_name='Build2',
                actions=[
                    aws_codepipeline_actions.CodeBuildAction(
                        action_name='ECSBuild',
                        input=source_output,
                        project=props['ecs_build'],
                        run_order=1,
                    )
                ]
            )
        ]
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # The code that defines your stack goes here
    for stage in ["prd", "dev"]:
        target_function = self.create_function(stage)
        project = self.create_project(target_function, stage)
        source_output = codepipeline.Artifact(self.create_name(stage))
        branch = "master" if stage == "prd" else "develop"
        codepipeline.Pipeline(
            self, self.create_id("Pipeline", stage),
            pipeline_name=self.create_name(stage),
            stages=[
                codepipeline.StageProps(
                    stage_name="Source",
                    actions=[self.create_source_action(branch, source_output)]),
                codepipeline.StageProps(
                    stage_name="Build",
                    actions=[self.create_build_action(project, source_output)])
            ])
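# Hedged sketch of the naming helpers used above; their real bodies are not
# shown in the source, so these one-liners are assumptions ("my-app" is a
# hypothetical prefix).
def create_name(self, stage):
    return f"my-app-{stage}"          # e.g. "my-app-prd" / "my-app-dev"

def create_id(self, resource, stage):
    return f"{resource}-{stage}"      # e.g. "Pipeline-prd", unique construct id per stage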
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    try:
        with open('../.secrets/github_token.txt') as f:
            github_token = f.read().strip()
    except FileNotFoundError:
        # Fail fast: without the token, github_token would be unbound below.
        raise FileNotFoundError(
            "Create ../.secrets/github_token.txt and put the token which you "
            "create in the GitHub interface into it."
        )

    source_output = aws_codepipeline.Artifact(artifact_name='source')
    ecr, cb_docker_build = self._get_build_project()
    pipeline = aws_codepipeline.Pipeline(
        self, "Pipeline",
        pipeline_name="cdk-pipeline",
        stages=[
            aws_codepipeline.StageProps(
                stage_name='Source',
                actions=[
                    aws_codepipeline_actions.GitHubSourceAction(
                        output=source_output,
                        action_name="Source",
                        oauth_token=core.SecretValue(github_token),
                        owner='arron1993',
                        repo="arronmoore.com",
                        branch="develop")
                ]),
            aws_codepipeline.StageProps(
                stage_name='Build',
                actions=[
                    aws_codepipeline_actions.CodeBuildAction(
                        action_name='DockerBuildImages',
                        input=source_output,
                        project=cb_docker_build,
                        run_order=1,
                    )
                ])
        ])
def create_approval_stage(self):
    approval_stage = _cp.StageProps(
        stage_name='Approval',
        actions=[
            _cpa.ManualApprovalAction(
                action_name='Approval',
                run_order=1
            )
        ]
    )
    return approval_stage
def generate_pipeline_stages(codebuild_project, role, beanstalk_application,
                             beanstalk_environment, codestar_connection):
    source_output = codepipeline.Artifact("SourceOutput")
    source_stage = codepipeline.StageProps(
        stage_name="Source",
        actions=[
            codepipeline_actions.BitBucketSourceAction(
                connection_arn=codestar_connection.attr_connection_arn,
                output=source_output,
                repo="santos-devops-challenge-tier1",
                owner="Jayvee1413",
                action_name="Github",
                code_build_clone_output=True,
                run_order=1),
        ],
    )
    codebuild_output = codepipeline.Artifact("CodebuildOutput")
    codebuild_stage = codepipeline.StageProps(
        stage_name="Build",
        actions=[
            codepipeline_actions.CodeBuildAction(
                input=source_output,
                project=codebuild_project,
                outputs=[codebuild_output],
                action_name="codebuild",
                run_order=2,
            )
        ])
    deploy_stage = codepipeline.StageProps(
        stage_name="Deploy",
        actions=[
            ElasticBeanStalkDeployAction(
                action_name='Deploy',
                role=role,
                application_name=beanstalk_application.application_name,
                input=codebuild_output,
                environment_name=beanstalk_environment.environment_name,
                run_order=3)
        ])
    return [source_stage, codebuild_stage, deploy_stage]
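# Hedged usage sketch: feed the returned stages straight into a Pipeline.
# The construct id and the surrounding Stack scope are illustrative, not
# from the source.
pipeline = codepipeline.Pipeline(
    self, "BeanstalkPipeline",
    stages=generate_pipeline_stages(
        codebuild_project, role, beanstalk_application,
        beanstalk_environment, codestar_connection),
)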
def _create_pipeline(
        self,
        build_pipeline_name: str,
        github_source: aws_codepipeline_actions.GitHubSourceAction,
        codebuild_project: aws_codebuild.PipelineProject,
        config_file_source_bucket_name: str,
        df_project: DeviceFarmProject,
        device_farm_pool_arn: str):
    artifact_bucket = self._create_artifact_bucket(
        f"pipeline-assets-{build_pipeline_name.lower()}-{self.account}")
    self.code_build_project = self._create_codebuild_project(
        "AmplifyAndroidCodeBuildProject")
    amplify_android_build_output = aws_codepipeline.Artifact(
        "AmplifyAndroidBuildOutput")
    pipeline = aws_codepipeline.Pipeline(
        self, f"{build_pipeline_name}Pipeline",
        pipeline_name=build_pipeline_name,
        artifact_bucket=artifact_bucket,
        stages=[
            aws_codepipeline.StageProps(stage_name="Source",
                                        actions=[github_source]),
            aws_codepipeline.StageProps(
                stage_name="Build",
                actions=[
                    self._create_build_and_assemble_action(
                        input_artifact=github_source.action_properties.outputs[0],
                        output_artifact=amplify_android_build_output,
                        pipeline_project=codebuild_project,
                        config_source_bucket=config_file_source_bucket_name)
                ])
        ])
    self._add_devicefarm_test_runner_permissions_to_role(pipeline.role)
    self._add_devicefarm_test_stage(pipeline, df_project.get_project_id(),
                                    device_farm_pool_arn)
    return pipeline
def create_source_stage(self, source_output, from_bucket):
    source_stage = _cp.StageProps(
        stage_name='Source',
        actions=[
            _cpa.GitHubSourceAction(
                action_name='source_from_github',
                owner='koren-kobatake',
                repo='aws-study',
                branch='master',
                trigger=_cpa.GitHubTrigger.POLL,
                # Do not commit a real token: the literal that was here has
                # been redacted. Prefer Secrets Manager (see sketch below).
                oauth_token=SecretValue.plain_text('<github-oauth-token>'),
                output=source_output
            )
        ]
    )
    return source_stage
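# Hedged alternative: read the token from AWS Secrets Manager instead of
# embedding it in source ('github-token' is a hypothetical secret name).
oauth_token = SecretValue.secrets_manager('github-token')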
def create_build_stage(self, source_output, build_project):
    build_stage = _cp.StageProps(
        stage_name='Build',
        actions=[
            _cpa.CodeBuildAction(
                action_name='Build',
                input=source_output,
                project=build_project,
                run_order=1,
                environment_variables={
                    'ENV': _cb.BuildEnvironmentVariable(value='develop'),
                    'FAMILY_NAME': _cb.BuildEnvironmentVariable(value='DEMO-TASK'),
                },
                outputs=[_cp.Artifact(artifact_name='BuildArtifact')],
            )
        ]
    )
    return build_stage
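# Hedged sketch of how the stage factories above might compose into one
# pipeline. The construct id and the build_project / source_build_output
# variables are illustrative; the actual wiring is outside these snippets.
source_output = _cp.Artifact()
pipeline = _cp.Pipeline(
    self, 'DemoPipeline',
    stages=[
        self.create_source_stage(source_output, from_bucket=None),
        self.create_build_stage(source_output, build_project),
        self.create_approval_stage(),
        self.create_deploy_stage(source_build_output),
    ],
)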
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    code = codecommit.Repository(
        self, "CodeRepo",
        repository_name="iot-gg-cicd-workshop-repo")

    prod_deploy_param_bucket = s3.Bucket(
        self, "ProdDeployBucket",
        versioned=True,
    )
    prod_source_bucket = s3.Bucket(
        self, "ProdSourceBucket",
        versioned=True,
    )
    ssm.StringParameter(
        self, "ProdSourceBucketParameter",
        parameter_name="/iot-gg-cicd-workshop/s3/prod_source_bucket",
        string_value=prod_source_bucket.bucket_name,
    )
    ssm.StringParameter(
        self, "ProdDeployBucketParameter",
        parameter_name="/iot-gg-cicd-workshop/s3/prod_deploy_param_bucket",
        string_value=prod_deploy_param_bucket.bucket_name,
    )

    cdk_build = codebuild.PipelineProject(
        self, "Build",
        project_name="iot-gg-cicd-workshop-build",
        build_spec=codebuild.BuildSpec.from_source_filename("buildspec.yml"),
        environment_variables={
            "AWS_DEFAULT_REGION": codebuild.BuildEnvironmentVariable(
                value=kwargs['env'].region)
        })
    add_policies(cdk_build, [
        "AWSCloudFormationFullAccess",
        "AmazonSSMFullAccess",
        "AmazonS3FullAccess",
        "AWSLambdaFullAccess",
        "IAMFullAccess",
    ])

    cdk_deploy_canary = codebuild.PipelineProject(
        self, "Deploy",
        project_name="iot-gg-cicd-workshop-deploy-canary",
        build_spec=codebuild.BuildSpec.from_source_filename("deployspec.yml"),
        environment_variables={
            "AWS_DEFAULT_REGION": codebuild.BuildEnvironmentVariable(
                value=kwargs['env'].region)
        })
    add_policies(cdk_deploy_canary, [
        "AWSCloudFormationFullAccess",
        "AWSGreengrassFullAccess",
        "AmazonSSMFullAccess",
        "ResourceGroupsandTagEditorReadOnlyAccess",
        "AWSLambdaFullAccess",
        "AWSIoTFullAccess"
    ])

    source_output = codepipeline.Artifact()
    cdk_build_output = codepipeline.Artifact("CdkBuildOutput")
    codepipeline.Pipeline(
        self, "Pipeline",
        pipeline_name="iot-gg-cicd-workshop-pipeline-canary",
        stages=[
            codepipeline.StageProps(
                stage_name="Source",
                actions=[
                    codepipeline_actions.CodeCommitSourceAction(
                        action_name="CodeCommit_Source",
                        repository=code,
                        output=source_output)
                ]),
            codepipeline.StageProps(
                stage_name="Build_Package_Deploy_Lambda",
                actions=[
                    codepipeline_actions.CodeBuildAction(
                        action_name="Build_Package_Deploy",
                        project=cdk_build,
                        input=source_output,
                        outputs=[cdk_build_output])
                ]),
            codepipeline.StageProps(
                stage_name="Deploy_GreenGrass_Canary",
                actions=[
                    codepipeline_actions.CodeBuildAction(
                        action_name="Deploy_Canary",
                        project=cdk_deploy_canary,
                        input=cdk_build_output)
                ]),
        ])

    cdk_deploy_prod = codebuild.PipelineProject(
        self, "DeployProd",
        project_name="iot-gg-cicd-workshop-deploy-main",
        build_spec=codebuild.BuildSpec.from_object(dict(
            version="0.2",
            phases=dict(
                install=dict(commands=[
                    "apt-get install zip",
                    "PROD_SOURCE_BUCKET=$(aws ssm get-parameter --name '/iot-gg-cicd-workshop/s3/prod_source_bucket' --with-decryption --query 'Parameter.Value' --output text)",
                    "aws s3 cp s3://$PROD_SOURCE_BUCKET/prod_deploy.zip prod_deploy.zip",
                    "unzip -o prod_deploy.zip",
                    "ls -la",
                    "make clean init"
                ]),
                build=dict(commands=[
                    "ls -la",
                    "make deploy-greengrass-prod",
                ])),
            artifacts={
                "base-directory": ".",
                "files": ["**/*"]
            },
            environment=dict(
                buildImage=codebuild.LinuxBuildImage.STANDARD_2_0))))
    add_policies(cdk_deploy_prod, [
        "AWSCloudFormationFullAccess",
        "AWSGreengrassFullAccess",
        "AmazonSSMFullAccess",
        "ResourceGroupsandTagEditorReadOnlyAccess",
        "AWSLambdaFullAccess"
    ])

    prod_source_output = codepipeline.Artifact()
    codepipeline.Pipeline(
        self, "PipelineProd",
        pipeline_name="iot-gg-cicd-workshop-pipeline-main",
        stages=[
            codepipeline.StageProps(
                stage_name="Source",
                actions=[
                    codepipeline_actions.S3SourceAction(
                        action_name="S3_Source",
                        bucket=prod_deploy_param_bucket,
                        bucket_key="deploy_params.zip",
                        output=prod_source_output)
                ]),
            codepipeline.StageProps(
                stage_name="Deploy_GreenGrass_Prod",
                actions=[
                    codepipeline_actions.CodeBuildAction(
                        action_name="Deploy_Prod",
                        project=cdk_deploy_prod,
                        input=prod_source_output)
                ]),
        ])

    prod_source_bucket.grant_read_write(cdk_deploy_canary.role)
    prod_source_bucket.grant_read(cdk_deploy_prod.role)
    prod_deploy_param_bucket.grant_read_write(cdk_deploy_canary.role)
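# Hedged sketch of the add_policies helper called above (its definition is
# outside this snippet): attach the named AWS managed policies to a
# CodeBuild project's role, assuming `iam` is aws_cdk.aws_iam.
def add_policies(project, policy_names):
    for name in policy_names:
        project.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(name))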
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Parameters #
    notification_email = core.CfnParameter(
        self,
        "NOTIFICATION_EMAIL",
        type="String",
        description="email for pipeline outcome notifications",
        allowed_pattern="^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
        constraint_description="Please enter an email address with correct format (example@example.com)",
        min_length=5,
        max_length=320,
    )
    blueprint_bucket_name = core.CfnParameter(
        self,
        "BLUEPRINT_BUCKET",
        type="String",
        description="Bucket name for blueprints of different types of ML Pipelines.",
        min_length=3,
    )
    assets_bucket_name = core.CfnParameter(
        self, "ASSETS_BUCKET", type="String", description="Bucket name for pipeline assets.", min_length=3
    )
    endpoint_name = core.CfnParameter(
        self, "ENDPOINT_NAME", type="String", description="The name of the endpoint to monitor", min_length=1
    )
    baseline_job_output_location = core.CfnParameter(
        self,
        "BASELINE_JOB_OUTPUT_LOCATION",
        type="String",
        description="S3 prefix to store the Data Baseline Job's output.",
    )
    monitoring_output_location = core.CfnParameter(
        self,
        "MONITORING_OUTPUT_LOCATION",
        type="String",
        description="S3 prefix to store the Monitoring Schedule output.",
    )
    schedule_expression = core.CfnParameter(
        self,
        "SCHEDULE_EXPRESSION",
        type="String",
        description="cron expression to run the monitoring schedule. E.g., cron(0 * ? * * *), cron(0 0 ? * * *), etc.",
        allowed_pattern="^cron(\\S+\\s){5}\\S+$",
    )
    training_data = core.CfnParameter(
        self,
        "TRAINING_DATA",
        type="String",
        description="Location of the training data in PipelineAssets S3 Bucket.",
    )
    instance_type = core.CfnParameter(
        self,
        "INSTANCE_TYPE",
        type="String",
        description="Inference instance that inference requests will be running on. E.g., ml.m5.large",
        allowed_pattern="^[a-zA-Z0-9_.+-]+\.[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
        min_length=7,
    )
    instance_volume_size = core.CfnParameter(
        self,
        "INSTANCE_VOLUME_SIZE",
        type="Number",
        description="Instance volume size used in model monitoring jobs. E.g., 20",
    )
    monitoring_type = core.CfnParameter(
        self,
        "MONITORING_TYPE",
        type="String",
        allowed_values=["dataquality", "modelquality", "modelbias", "modelexplainability"],
        default="dataquality",
        description="Type of model monitoring. Possible values: DataQuality | ModelQuality | ModelBias | ModelExplainability",
    )
    max_runtime_seconds = core.CfnParameter(
        self,
        "MAX_RUNTIME_SIZE",
        type="Number",
        description="Max runtime in seconds the job is allowed to run. E.g., 3600",
    )
    baseline_job_name = core.CfnParameter(
        self,
        "BASELINE_JOB_NAME",
        type="String",
        description="Unique name of the data baseline job",
        min_length=3,
        max_length=63,
    )
    monitoring_schedule_name = core.CfnParameter(
        self,
        "MONITORING_SCHEDULE_NAME",
        type="String",
        description="Unique name of the monitoring schedule job",
        min_length=3,
        max_length=63,
    )

    # Resources #
    assets_bucket = s3.Bucket.from_bucket_name(self, "AssetsBucket", assets_bucket_name.value_as_string)
    # getting blueprint bucket object from its name - will be used later in the stack
    blueprint_bucket = s3.Bucket.from_bucket_name(self, "BlueprintBucket", blueprint_bucket_name.value_as_string)

    # Defining pipeline stages
    # source stage
    source_output, source_action_definition = source_action_model_monitor(training_data, assets_bucket)

    # deploy stage
    # creating data baseline job
    baseline_lambda_arn, create_baseline_job_definition = create_data_baseline_job(
        self,
        blueprint_bucket,
        assets_bucket,
        baseline_job_name,
        training_data,
        baseline_job_output_location,
        endpoint_name,
        instance_type,
        instance_volume_size,
        max_runtime_seconds,
        core.Aws.STACK_NAME,
    )
    # creating monitoring schedule
    monitor_lambda_arn, create_monitoring_schedule_definition = create_monitoring_schedule(
        self,
        blueprint_bucket,
        assets_bucket,
        baseline_job_output_location,
        baseline_job_name,
        monitoring_schedule_name,
        monitoring_output_location,
        schedule_expression,
        endpoint_name,
        instance_type,
        instance_volume_size,
        max_runtime_seconds,
        monitoring_type,
        core.Aws.STACK_NAME,
    )
    # create invoking lambda policy
    invoke_lambdas_policy = iam.PolicyStatement(
        actions=[
            "lambda:InvokeFunction",
        ],
        resources=[baseline_lambda_arn, monitor_lambda_arn],
    )
    # creating pipeline stages
    source_stage = codepipeline.StageProps(stage_name="Source", actions=[source_action_definition])
    deploy_stage_model_monitor = codepipeline.StageProps(
        stage_name="Deploy",
        actions=[
            create_baseline_job_definition,
            create_monitoring_schedule_definition,
        ],
    )
    pipeline_notification_topic = sns.Topic(
        self,
        "ModelMonitorPipelineNotification",
    )
    pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns()
    pipeline_notification_topic.add_subscription(
        subscriptions.EmailSubscription(email_address=notification_email.value_as_string)
    )

    # constructing Model Monitor pipelines
    model_monitor_pipeline = codepipeline.Pipeline(
        self,
        "ModelMonitorPipeline",
        stages=[source_stage, deploy_stage_model_monitor],
        cross_account_keys=False,
    )
    model_monitor_pipeline.on_state_change(
        "NotifyUser",
        description="Notify user of the outcome of the pipeline",
        target=targets.SnsTopic(
            pipeline_notification_topic,
            message=events.RuleTargetInput.from_text(
                (
                    f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                    f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                )
            ),
        ),
        event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
    )
    model_monitor_pipeline.add_to_role_policy(
        iam.PolicyStatement(
            actions=["events:PutEvents"],
            resources=[
                f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
            ],
        )
    )
    # add lambda permissions
    model_monitor_pipeline.add_to_role_policy(invoke_lambdas_policy)
    pipeline_child_nodes = model_monitor_pipeline.node.find_all()
    pipeline_child_nodes[1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket()
    pipeline_child_nodes[6].node.default_child.cfn_options.metadata = suppress_iam_complex()
    pipeline_child_nodes[13].node.default_child.cfn_options.metadata = suppress_iam_complex()
    pipeline_child_nodes[19].node.default_child.cfn_options.metadata = suppress_list_function_policy()
    pipeline_child_nodes[24].node.default_child.cfn_options.metadata = suppress_list_function_policy()
    # attaching iam permissions to the pipelines
    pipeline_permissions(model_monitor_pipeline, assets_bucket)

    # Outputs #
    core.CfnOutput(
        self,
        id="MonitorPipeline",
        value=(
            f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
            f"{model_monitor_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
        ),
    )
    core.CfnOutput(
        self,
        id="DataBaselineJobName",
        value=baseline_job_name.value_as_string,
    )
    core.CfnOutput(
        self,
        id="MonitoringScheduleJobName",
        value=monitoring_schedule_name.value_as_string,
    )
    core.CfnOutput(
        self,
        id="MonitoringScheduleType",
        value=monitoring_type.value_as_string,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Parameters #
    notification_email = core.CfnParameter(
        self,
        "NOTIFICATION_EMAIL",
        type="String",
        description="email for pipeline outcome notifications",
        allowed_pattern="^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
        constraint_description="Please enter an email address with correct format (example@example.com)",
        min_length=5,
        max_length=320,
    )
    blueprint_bucket_name = core.CfnParameter(
        self,
        "BLUEPRINT_BUCKET",
        type="String",
        description="Bucket name for blueprints of different types of ML Pipelines.",
        min_length=3,
    )
    assets_bucket_name = core.CfnParameter(
        self, "ASSETS_BUCKET", type="String", description="Bucket name for pipeline assets.", min_length=3
    )
    custom_container = core.CfnParameter(
        self,
        "CUSTOM_CONTAINER",
        default="",
        type="String",
        description=(
            "Should point to a zip file containing dockerfile and assets for building a custom model. "
            "If empty it will be using containers from SageMaker Registry"
        ),
    )
    model_framework = core.CfnParameter(
        self,
        "MODEL_FRAMEWORK",
        default="",
        type="String",
        description="The ML framework which is used for training the model. E.g., xgboost, kmeans, etc.",
    )
    model_framework_version = core.CfnParameter(
        self,
        "MODEL_FRAMEWORK_VERSION",
        default="",
        type="String",
        description="The version of the ML framework which is used for training the model. E.g., 1.1-2",
    )
    model_name = core.CfnParameter(
        self, "MODEL_NAME", type="String", description="An arbitrary name for the model.", min_length=1
    )
    model_artifact_location = core.CfnParameter(
        self,
        "MODEL_ARTIFACT_LOCATION",
        type="String",
        description="Path to model artifact inside assets bucket.",
    )
    inference_instance = core.CfnParameter(
        self,
        "INFERENCE_INSTANCE",
        type="String",
        description="Inference instance that inference requests will be running on. E.g., ml.m5.large",
        allowed_pattern="^[a-zA-Z0-9_.+-]+\.[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
        min_length=7,
    )

    # Resources #
    # access_bucket = s3.Bucket.from_bucket_name(self, "AccessBucket", access_bucket_name.value_as_string)
    assets_bucket = s3.Bucket.from_bucket_name(self, "AssetsBucket", assets_bucket_name.value_as_string)
    # getting blueprint bucket object from its name - will be used later in the stack
    blueprint_bucket = s3.Bucket.from_bucket_name(self, "BlueprintBucket", blueprint_bucket_name.value_as_string)

    # Defining pipeline stages
    # source stage
    source_output, source_action_definition = source_action_custom(
        model_artifact_location, assets_bucket, custom_container
    )
    # build stage
    build_action_definition, container_uri = build_action(self, source_output)
    # deploy stage
    sm_layer = sagemaker_layer(self, blueprint_bucket)
    # creating a sagemaker model
    model_lambda_arn, create_model_definition = create_model(
        self,
        blueprint_bucket,
        assets_bucket,
        model_name,
        model_artifact_location,
        custom_container,
        model_framework,
        model_framework_version,
        container_uri,
        sm_layer,
    )
    # creating a sagemaker endpoint
    endpoint_lambda_arn, create_endpoint_definition = create_endpoint(
        self, blueprint_bucket, assets_bucket, model_name, inference_instance
    )
    # Share stage
    configure_lambda_arn, configure_inference_definition = configure_inference(self, blueprint_bucket)
    # create invoking lambda policy
    invoke_lambdas_policy = iam.PolicyStatement(
        actions=[
            "lambda:InvokeFunction",
        ],
        resources=[model_lambda_arn, endpoint_lambda_arn, configure_lambda_arn],
    )
    pipeline_notification_topic = sns.Topic(
        self,
        "PipelineNotification",
    )
    pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns()
    pipeline_notification_topic.add_subscription(
        subscriptions.EmailSubscription(email_address=notification_email.value_as_string)
    )
    # creating pipeline stages
    source_stage = codepipeline.StageProps(stage_name="Source", actions=[source_action_definition])
    build_stage = codepipeline.StageProps(stage_name="Build", actions=[build_action_definition])
    deploy_stage_realtime = codepipeline.StageProps(
        stage_name="Deploy",
        actions=[
            create_model_definition,
            create_endpoint_definition,
        ],
    )
    share_stage = codepipeline.StageProps(stage_name="Share", actions=[configure_inference_definition])

    realtime_build_pipeline = codepipeline.Pipeline(
        self,
        "BYOMPipelineReatimeBuild",
        stages=[source_stage, build_stage, deploy_stage_realtime, share_stage],
        cross_account_keys=False,
    )
    realtime_build_pipeline.on_state_change(
        "NotifyUser",
        description="Notify user of the outcome of the pipeline",
        target=targets.SnsTopic(
            pipeline_notification_topic,
            message=events.RuleTargetInput.from_text(
                (
                    f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                    f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
                )
            ),
        ),
        event_pattern=events.EventPattern(detail={"state": ["SUCCEEDED", "FAILED"]}),
    )
    realtime_build_pipeline.add_to_role_policy(
        iam.PolicyStatement(
            actions=["events:PutEvents"],
            resources=[
                f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
            ],
        )
    )
    # add lambda permissions
    realtime_build_pipeline.add_to_role_policy(invoke_lambdas_policy)
    # Enhancement: This is to find CDK object nodes so that unnecessary cfn-nag warnings can be suppressed.
    # There is room for improving the method in future versions to find CDK nodes without having to use
    # hardcoded index numbers.
    pipeline_child_nodes = realtime_build_pipeline.node.find_all()
    pipeline_child_nodes[1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket()
    pipeline_child_nodes[6].node.default_child.cfn_options.metadata = suppress_iam_complex()
    pipeline_child_nodes[13].node.default_child.cfn_options.metadata = suppress_iam_complex()
    pipeline_child_nodes[19].node.default_child.cfn_options.metadata = suppress_list_function_policy()
    pipeline_child_nodes[25].node.default_child.cfn_options.metadata = suppress_list_function_policy()
    pipeline_child_nodes[30].node.default_child.cfn_options.metadata = suppress_list_function_policy()
    pipeline_child_nodes[36].node.default_child.cfn_options.metadata = suppress_list_function_policy()
    # attaching iam permissions to the pipelines
    pipeline_permissions(realtime_build_pipeline, assets_bucket)

    # Outputs #
    core.CfnOutput(
        self,
        id="Pipelines",
        value=(
            f"https://console.aws.amazon.com/codesuite/codepipeline/pipelines/"
            f"{realtime_build_pipeline.pipeline_name}/view?region={core.Aws.REGION}"
        ),
    )
    core.CfnOutput(
        self,
        id="SageMakerModelName",
        value=model_name.value_as_string,
    )
    core.CfnOutput(
        self,
        id="SageMakerEndpointConfigName",
        value=f"{model_name.value_as_string}-endpoint-config",
    )
    core.CfnOutput(
        self,
        id="SageMakerEndpointName",
        value=f"{model_name.value_as_string}-endpoint",
    )
    core.CfnOutput(
        self,
        id="EndpointDataCaptureLocation",
        value=f"https://s3.console.aws.amazon.com/s3/buckets/{assets_bucket.bucket_name}/datacapture",
        description="Endpoint data capture location (to be used by Model Monitor)",
    )
        iam.ManagedPolicy.from_aws_managed_policy_name('PowerUserAccess'),
        iam.ManagedPolicy.from_aws_managed_policy_name('IAMFullAccess')
    ],
)
pipeline = codepipeline.Pipeline(
    stack, 'CodePipeline',
    pipeline_name='EMR_Launch_Release',
    restart_execution_on_update=True,
    artifact_bucket=artifacts_bucket,
    stages=[
        codepipeline.StageProps(stage_name='Source', actions=[
            codepipeline_actions.GitHubSourceAction(
                action_name='GitHub_Source',
                repo='aws-emr-launch',
                branch=pipeline_params['github-branch'],
                owner=pipeline_params['github-owner'],
                oauth_token=core.SecretValue.secrets_manager(
                    secret_id=deployment_secret['secret-id'],
                    json_field=deployment_secret['json-fields']['github-oauth-token']),
                trigger=codepipeline_actions.GitHubTrigger.WEBHOOK,
                output=source_output,
            )]),
        codepipeline.StageProps(stage_name='Self-Update', actions=[
            codepipeline_actions.CodeBuildAction(
                action_name='Self_Deploy',
                project=codebuild.PipelineProject(
                    stack, 'CodePipelineBuild',
                    build_spec=codebuild.BuildSpec.from_source_filename(
                        'codepipeline/pipelines-buildspec.yaml'),
                    role=code_build_role,
                    environment=codebuild.BuildEnvironment(
                        build_image=codebuild.LinuxBuildImage.STANDARD_4_0,
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Parameters #
    notification_email = core.CfnParameter(
        self,
        "NOTIFICATION_EMAIL",
        type="String",
        description="email for pipeline outcome notifications",
        allowed_pattern='^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$',
        constraint_description="Please enter an email address with correct format (example@example.com)",
        min_length=5,
        max_length=320)
    blueprint_bucket_name = core.CfnParameter(
        self,
        "BLUEPRINT_BUCKET",
        type="String",
        description="Bucket name for blueprints of different types of ML Pipelines.",
        min_length=3)
    access_bucket_name = core.CfnParameter(
        self,
        "ACCESS_BUCKET",
        type="String",
        description="Bucket name for access logs.",
        min_length=3)
    custom_container = core.CfnParameter(
        self,
        "CUSTOM_CONTAINER",
        type="String",
        description=(
            "Should point to a zip file containing dockerfile and assets for building a custom model. "
            "If empty it will be using containers from SageMaker Registry"),
    )
    model_framework = core.CfnParameter(
        self,
        "MODEL_FRAMEWORK",
        type="String",
        description="The ML framework which is used for training the model. E.g., xgboost, kmeans, etc.",
    )
    model_framework_version = core.CfnParameter(
        self,
        "MODEL_FRAMEWORK_VERSION",
        type="String",
        description="The version of the ML framework which is used for training the model. E.g., 1.1-2",
    )
    model_name = core.CfnParameter(
        self,
        "MODEL_NAME",
        type="String",
        description="An arbitrary name for the model.",
        min_length=1)
    model_artifact_location = core.CfnParameter(
        self,
        "MODEL_ARTIFACT_LOCATION",
        type="String",
        description="Path to model artifact inside assets bucket.",
    )
    training_data = core.CfnParameter(
        self,
        "TRAINING_DATA",
        type="String",
        description="Location of the training data in PipelineAssets S3 Bucket.",
    )
    inference_instance = core.CfnParameter(
        self,
        "INFERENCE_INSTANCE",
        type="String",
        description="Inference instance that inference requests will be running on. E.g., ml.m5.large",
        allowed_pattern='^[a-zA-Z0-9_.+-]+\.[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$',
        min_length=7)
    inference_type = core.CfnParameter(
        self,
        "INFERENCE_TYPE",
        type="String",
        allowed_values=["batch", "realtime"],
        default="realtime",
        description="Type of inference. Possible values: batch | realtime",
    )
    batch_inference_data = core.CfnParameter(
        self,
        "BATCH_INFERENCE_DATA",
        type="String",
        default="",
        description="Location of batch inference data if inference type is set to batch. Otherwise, can be left empty.",
    )

    # Resources #
    access_bucket = s3.Bucket.from_bucket_name(
        self, "AccessBucket", access_bucket_name.value_as_string)
    # getting blueprint bucket object from its name - will be used later in the stack
    blueprint_bucket = s3.Bucket.from_bucket_name(
        self, "BlueprintBucket", blueprint_bucket_name.value_as_string)

    # Creating assets bucket so that users can upload ML Models to it.
    assets_bucket = s3.Bucket(
        self,
        "pipeline-assets-" + str(uuid.uuid4()),
        versioned=True,
        encryption=s3.BucketEncryption.S3_MANAGED,
        server_access_logs_bucket=access_bucket,
        server_access_logs_prefix="BYOMBatchBuiltinStack",
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL)
    assets_bucket.node.default_child.cfn_options.metadata = suppress_assets_bucket()

    # Defining pipeline stages
    # source stage
    source_output, source_action_definition = source_action(
        model_artifact_location, assets_bucket)
    # deploy stage
    sm_layer = sagemaker_layer(self, blueprint_bucket)
    # creating a sagemaker model
    create_model_definition = create_model(
        self,
        blueprint_bucket,
        assets_bucket,
        model_name,
        model_artifact_location,
        custom_container,
        model_framework,
        model_framework_version,
        "",
        sm_layer,
    )
    # creating a batch transform job
    batch_transform_definition = batch_transform(
        self,
        blueprint_bucket,
        assets_bucket,
        model_name,
        inference_instance,
        batch_inference_data,
        sm_layer,
    )
    pipeline_notification_topic = sns.Topic(
        self,
        "PipelineNotification",
    )
    pipeline_notification_topic.node.default_child.cfn_options.metadata = suppress_sns()
    pipeline_notification_topic.add_subscription(
        subscriptions.EmailSubscription(
            email_address=notification_email.value_as_string))

    # creating pipeline stages
    source_stage = codepipeline.StageProps(
        stage_name="Source", actions=[source_action_definition])
    deploy_stage_batch = codepipeline.StageProps(
        stage_name="Deploy",
        actions=[create_model_definition, batch_transform_definition],
    )
    batch_nobuild_pipeline = codepipeline.Pipeline(
        self,
        "BYOMPipelineBatchBuiltIn",
        stages=[source_stage, deploy_stage_batch],
        cross_account_keys=False,
    )
    pipeline_rule = batch_nobuild_pipeline.on_state_change(
        "NotifyUser",
        description="Notify user of the outcome of the pipeline",
        target=targets.SnsTopic(
            pipeline_notification_topic,
            message=events.RuleTargetInput.from_text(
                f"Pipeline {events.EventField.from_path('$.detail.pipeline')} finished executing. "
                f"Pipeline execution result is {events.EventField.from_path('$.detail.state')}"
            )),
        event_pattern=events.EventPattern(
            detail={'state': ['SUCCEEDED', 'FAILED']}),
    )
    batch_nobuild_pipeline.add_to_role_policy(
        iam.PolicyStatement(
            actions=["events:PutEvents"],
            resources=[
                f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
            ]))
    # Enhancement: This is to find CDK object nodes so that unnecessary cfn-nag warnings can be suppressed.
    # There is room for improving the method in future versions to find CDK nodes without having to use
    # hardcoded index numbers.
    pipeline_child_nodes = batch_nobuild_pipeline.node.find_all()
    pipeline_child_nodes[1].node.default_child.cfn_options.metadata = suppress_pipeline_bucket()
    pipeline_child_nodes[6].node.default_child.cfn_options.metadata = suppress_iam_complex()
    pipeline_child_nodes[13].node.default_child.cfn_options.metadata = suppress_list_function_policy()
    pipeline_child_nodes[19].node.default_child.cfn_options.metadata = suppress_list_function_policy()
    pipeline_child_nodes[24].node.default_child.cfn_options.metadata = suppress_list_function_policy()
    # pipeline_child_nodes[30].node.default_child.cfn_options.metadata = suppress_list_function_policy()
    # attaching iam permissions to the pipeline
    pipeline_permissions(batch_nobuild_pipeline, assets_bucket)

    # Outputs #
    core.CfnOutput(
        self,
        id="AssetsBucket",
        value='https://s3.console.aws.amazon.com/s3/buckets/' + assets_bucket.bucket_name,
        description="S3 Bucket to upload model artifact")
    core.CfnOutput(
        self,
        id="Pipelines",
        value=(
            f'https://console.aws.amazon.com/codesuite/codepipeline/pipelines/'
            f'{batch_nobuild_pipeline.pipeline_name}/view?region={core.Aws.REGION}'
        ))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Get stack parameters: email and repo address
    notification_email = core.CfnParameter(
        self,
        "Email Address",
        type="String",
        description="Specify an email to receive notifications about pipeline outcomes.",
        allowed_pattern='^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$',
        min_length=5,
        max_length=320,
        constraint_description="Please enter an email address with correct format (example@example.com)"
    )
    git_address = core.CfnParameter(
        self,
        "CodeCommit Repo Address",
        type="String",
        description="AWS CodeCommit repository clone URL to connect to the framework.",
        allowed_pattern=(
            '^(((https:\/\/|ssh:\/\/)(git\-codecommit)\.[a-zA-Z0-9_.+-]+(amazonaws\.com\/)'
            '[a-zA-Z0-9-.]+(\/)[a-zA-Z0-9-.]+(\/)[a-zA-Z0-9-.]+$)|)'
        ),
        min_length=0,
        max_length=320,
        constraint_description=(
            "CodeCommit address must follow the pattern: "
            "ssh or https://git-codecommit.REGION.amazonaws.com/version/repos/REPONAME"
        )
    )

    # Conditions
    git_address_provided = core.CfnCondition(
        self,
        "GitAddressProvided",
        expression=core.Fn.condition_not(
            core.Fn.condition_equals(git_address, "")),
    )

    # Constants
    pipeline_stack_name = "MLOps-pipeline"

    # CDK Resources setup
    access_logs_bucket = s3.Bucket(
        self,
        "accessLogs",
        encryption=s3.BucketEncryption.S3_MANAGED,
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL)
    access_logs_bucket.node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {"id": "W35", "reason": "This is the access bucket."},
                {"id": "W51", "reason": "This S3 bucket does not need a bucket policy."},
            ]
        }
    }

    source_bucket = s3.Bucket.from_bucket_name(self, "BucketByName", "%%BUCKET_NAME%%")

    blueprints_bucket_name = "blueprint-repository-" + str(uuid.uuid4())
    blueprint_repository_bucket = s3.Bucket(
        self,
        blueprints_bucket_name,
        encryption=s3.BucketEncryption.S3_MANAGED,
        server_access_logs_bucket=access_logs_bucket,
        server_access_logs_prefix=blueprints_bucket_name,
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL)
    blueprint_repository_bucket.node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [{
                "id": "W51",
                "reason": "This S3 bucket does not need a bucket policy. "
                          "All access to this bucket is restricted by IAM (CDK grant_read method)",
            }]
        }
    }

    # Custom resource to copy source bucket content to blueprints bucket
    custom_resource_lambda_fn = lambda_.Function(
        self,
        "CustomResourceLambda",
        code=lambda_.Code.from_asset("lambdas/custom_resource"),
        handler="index.on_event",
        runtime=lambda_.Runtime.PYTHON_3_8,
        environment={
            "source_bucket": "https://%%BUCKET_NAME%%-" + core.Aws.REGION
                             + ".s3.amazonaws.com/%%SOLUTION_NAME%%/%%VERSION%%",
            "destination_bucket": blueprint_repository_bucket.bucket_name,
            "LOG_LEVEL": "INFO",
        },
        timeout=core.Duration.seconds(60),
    )
    custom_resource_lambda_fn.node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [{
                "id": "W58",
                "reason": "The lambda functions role already has permissions to write cloudwatch logs",
            }]
        }
    }
    blueprint_repository_bucket.grant_write(custom_resource_lambda_fn)
    custom_resource = core.CustomResource(
        self,
        "CustomResourceCopyAssets",
        service_token=custom_resource_lambda_fn.function_arn,
    )
    custom_resource.node.add_dependency(blueprint_repository_bucket)

    ### IAM policies setup ###
    cloudformation_role = iam.Role(
        self,
        "mlopscloudformationrole",
        assumed_by=iam.ServicePrincipal("cloudformation.amazonaws.com"),
    )
    # Cloudformation policy setup
    orchestrator_policy = iam.Policy(
        self,
        "lambdaOrchestratorPolicy",
        statements=[
            iam.PolicyStatement(
                actions=[
                    "cloudformation:CreateStack",
                    "cloudformation:DeleteStack",
                    "cloudformation:UpdateStack",
                    "cloudformation:ListStackResources",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:cloudformation:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:stack/{pipeline_stack_name}*/*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "iam:CreateRole",
                    "iam:DeleteRole",
                    "iam:DeleteRolePolicy",
                    "iam:GetRole",
                    "iam:GetRolePolicy",
                    "iam:PassRole",
                    "iam:PutRolePolicy",
                    "iam:AttachRolePolicy",
                    "iam:DetachRolePolicy",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:iam::{core.Aws.ACCOUNT_ID}:role/{pipeline_stack_name}*"
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "ecr:CreateRepository",
                    "ecr:DeleteRepository",
                    "ecr:DescribeRepositories",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:ecr:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:repository/awsmlopsmodels*"
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "codebuild:CreateProject",
                    "codebuild:DeleteProject",
                    "codebuild:BatchGetProjects",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:project/ContainerFactory*",
                    f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:project/VerifySagemaker*",
                    f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:report-group/*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "lambda:CreateFunction",
                    "lambda:DeleteFunction",
                    "lambda:InvokeFunction",
                    "lambda:PublishLayerVersion",
                    "lambda:DeleteLayerVersion",
                    "lambda:GetLayerVersion",
                    "lambda:GetFunctionConfiguration",
                    "lambda:GetFunction",
                    "lambda:AddPermission",
                    "lambda:RemovePermission",
                    "lambda:UpdateFunctionConfiguration",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:layer:*",
                    f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:function:*",
                ],
            ),
            iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[
                    blueprint_repository_bucket.bucket_arn,
                    blueprint_repository_bucket.arn_for_objects("*"),
                    f"arn:{core.Aws.PARTITION}:s3:::pipeline-assets-*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "codepipeline:CreatePipeline",
                    "codepipeline:DeletePipeline",
                    "codepipeline:GetPipeline",
                    "codepipeline:GetPipelineState",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:codepipeline:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*"
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "apigateway:POST",
                    "apigateway:PATCH",
                    "apigateway:DELETE",
                    "apigateway:GET",
                    "apigateway:PUT",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis/*",
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis",
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/account",
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans",
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans/*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "logs:CreateLogGroup",
                    "logs:DescribeLogGroups",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "s3:CreateBucket",
                    "s3:PutEncryptionConfiguration",
                    "s3:PutBucketVersioning",
                    "s3:PutBucketPublicAccessBlock",
                    "s3:PutBucketLogging",
                ],
                resources=["arn:" + core.Aws.PARTITION + ":s3:::*"],
            ),
            iam.PolicyStatement(
                actions=[
                    "sns:CreateTopic",
                    "sns:DeleteTopic",
                    "sns:Subscribe",
                    "sns:Unsubscribe",
                    "sns:GetTopicAttributes",
                    "sns:SetTopicAttributes",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*-PipelineNotification*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "events:PutRule",
                    "events:DescribeRule",
                    "events:PutTargets",
                    "events:RemoveTargets",
                    "events:DeleteRule",
                    "events:PutEvents",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:rule/*",
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            ),
        ],
    )
    orchestrator_policy.attach_to_role(cloudformation_role)

    # Lambda function IAM setup
    lambda_passrole_policy = iam.PolicyStatement(
        actions=["iam:passrole"],
        resources=[cloudformation_role.role_arn])

    # API Gateway and lambda setup to enable provisioning pipelines through API calls
    provisioner_apigw_lambda = aws_apigateway_lambda.ApiGatewayToLambda(
        self,
        "PipelineOrchestration",
        lambda_function_props={
            "runtime": lambda_.Runtime.PYTHON_3_8,
            "handler": "index.handler",
            "code": lambda_.Code.from_asset("lambdas/pipeline_orchestration"),
        },
        api_gateway_props={
            "defaultMethodOptions": {
                "authorizationType": apigw.AuthorizationType.IAM,
            },
            "restApiName": f"{core.Aws.STACK_NAME}-orchestrator",
            "proxy": False
        },
    )
    provision_resource = provisioner_apigw_lambda.api_gateway.root.add_resource('provisionpipeline')
    provision_resource.add_method('POST')
    status_resource = provisioner_apigw_lambda.api_gateway.root.add_resource('pipelinestatus')
    status_resource.add_method('POST')

    blueprint_repository_bucket.grant_read(provisioner_apigw_lambda.lambda_function)
    provisioner_apigw_lambda.lambda_function.add_to_role_policy(lambda_passrole_policy)
    orchestrator_policy.attach_to_role(provisioner_apigw_lambda.lambda_function.role)
    provisioner_apigw_lambda.lambda_function.add_to_role_policy(
        iam.PolicyStatement(actions=["xray:PutTraceSegments"], resources=["*"]))
    lambda_node = provisioner_apigw_lambda.lambda_function.node.default_child
    lambda_node.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [{
                "id": "W12",
                "reason": "The xray permissions PutTraceSegments is not able to be bound to resources.",
            }]
        }
    }

    # Environment variables setup
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="BLUEPRINT_BUCKET_URL",
        value=str(blueprint_repository_bucket.bucket_regional_domain_name),
    )
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="BLUEPRINT_BUCKET", value=str(blueprint_repository_bucket.bucket_name))
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="ACCESS_BUCKET", value=str(access_logs_bucket.bucket_name))
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="CFN_ROLE_ARN", value=str(cloudformation_role.role_arn))
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="PIPELINE_STACK_NAME", value=pipeline_stack_name)
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="NOTIFICATION_EMAIL", value=notification_email.value_as_string)
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="LOG_LEVEL", value="DEBUG")

    cfn_policy_for_lambda = orchestrator_policy.node.default_child
    cfn_policy_for_lambda.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [{
                "id": "W76",
                "reason": "A complex IAM policy is required for this resource.",
            }]
        }
    }

    ### Codepipeline with Git source definitions ###
    source_output = codepipeline.Artifact()
    # processing git_address to retrieve repo name
    repo_name_split = core.Fn.split("/", git_address.value_as_string)
    repo_name = core.Fn.select(5, repo_name_split)
    # getting codecommit repo cdk object using 'from_repository_name'
    repo = codecommit.Repository.from_repository_name(
        self, "AWSMLOpsFrameworkRepository", repo_name)
    codebuild_project = codebuild.PipelineProject(
        self,
        "Take config file",
        build_spec=codebuild.BuildSpec.from_object({
            "version": "0.2",
            "phases": {
                "build": {
                    "commands": [
                        "ls -a",
                        "aws lambda invoke --function-name "
                        + provisioner_apigw_lambda.lambda_function.function_name
                        + " --payload fileb://mlops-config.json response.json"
                        + " --invocation-type RequestResponse",
                    ]
                }
            },
        }),
    )
    # Defining a Codepipeline project with CodeCommit as source
    codecommit_pipeline = codepipeline.Pipeline(
        self,
        "MLOpsCodeCommitPipeline",
        stages=[
            codepipeline.StageProps(
                stage_name="Source",
                actions=[
                    codepipeline_actions.CodeCommitSourceAction(
                        action_name="CodeCommit",
                        repository=repo,
                        output=source_output,
                    )
                ],
            ),
            codepipeline.StageProps(
                stage_name="TakeConfig",
                actions=[
                    codepipeline_actions.CodeBuildAction(
                        action_name="provision_pipeline",
                        input=source_output,
                        outputs=[],
                        project=codebuild_project,
                    )
                ],
            ),
        ],
        cross_account_keys=False,
    )
    codecommit_pipeline.add_to_role_policy(
        iam.PolicyStatement(
            actions=["lambda:InvokeFunction"],
            resources=[provisioner_apigw_lambda.lambda_function.function_arn],
        ))
    codebuild_project.add_to_role_policy(
        iam.PolicyStatement(
            actions=["lambda:InvokeFunction"],
            resources=[provisioner_apigw_lambda.lambda_function.function_arn],
        ))
    pipeline_child_nodes = codecommit_pipeline.node.find_all()
    pipeline_child_nodes[1].node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {"id": "W35", "reason": "This is a managed bucket generated by CDK for codepipeline."},
                {"id": "W51", "reason": "This is a managed bucket generated by CDK for codepipeline."},
            ]
        }
    }

    ### Custom resource for operational metrics ###
    metricsMapping = core.CfnMapping(
        self, 'AnonymousData',
        mapping={'SendAnonymousData': {'Data': 'Yes'}})
    metrics_condition = core.CfnCondition(
        self,
        'AnonymousDatatoAWS',
        expression=core.Fn.condition_equals(
            metricsMapping.find_in_map('SendAnonymousData', 'Data'), 'Yes'))
    helper_function = lambda_.Function(
        self,
        "SolutionHelper",
        code=lambda_.Code.from_asset("lambdas/solution_helper"),
        handler="lambda_function.handler",
        runtime=lambda_.Runtime.PYTHON_3_8,
        timeout=core.Duration.seconds(60),
    )
    createIdFunction = core.CustomResource(
        self,
        'CreateUniqueID',
        service_token=helper_function.function_arn,
        properties={'Resource': 'UUID'},
        resource_type='Custom::CreateUUID')
    sendDataFunction = core.CustomResource(
        self,
        'SendAnonymousData',
        service_token=helper_function.function_arn,
        properties={
            'Resource': 'AnonymousMetric',
            'UUID': createIdFunction.get_att_string('UUID'),
            'gitSelected': git_address.value_as_string,
            'Region': core.Aws.REGION,
            'SolutionId': 'SO0136',
            'Version': '%%VERSION%%',
        },
        resource_type='Custom::AnonymousData')
    core.Aspects.of(helper_function).add(ConditionalResources(metrics_condition))
    core.Aspects.of(createIdFunction).add(ConditionalResources(metrics_condition))
    core.Aspects.of(sendDataFunction).add(ConditionalResources(metrics_condition))
    helper_function.node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [{
                "id": "W58",
                "reason": "The lambda functions role already has permissions to write cloudwatch logs",
            }]
        }
    }

    # If user chooses Git as pipeline provision type, create codepipeline with Git repo as source
    core.Aspects.of(repo).add(ConditionalResources(git_address_provided))
    core.Aspects.of(codecommit_pipeline).add(ConditionalResources(git_address_provided))
    core.Aspects.of(codebuild_project).add(ConditionalResources(git_address_provided))
def __init__(self, scope: core.Construct, id: str, *, multi_account=False, **kwargs) -> None: super().__init__(scope, id, **kwargs) # Get stack parameters: notification_email = create_notification_email_parameter(self) git_address = create_git_address_parameter(self) # Get the optional S3 assets bucket to use existing_bucket = create_existing_bucket_parameter(self) # Get the optional S3 assets bucket to use existing_ecr_repo = create_existing_ecr_repo_parameter(self) # create only if multi_account template if multi_account: # create development parameters account_type = "development" dev_account_id = create_account_id_parameter( self, "DEV_ACCOUNT_ID", account_type) dev_org_id = create_org_id_parameter(self, "DEV_ORG_ID", account_type) # create staging parameters account_type = "staging" staging_account_id = create_account_id_parameter( self, "STAGING_ACCOUNT_ID", account_type) staging_org_id = create_org_id_parameter(self, "STAGING_ORG_ID", account_type) # create production parameters account_type = "production" prod_account_id = create_account_id_parameter( self, "PROD_ACCOUNT_ID", account_type) prod_org_id = create_org_id_parameter(self, "PROD_ORG_ID", account_type) # Conditions git_address_provided = create_git_address_provided_condition( self, git_address) # client provided an existing S3 bucket name, to be used for assets existing_bucket_provided = create_existing_bucket_provided_condition( self, existing_bucket) # client provided an existing Amazon ECR name existing_ecr_provided = create_existing_ecr_provided_condition( self, existing_ecr_repo) # S3 bucket needs to be created for assets create_new_bucket = create_new_bucket_condition(self, existing_bucket) # Amazon ECR repo needs too be created for custom Algorithms create_new_ecr_repo = create_new_ecr_repo_condition( self, existing_ecr_repo) # Constants pipeline_stack_name = "mlops-pipeline" # CDK Resources setup access_logs_bucket = s3.Bucket( self, "accessLogs", encryption=s3.BucketEncryption.S3_MANAGED, block_public_access=s3.BlockPublicAccess.BLOCK_ALL, ) # Apply secure transfer bucket policy apply_secure_bucket_policy(access_logs_bucket) # This is a logging bucket. access_logs_bucket.node.default_child.cfn_options.metadata = suppress_s3_access_policy( ) # Import user provide S3 bucket, if any. s3.Bucket.from_bucket_arn is used instead of # s3.Bucket.from_bucket_name to allow cross account bucket. client_existing_bucket = s3.Bucket.from_bucket_arn( self, "ClientExistingBucket", f"arn:aws:s3:::{existing_bucket.value_as_string.strip()}", ) # Create the resource if existing_bucket_provided condition is True core.Aspects.of(client_existing_bucket).add( ConditionalResources(existing_bucket_provided)) # Import user provided Amazon ECR repository client_erc_repo = ecr.Repository.from_repository_name( self, "ClientExistingECRReo", existing_ecr_repo.value_as_string) # Create the resource if existing_ecr_provided condition is True core.Aspects.of(client_erc_repo).add( ConditionalResources(existing_ecr_provided)) # Creating assets bucket so that users can upload ML Models to it. 
assets_bucket = s3.Bucket( self, "pipeline-assets-" + str(uuid.uuid4()), versioned=True, encryption=s3.BucketEncryption.S3_MANAGED, server_access_logs_bucket=access_logs_bucket, server_access_logs_prefix="assets_bucket_access_logs", block_public_access=s3.BlockPublicAccess.BLOCK_ALL, ) # Apply secure transport bucket policy apply_secure_bucket_policy(assets_bucket) s3_actions = ["s3:GetObject", "s3:ListBucket"] # if multi account if multi_account: # add permissions for other accounts to access the assets bucket assets_bucket.add_to_resource_policy( iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=s3_actions, principals=[ iam.AccountPrincipal(dev_account_id.value_as_string), iam.AccountPrincipal( staging_account_id.value_as_string), iam.AccountPrincipal(prod_account_id.value_as_string), ], resources=[ assets_bucket.bucket_arn, f"{assets_bucket.bucket_arn}/*" ], )) # Create the resource if create_new_bucket condition is True core.Aspects.of(assets_bucket).add( ConditionalResources(create_new_bucket)) # Get assets S3 bucket's name/arn, based on the condition assets_s3_bucket_name = core.Fn.condition_if( existing_bucket_provided.logical_id, client_existing_bucket.bucket_name, assets_bucket.bucket_name, ).to_string() # Creating Amazon ECR repository ecr_repo = ecr.Repository(self, "ECRRepo", image_scan_on_push=True) # if multi account if multi_account: # add permissios to other account to pull images ecr_repo.add_to_resource_policy( iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "ecr:DescribeImages", "ecr:DescribeRepositories", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage", "ecr:BatchCheckLayerAvailability", ], principals=[ iam.AccountPrincipal(dev_account_id.value_as_string), iam.AccountPrincipal( staging_account_id.value_as_string), iam.AccountPrincipal(prod_account_id.value_as_string), ], )) # Create the resource if create_new_ecr condition is True core.Aspects.of(ecr_repo).add( ConditionalResources(create_new_ecr_repo)) # Get ECR repo's name based on the condition ecr_repo_name = core.Fn.condition_if( existing_ecr_provided.logical_id, client_erc_repo.repository_name, ecr_repo.repository_name, ).to_string() # Get ECR repo's arn based on the condition ecr_repo_arn = core.Fn.condition_if( existing_ecr_provided.logical_id, client_erc_repo.repository_arn, ecr_repo.repository_arn, ).to_string() blueprints_bucket_name = "blueprint-repository-" + str(uuid.uuid4()) blueprint_repository_bucket = s3.Bucket( self, blueprints_bucket_name, encryption=s3.BucketEncryption.S3_MANAGED, server_access_logs_bucket=access_logs_bucket, server_access_logs_prefix=blueprints_bucket_name, block_public_access=s3.BlockPublicAccess.BLOCK_ALL, ) # Apply secure transport bucket policy apply_secure_bucket_policy(blueprint_repository_bucket) # if multi account if multi_account: # add permissions for other accounts to access the blueprint bucket blueprint_repository_bucket.add_to_resource_policy( iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=s3_actions, principals=[ iam.AccountPrincipal(dev_account_id.value_as_string), iam.AccountPrincipal( staging_account_id.value_as_string), iam.AccountPrincipal(prod_account_id.value_as_string), ], resources=[ blueprint_repository_bucket.bucket_arn, f"{blueprint_repository_bucket.bucket_arn}/*" ], )) # Custom resource to copy source bucket content to blueprints bucket custom_resource_lambda_fn = lambda_.Function( self, "CustomResourceLambda", code=lambda_.Code.from_asset("lambdas/custom_resource"), handler="index.on_event", runtime=lambda_.Runtime.PYTHON_3_8, 
environment={ "source_bucket": "https://%%BUCKET_NAME%%-" + core.Aws.REGION + ".s3.amazonaws.com/%%SOLUTION_NAME%%/%%VERSION%%", "destination_bucket": blueprint_repository_bucket.bucket_name, "LOG_LEVEL": "INFO", }, timeout=core.Duration.seconds(60), ) custom_resource_lambda_fn.node.default_child.cfn_options.metadata = suppress_lambda_policies( ) blueprint_repository_bucket.grant_write(custom_resource_lambda_fn) custom_resource = core.CustomResource( self, "CustomResourceCopyAssets", service_token=custom_resource_lambda_fn.function_arn, ) custom_resource.node.add_dependency(blueprint_repository_bucket) # IAM policies setup ### cloudformation_role = iam.Role( self, "mlopscloudformationrole", assumed_by=iam.ServicePrincipal("cloudformation.amazonaws.com"), ) lambda_invoke_action = "lambda:InvokeFunction" # Cloudformation policy setup orchestrator_policy = iam.Policy( self, "lambdaOrchestratorPolicy", statements=[ iam.PolicyStatement( actions=[ "cloudformation:CreateStack", "cloudformation:DeleteStack", "cloudformation:UpdateStack", "cloudformation:ListStackResources", ], resources=[ (f"arn:{core.Aws.PARTITION}:cloudformation:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:stack/{pipeline_stack_name}*/*" ), ], ), iam.PolicyStatement( actions=[ "iam:CreateRole", "iam:DeleteRole", "iam:DeleteRolePolicy", "iam:GetRole", "iam:GetRolePolicy", "iam:PassRole", "iam:PutRolePolicy", "iam:AttachRolePolicy", "iam:DetachRolePolicy", ], resources=[ f"arn:{core.Aws.PARTITION}:iam::{core.Aws.ACCOUNT_ID}:role/{pipeline_stack_name}*" ], ), iam.PolicyStatement( actions=[ "ecr:CreateRepository", "ecr:DescribeRepositories", ], resources=[ (f"arn:{core.Aws.PARTITION}:ecr:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:repository/{ecr_repo_name}") ], ), iam.PolicyStatement( actions=[ "codebuild:CreateProject", "codebuild:DeleteProject", "codebuild:BatchGetProjects", ], resources=[ (f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:project/ContainerFactory*"), (f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:project/VerifySagemaker*"), (f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:report-group/*"), ], ), iam.PolicyStatement( actions=[ "lambda:CreateFunction", "lambda:DeleteFunction", lambda_invoke_action, "lambda:PublishLayerVersion", "lambda:DeleteLayerVersion", "lambda:GetLayerVersion", "lambda:GetFunctionConfiguration", "lambda:GetFunction", "lambda:AddPermission", "lambda:RemovePermission", "lambda:UpdateFunctionConfiguration", ], resources=[ f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:layer:*", f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:function:*", ], ), iam.PolicyStatement( actions=s3_actions, resources=[ blueprint_repository_bucket.bucket_arn, blueprint_repository_bucket.arn_for_objects("*"), f"arn:{core.Aws.PARTITION}:s3:::{assets_s3_bucket_name}/*", ], ), iam.PolicyStatement( actions=[ "codepipeline:CreatePipeline", "codepipeline:UpdatePipeline", "codepipeline:DeletePipeline", "codepipeline:GetPipeline", "codepipeline:GetPipelineState", ], resources= [(f"arn:{core.Aws.PARTITION}:codepipeline:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*")], ), iam.PolicyStatement( actions=[ "apigateway:POST", "apigateway:PATCH", "apigateway:DELETE", "apigateway:GET", "apigateway:PUT", ], resources=[ f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis/*", f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis", 
f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/account", f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans", f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans/*", ], ), iam.PolicyStatement( actions=[ "logs:CreateLogGroup", "logs:DescribeLogGroups", ], resources=[ f"arn:{core.Aws.PARTITION}:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:*", ], ), iam.PolicyStatement( actions=[ "s3:CreateBucket", "s3:PutEncryptionConfiguration", "s3:PutBucketVersioning", "s3:PutBucketPublicAccessBlock", "s3:PutBucketLogging", ], resources=[f"arn:{core.Aws.PARTITION}:s3:::*"], ), iam.PolicyStatement( actions=[ "s3:PutObject", ], resources=[ f"arn:{core.Aws.PARTITION}:s3:::{assets_s3_bucket_name}/*" ], ), iam.PolicyStatement( actions=[ "sns:CreateTopic", "sns:DeleteTopic", "sns:Subscribe", "sns:Unsubscribe", "sns:GetTopicAttributes", "sns:SetTopicAttributes", ], resources= [(f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:" f"{pipeline_stack_name}*-*PipelineNotification*")], ), iam.PolicyStatement( actions=[ "events:PutRule", "events:DescribeRule", "events:PutTargets", "events:RemoveTargets", "events:DeleteRule", "events:PutEvents", ], resources=[ f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:rule/*", f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*", ], ), ], ) orchestrator_policy.attach_to_role(cloudformation_role) # Lambda function IAM setup lambda_passrole_policy = iam.PolicyStatement( actions=["iam:passrole"], resources=[cloudformation_role.role_arn]) # create sagemaker layer sm_layer = sagemaker_layer(self, blueprint_repository_bucket) # make sure the sagemaker code is uploaded first to the blueprints bucket sm_layer.node.add_dependency(custom_resource) # API Gateway and lambda setup to enable provisioning pipelines through API calls provisioner_apigw_lambda = aws_apigateway_lambda.ApiGatewayToLambda( self, "PipelineOrchestration", lambda_function_props={ "runtime": lambda_.Runtime.PYTHON_3_8, "handler": "index.handler", "code": lambda_.Code.from_asset("lambdas/pipeline_orchestration"), "layers": [sm_layer], "timeout": core.Duration.minutes(10), }, api_gateway_props={ "defaultMethodOptions": { "authorizationType": apigw.AuthorizationType.IAM, }, "restApiName": f"{core.Aws.STACK_NAME}-orchestrator", "proxy": False, "dataTraceEnabled": True, }, ) # add lambda supressions provisioner_apigw_lambda.lambda_function.node.default_child.cfn_options.metadata = suppress_lambda_policies( ) provision_resource = provisioner_apigw_lambda.api_gateway.root.add_resource( "provisionpipeline") provision_resource.add_method("POST") status_resource = provisioner_apigw_lambda.api_gateway.root.add_resource( "pipelinestatus") status_resource.add_method("POST") blueprint_repository_bucket.grant_read( provisioner_apigw_lambda.lambda_function) provisioner_apigw_lambda.lambda_function.add_to_role_policy( lambda_passrole_policy) orchestrator_policy.attach_to_role( provisioner_apigw_lambda.lambda_function.role) # Environment variables setup provisioner_apigw_lambda.lambda_function.add_environment( key="BLUEPRINT_BUCKET_URL", value=str(blueprint_repository_bucket.bucket_regional_domain_name), ) provisioner_apigw_lambda.lambda_function.add_environment( key="BLUEPRINT_BUCKET", value=str(blueprint_repository_bucket.bucket_name)) provisioner_apigw_lambda.lambda_function.add_environment( key="ACCESS_BUCKET", value=str(access_logs_bucket.bucket_name)) 
provisioner_apigw_lambda.lambda_function.add_environment( key="ASSETS_BUCKET", value=str(assets_s3_bucket_name)) provisioner_apigw_lambda.lambda_function.add_environment( key="CFN_ROLE_ARN", value=str(cloudformation_role.role_arn)) provisioner_apigw_lambda.lambda_function.add_environment( key="PIPELINE_STACK_NAME", value=pipeline_stack_name) provisioner_apigw_lambda.lambda_function.add_environment( key="NOTIFICATION_EMAIL", value=notification_email.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="REGION", value=core.Aws.REGION) provisioner_apigw_lambda.lambda_function.add_environment( key="IS_MULTI_ACCOUNT", value=str(multi_account)) # if multi account if multi_account: provisioner_apigw_lambda.lambda_function.add_environment( key="DEV_ACCOUNT_ID", value=dev_account_id.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="DEV_ORG_ID", value=dev_org_id.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="STAGING_ACCOUNT_ID", value=staging_account_id.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="STAGING_ORG_ID", value=staging_org_id.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="PROD_ACCOUNT_ID", value=prod_account_id.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="PROD_ORG_ID", value=prod_org_id.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="ECR_REPO_NAME", value=ecr_repo_name) provisioner_apigw_lambda.lambda_function.add_environment( key="ECR_REPO_ARN", value=ecr_repo_arn) provisioner_apigw_lambda.lambda_function.add_environment( key="LOG_LEVEL", value="DEBUG") cfn_policy_for_lambda = orchestrator_policy.node.default_child cfn_policy_for_lambda.cfn_options.metadata = { "cfn_nag": { "rules_to_suppress": [{ "id": "W76", "reason": "A complex IAM policy is required for this resource.", }] } } # Codepipeline with Git source definitions ### source_output = codepipeline.Artifact() # processing git_address to retrieve repo name repo_name_split = core.Fn.split("/", git_address.value_as_string) repo_name = core.Fn.select(5, repo_name_split) # getting codecommit repo cdk object using 'from_repository_name' repo = codecommit.Repository.from_repository_name( self, "AWSMLOpsFrameworkRepository", repo_name) codebuild_project = codebuild.PipelineProject( self, "Take config file", build_spec=codebuild.BuildSpec.from_object({ "version": "0.2", "phases": { "build": { "commands": [ "ls -a", "aws lambda invoke --function-name " + provisioner_apigw_lambda.lambda_function. 
function_name + " --payload fileb://mlops-config.json response.json" + " --invocation-type RequestResponse", ] } }, }), ) # Defining a Codepipeline project with CodeCommit as source codecommit_pipeline = codepipeline.Pipeline( self, "MLOpsCodeCommitPipeline", stages=[ codepipeline.StageProps( stage_name="Source", actions=[ codepipeline_actions.CodeCommitSourceAction( action_name="CodeCommit", repository=repo, branch="main", output=source_output, ) ], ), codepipeline.StageProps( stage_name="TakeConfig", actions=[ codepipeline_actions.CodeBuildAction( action_name="provision_pipeline", input=source_output, outputs=[], project=codebuild_project, ) ], ), ], cross_account_keys=False, ) codecommit_pipeline.add_to_role_policy( iam.PolicyStatement( actions=[lambda_invoke_action], resources=[ provisioner_apigw_lambda.lambda_function.function_arn ], )) codebuild_project.add_to_role_policy( iam.PolicyStatement( actions=[lambda_invoke_action], resources=[ provisioner_apigw_lambda.lambda_function.function_arn ], )) pipeline_child_nodes = codecommit_pipeline.node.find_all() pipeline_child_nodes[1].node.default_child.cfn_options.metadata = { "cfn_nag": { "rules_to_suppress": [ { "id": "W35", "reason": "This is a managed bucket generated by CDK for codepipeline.", }, { "id": "W51", "reason": "This is a managed bucket generated by CDK for codepipeline.", }, ] } } # custom resource for operational metrics ### metrics_mapping = core.CfnMapping( self, "AnonymousData", mapping={"SendAnonymousData": { "Data": "Yes" }}) metrics_condition = core.CfnCondition( self, "AnonymousDatatoAWS", expression=core.Fn.condition_equals( metrics_mapping.find_in_map("SendAnonymousData", "Data"), "Yes"), ) helper_function = lambda_.Function( self, "SolutionHelper", code=lambda_.Code.from_asset("lambdas/solution_helper"), handler="lambda_function.handler", runtime=lambda_.Runtime.PYTHON_3_8, timeout=core.Duration.seconds(60), ) helper_function.node.default_child.cfn_options.metadata = suppress_lambda_policies( ) create_id_function = core.CustomResource( self, "CreateUniqueID", service_token=helper_function.function_arn, properties={"Resource": "UUID"}, resource_type="Custom::CreateUUID", ) send_data_function = core.CustomResource( self, "SendAnonymousData", service_token=helper_function.function_arn, properties={ "Resource": "AnonymousMetric", "UUID": create_id_function.get_att_string("UUID"), "gitSelected": git_address.value_as_string, "Region": core.Aws.REGION, "SolutionId": "SO0136", "Version": "%%VERSION%%", }, resource_type="Custom::AnonymousData", ) core.Aspects.of(helper_function).add( ConditionalResources(metrics_condition)) core.Aspects.of(create_id_function).add( ConditionalResources(metrics_condition)) core.Aspects.of(send_data_function).add( ConditionalResources(metrics_condition)) # If user chooses Git as pipeline provision type, create codepipeline with Git repo as source core.Aspects.of(repo).add(ConditionalResources(git_address_provided)) core.Aspects.of(codecommit_pipeline).add( ConditionalResources(git_address_provided)) core.Aspects.of(codebuild_project).add( ConditionalResources(git_address_provided)) # Create Template Interface parameters_list = [ notification_email.logical_id, git_address.logical_id, existing_bucket.logical_id, existing_ecr_repo.logical_id, ] # if multi account if multi_account: parameters_list.extend([ dev_account_id.logical_id, dev_org_id.logical_id, staging_account_id.logical_id, staging_org_id.logical_id, prod_account_id.logical_id, prod_org_id.logical_id, ]) parameters_labels = {
f"{notification_email.logical_id}": { "default": "Notification Email (Required)" }, f"{git_address.logical_id}": { "default": "CodeCommit Repo URL Address (Optional)" }, f"{existing_bucket.logical_id}": { "default": "Name of an Existing S3 Bucket (Optional)" }, f"{existing_ecr_repo.logical_id}": { "default": "Name of an Existing Amazon ECR repository (Optional)" }, } if multi_account: paramaters_labels.update({ f"{dev_account_id.logical_id}": { "default": "Development Account ID (Required)" }, f"{dev_org_id.logical_id}": { "default": "Development Account Organizational Unit ID (Required)" }, f"{staging_account_id.logical_id}": { "default": "Staging Account ID (Required)" }, f"{staging_org_id.logical_id}": { "default": "Staging Account Organizational Unit ID (Required)" }, f"{prod_account_id.logical_id}": { "default": "Production Account ID (Required)" }, f"{prod_org_id.logical_id}": { "default": "Production Account Organizational Unit ID (Required)" }, }) self.template_options.metadata = { "AWS::CloudFormation::Interface": { "ParameterGroups": [{ "Label": { "default": "MLOps Framework Settings" }, "Parameters": paramaters_list, }], "ParameterLabels": paramaters_labels, } } # Outputs # core.CfnOutput( self, id="BlueprintsBucket", value= f"https://s3.console.aws.amazon.com/s3/buckets/{blueprint_repository_bucket.bucket_name}", description="S3 Bucket to upload MLOps Framework Blueprints", ) core.CfnOutput( self, id="AssetsBucket", value= f"https://s3.console.aws.amazon.com/s3/buckets/{assets_s3_bucket_name}", description="S3 Bucket to upload model artifact", ) core.CfnOutput( self, id="ECRRepoName", value=ecr_repo_name, description="Amazon ECR repository's name", ) core.CfnOutput( self, id="ECRRepoArn", value=ecr_repo_arn, description="Amazon ECR repository's arn", )
def __init__(self, scope: core.Construct, id: str, config, *, lambda_code: lambda_.CfnParametersCode = None, **kwargs) -> None: super().__init__(scope, id, **kwargs) repo_name = self.node.try_get_context( "name") or config["Default"]["name"] repo_count = self.node.try_get_context( "count") or config["Default"]["count"] for i in range(1, int(repo_count) + 1): code = codecommit.Repository.from_repository_name( self, "ImportedRepo" + str(i), repo_name + '-' + str(i)) # Note: the build environment is a PipelineProject property, not part of the buildspec. cdk_build = codebuild.PipelineProject( self, "CdkBuild" + str(i), project_name="cdk-codebuild-proj-for-" + repo_name + "-" + str(i), build_spec=codebuild.BuildSpec.from_object( dict( version="0.2", phases=dict(install=dict(commands=[ "npm install -g aws-cdk", "python -m ensurepip --upgrade", "python -m pip install --upgrade pip", "python -m pip install --upgrade virtualenv", "virtualenv .env", ". .env/bin/activate", "pip install -r requirements.txt", "pip install aws_cdk.aws_codedeploy aws_cdk.aws_lambda aws_cdk.aws_codebuild aws_cdk.aws_codepipeline", "pip install aws_cdk.aws_apigateway aws_cdk.aws_codecommit aws_cdk.aws_codepipeline_actions aws_cdk.aws_s3" ]), build=dict(commands=[ "cdk synth CdkServerlessStack" ])), artifacts={ "base-directory": "cdk.out", "files": ["LambdaStack.template.json"] })), environment=codebuild.BuildEnvironment( build_image=codebuild.LinuxBuildImage.STANDARD_2_0)) lambda_build = codebuild.PipelineProject( self, 'LambdaBuild' + str(i), project_name="lambda-codebuild-proj-for-" + repo_name + "-" + str(i), build_spec=codebuild.BuildSpec.from_object( dict(version="0.2", phases=dict(install=dict( commands=["cd lambda", "npm install"]), build=dict(commands=["npm run build"])), artifacts={ "base-directory": "lambda", "files": ["index.js", "node_modules/**/*"] })), environment=codebuild.BuildEnvironment( build_image=codebuild.LinuxBuildImage.STANDARD_2_0)) source_output = codepipeline.Artifact() cdk_build_output = codepipeline.Artifact("CdkBuildOutput") lambda_build_output = codepipeline.Artifact("LambdaBuildOutput") lambda_location = lambda_build_output.s3_location codepipeline.Pipeline( self, "Pipeline" + str(i), pipeline_name="pipeline-for-" + repo_name + "-" + str(i), stages=[ codepipeline.StageProps( stage_name="Source", actions=[ codepipeline_actions.CodeCommitSourceAction( action_name="CodeCommit_Source", repository=code, output=source_output) ]), codepipeline.StageProps( stage_name="Build", actions=[ codepipeline_actions.CodeBuildAction( action_name="Lambda_Build", project=lambda_build, input=source_output, outputs=[lambda_build_output]), codepipeline_actions.CodeBuildAction( action_name="CDK_Build", project=cdk_build, input=source_output, outputs=[cdk_build_output]) ]), codepipeline.StageProps( stage_name="Deploy", actions=[ codepipeline_actions.CloudFormationCreateUpdateStackAction( action_name="Lambda_CFN_Deploy", template_path=cdk_build_output.at_path( "LambdaStack.template.json"), stack_name="lambda-deployment-stack-" + repo_name + "-" + str(i), admin_permissions=True, parameter_overrides=dict( lambda_code.assign( bucket_name=lambda_location.bucket_name, object_key=lambda_location.object_key, object_version=lambda_location.object_version)), extra_inputs=[lambda_build_output]) ]) ])
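# The lambda_code argument above is a CfnParametersCode whose bucket/key
# parameters the Deploy stage fills in via lambda_code.assign(...). A sketch
# of the companion stack that would typically produce it, following the
# standard CDK Lambda-in-CodePipeline pattern (names are illustrative):
from aws_cdk import core
from aws_cdk import aws_lambda as lambda_


class LambdaStack(core.Stack):
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        super().__init__(app, id, **kwargs)
        # The code location is left as CloudFormation parameters; the
        # pipeline stack assigns them from the build artifact's S3 location.
        self.lambda_code = lambda_.Code.from_cfn_parameters()
        lambda_.Function(
            self, "Lambda",
            code=self.lambda_code,
            handler="index.main",
            runtime=lambda_.Runtime.NODEJS_12_X)
# The pipeline stack is then instantiated with lambda_code=lambda_stack.lambda_code.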
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # Get stack parameters: email and repo address notification_email = core.CfnParameter( self, "Email Address", type="String", description="Specify an email to receive notifications about pipeline outcomes.", allowed_pattern="^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$", min_length=5, max_length=320, constraint_description="Please enter an email address with correct format (example@example.com)", ) git_address = core.CfnParameter( self, "CodeCommit Repo Address", type="String", description="AWS CodeCommit repository clone URL to connect to the framework.", allowed_pattern=( "^(((https:\/\/|ssh:\/\/)(git\-codecommit)\.[a-zA-Z0-9_.+-]+(amazonaws\.com\/)[a-zA-Z0-9-.]" "+(\/)[a-zA-Z0-9-.]+(\/)[a-zA-Z0-9-.]+$)|^$)" ), min_length=0, max_length=320, constraint_description=( "CodeCommit address must follow the pattern: ssh or " "https://git-codecommit.REGION.amazonaws.com/version/repos/REPONAME" ), ) # Get the optional S3 assets bucket to use existing_bucket = core.CfnParameter( self, "ExistingS3Bucket", type="String", description="Name of an existing S3 bucket to be used for ML assets. The S3 bucket must be in the same region as the deployed stack and must have versioning enabled. If not provided, a new S3 bucket will be created.", allowed_pattern="((?=^.{3,63}$)(?!^(\d+\.)+\d+$)(^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])$)|^$)", min_length=0, max_length=63, ) # Conditions git_address_provided = core.CfnCondition( self, "GitAddressProvided", expression=core.Fn.condition_not(core.Fn.condition_equals(git_address, "")), ) # client provided an existing S3 bucket name, to be used for assets existing_bucket_provided = core.CfnCondition( self, "S3BucketProvided", expression=core.Fn.condition_not(core.Fn.condition_equals(existing_bucket.value_as_string.strip(), "")), ) # S3 bucket needs to be created for assets create_new_bucket = core.CfnCondition( self, "CreateS3Bucket", expression=core.Fn.condition_equals(existing_bucket.value_as_string.strip(), ""), ) # Constants pipeline_stack_name = "MLOps-pipeline" # CDK Resources setup access_logs_bucket = s3.Bucket( self, "accessLogs", encryption=s3.BucketEncryption.S3_MANAGED, block_public_access=s3.BlockPublicAccess.BLOCK_ALL, ) # Apply secure transport bucket policy apply_secure_bucket_policy(access_logs_bucket) # This is a logging bucket. access_logs_bucket.node.default_child.cfn_options.metadata = suppress_s3_access_policy() # Import the user-provided S3 bucket, if any. s3.Bucket.from_bucket_arn is used instead of s3.Bucket.from_bucket_name to allow cross-account buckets. client_existing_bucket = s3.Bucket.from_bucket_arn( self, "ClientExistingBucket", f"arn:aws:s3:::{existing_bucket.value_as_string.strip()}", ) # Create the resource if existing_bucket_provided condition is True core.Aspects.of(client_existing_bucket).add(ConditionalResources(existing_bucket_provided)) # Creating the assets bucket so that users can upload ML models to it.
assets_bucket = s3.Bucket( self, "pipeline-assets-" + str(uuid.uuid4()), versioned=True, encryption=s3.BucketEncryption.S3_MANAGED, server_access_logs_bucket=access_logs_bucket, server_access_logs_prefix="assets_bucket_access_logs", block_public_access=s3.BlockPublicAccess.BLOCK_ALL, ) # Apply secure transport bucket policy apply_secure_bucket_policy(assets_bucket) # Create the resource if create_new_bucket condition is True core.Aspects.of(assets_bucket).add(ConditionalResources(create_new_bucket)) # Get assets S3 bucket's name/ARN, based on the condition assets_s3_bucket_name = core.Fn.condition_if( existing_bucket_provided.logical_id, client_existing_bucket.bucket_name, assets_bucket.bucket_name, ).to_string() blueprints_bucket_name = "blueprint-repository-" + str(uuid.uuid4()) blueprint_repository_bucket = s3.Bucket( self, blueprints_bucket_name, encryption=s3.BucketEncryption.S3_MANAGED, server_access_logs_bucket=access_logs_bucket, server_access_logs_prefix=blueprints_bucket_name, block_public_access=s3.BlockPublicAccess.BLOCK_ALL, ) # Apply secure transport bucket policy apply_secure_bucket_policy(blueprint_repository_bucket) # Custom resource to copy source bucket content to blueprints bucket custom_resource_lambda_fn = lambda_.Function( self, "CustomResourceLambda", code=lambda_.Code.from_asset("lambdas/custom_resource"), handler="index.on_event", runtime=lambda_.Runtime.PYTHON_3_8, environment={ "source_bucket": "https://%%BUCKET_NAME%%-" + core.Aws.REGION + ".s3.amazonaws.com/%%SOLUTION_NAME%%/%%VERSION%%", "destination_bucket": blueprint_repository_bucket.bucket_name, "LOG_LEVEL": "INFO", }, timeout=core.Duration.seconds(60), ) custom_resource_lambda_fn.node.default_child.cfn_options.metadata = { "cfn_nag": { "rules_to_suppress": [ { "id": "W58", "reason": "The lambda function's role already has permissions to write CloudWatch logs", } ] } } blueprint_repository_bucket.grant_write(custom_resource_lambda_fn) custom_resource = core.CustomResource( self, "CustomResourceCopyAssets", service_token=custom_resource_lambda_fn.function_arn, ) custom_resource.node.add_dependency(blueprint_repository_bucket) # IAM policies setup ### cloudformation_role = iam.Role( self, "mlopscloudformationrole", assumed_by=iam.ServicePrincipal("cloudformation.amazonaws.com"), ) # CloudFormation policy setup orchestrator_policy = iam.Policy( self, "lambdaOrchestratorPolicy", statements=[ iam.PolicyStatement( actions=[ "cloudformation:CreateStack", "cloudformation:DeleteStack", "cloudformation:UpdateStack", "cloudformation:ListStackResources", ], resources=[ ( f"arn:{core.Aws.PARTITION}:cloudformation:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:stack/{pipeline_stack_name}*/*" ), ], ), iam.PolicyStatement( actions=[ "iam:CreateRole", "iam:DeleteRole", "iam:DeleteRolePolicy", "iam:GetRole", "iam:GetRolePolicy", "iam:PassRole", "iam:PutRolePolicy", "iam:AttachRolePolicy", "iam:DetachRolePolicy", ], resources=[f"arn:{core.Aws.PARTITION}:iam::{core.Aws.ACCOUNT_ID}:role/{pipeline_stack_name}*"], ), iam.PolicyStatement( actions=[ "ecr:CreateRepository", "ecr:DeleteRepository", "ecr:DescribeRepositories", ], resources=[ ( f"arn:{core.Aws.PARTITION}:ecr:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:repository/awsmlopsmodels*" ) ], ), iam.PolicyStatement( actions=[ "codebuild:CreateProject", "codebuild:DeleteProject", "codebuild:BatchGetProjects", ], resources=[ ( f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:project/ContainerFactory*" ), (
f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:project/VerifySagemaker*" ), ( f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:report-group/*" ), ], ), iam.PolicyStatement( actions=[ "lambda:CreateFunction", "lambda:DeleteFunction", "lambda:InvokeFunction", "lambda:PublishLayerVersion", "lambda:DeleteLayerVersion", "lambda:GetLayerVersion", "lambda:GetFunctionConfiguration", "lambda:GetFunction", "lambda:AddPermission", "lambda:RemovePermission", "lambda:UpdateFunctionConfiguration", ], resources=[ f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:layer:*", f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:function:*", ], ), iam.PolicyStatement( actions=["s3:GetObject"], resources=[ blueprint_repository_bucket.bucket_arn, blueprint_repository_bucket.arn_for_objects("*"), f"arn:{core.Aws.PARTITION}:s3:::{assets_s3_bucket_name}/*", ], ), iam.PolicyStatement( actions=[ "codepipeline:CreatePipeline", "codepipeline:DeletePipeline", "codepipeline:GetPipeline", "codepipeline:GetPipelineState", ], resources=[ ( f"arn:{core.Aws.PARTITION}:codepipeline:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*" ) ], ), iam.PolicyStatement( actions=[ "apigateway:POST", "apigateway:PATCH", "apigateway:DELETE", "apigateway:GET", "apigateway:PUT", ], resources=[ f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis/*", f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis", f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/account", f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans", f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans/*", ], ), iam.PolicyStatement( actions=[ "logs:CreateLogGroup", "logs:DescribeLogGroups", ], resources=[ f"arn:{core.Aws.PARTITION}:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:*", ], ), iam.PolicyStatement( actions=[ "s3:CreateBucket", "s3:PutEncryptionConfiguration", "s3:PutBucketVersioning", "s3:PutBucketPublicAccessBlock", "s3:PutBucketLogging", ], resources=["arn:" + core.Aws.PARTITION + ":s3:::*"], ), iam.PolicyStatement( actions=[ "sns:CreateTopic", "sns:DeleteTopic", "sns:Subscribe", "sns:Unsubscribe", "sns:GetTopicAttributes", "sns:SetTopicAttributes", ], resources=[ ( f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:" f"{pipeline_stack_name}*-PipelineNotification*" ), ( f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:" f"{pipeline_stack_name}*-ModelMonitorPipelineNotification*" ), ], ), iam.PolicyStatement( actions=[ "events:PutRule", "events:DescribeRule", "events:PutTargets", "events:RemoveTargets", "events:DeleteRule", "events:PutEvents", ], resources=[ f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:rule/*", f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*", ], ), ], ) orchestrator_policy.attach_to_role(cloudformation_role) # Lambda function IAM setup lambda_passrole_policy = iam.PolicyStatement(actions=["iam:passrole"], resources=[cloudformation_role.role_arn]) # API Gateway and lambda setup to enable provisioning pipelines through API calls provisioner_apigw_lambda = aws_apigateway_lambda.ApiGatewayToLambda( self, "PipelineOrchestration", lambda_function_props={ "runtime": lambda_.Runtime.PYTHON_3_8, "handler": "index.handler", "code": lambda_.Code.from_asset("lambdas/pipeline_orchestration"), }, api_gateway_props={ "defaultMethodOptions": { 
"authorizationType": apigw.AuthorizationType.IAM, }, "restApiName": f"{core.Aws.STACK_NAME}-orchestrator", "proxy": False, "dataTraceEnabled": True, }, ) provision_resource = provisioner_apigw_lambda.api_gateway.root.add_resource("provisionpipeline") provision_resource.add_method("POST") status_resource = provisioner_apigw_lambda.api_gateway.root.add_resource("pipelinestatus") status_resource.add_method("POST") blueprint_repository_bucket.grant_read(provisioner_apigw_lambda.lambda_function) provisioner_apigw_lambda.lambda_function.add_to_role_policy(lambda_passrole_policy) orchestrator_policy.attach_to_role(provisioner_apigw_lambda.lambda_function.role) provisioner_apigw_lambda.lambda_function.add_to_role_policy( iam.PolicyStatement(actions=["xray:PutTraceSegments"], resources=["*"]) ) lambda_node = provisioner_apigw_lambda.lambda_function.node.default_child lambda_node.cfn_options.metadata = { "cfn_nag": { "rules_to_suppress": [ { "id": "W12", "reason": "The xray permissions PutTraceSegments is not able to be bound to resources.", } ] } } # Environment variables setup provisioner_apigw_lambda.lambda_function.add_environment( key="BLUEPRINT_BUCKET_URL", value=str(blueprint_repository_bucket.bucket_regional_domain_name), ) provisioner_apigw_lambda.lambda_function.add_environment( key="BLUEPRINT_BUCKET", value=str(blueprint_repository_bucket.bucket_name) ) provisioner_apigw_lambda.lambda_function.add_environment( key="ACCESS_BUCKET", value=str(access_logs_bucket.bucket_name) ) provisioner_apigw_lambda.lambda_function.add_environment(key="ASSETS_BUCKET", value=str(assets_s3_bucket_name)) provisioner_apigw_lambda.lambda_function.add_environment( key="CFN_ROLE_ARN", value=str(cloudformation_role.role_arn) ) provisioner_apigw_lambda.lambda_function.add_environment(key="PIPELINE_STACK_NAME", value=pipeline_stack_name) provisioner_apigw_lambda.lambda_function.add_environment( key="NOTIFICATION_EMAIL", value=notification_email.value_as_string ) provisioner_apigw_lambda.lambda_function.add_environment(key="LOG_LEVEL", value="DEBUG") cfn_policy_for_lambda = orchestrator_policy.node.default_child cfn_policy_for_lambda.cfn_options.metadata = { "cfn_nag": { "rules_to_suppress": [ { "id": "W76", "reason": "A complex IAM policy is required for this resource.", } ] } } # Codepipeline with Git source definitions ### source_output = codepipeline.Artifact() # processing git_address to retrieve repo name repo_name_split = core.Fn.split("/", git_address.value_as_string) repo_name = core.Fn.select(5, repo_name_split) # getting codecommit repo cdk object using 'from_repository_name' repo = codecommit.Repository.from_repository_name(self, "AWSMLOpsFrameworkRepository", repo_name) codebuild_project = codebuild.PipelineProject( self, "Take config file", build_spec=codebuild.BuildSpec.from_object( { "version": "0.2", "phases": { "build": { "commands": [ "ls -a", "aws lambda invoke --function-name " + provisioner_apigw_lambda.lambda_function.function_name + " --payload fileb://mlops-config.json response.json" + " --invocation-type RequestResponse", ] } }, } ), ) # Defining a Codepipeline project with CodeCommit as source codecommit_pipeline = codepipeline.Pipeline( self, "MLOpsCodeCommitPipeline", stages=[ codepipeline.StageProps( stage_name="Source", actions=[ codepipeline_actions.CodeCommitSourceAction( action_name="CodeCommit", repository=repo, branch="main", output=source_output, ) ], ), codepipeline.StageProps( stage_name="TakeConfig", actions=[ codepipeline_actions.CodeBuildAction( 
action_name="provision_pipeline", input=source_output, outputs=[], project=codebuild_project, ) ], ), ], cross_account_keys=False, ) codecommit_pipeline.add_to_role_policy( iam.PolicyStatement( actions=["lambda:InvokeFunction"], resources=[provisioner_apigw_lambda.lambda_function.function_arn], ) ) codebuild_project.add_to_role_policy( iam.PolicyStatement( actions=["lambda:InvokeFunction"], resources=[provisioner_apigw_lambda.lambda_function.function_arn], ) ) pipeline_child_nodes = codecommit_pipeline.node.find_all() pipeline_child_nodes[1].node.default_child.cfn_options.metadata = { "cfn_nag": { "rules_to_suppress": [ { "id": "W35", "reason": "This is a managed bucket generated by CDK for codepipeline.", }, { "id": "W51", "reason": "This is a managed bucket generated by CDK for codepipeline.", }, ] } } # custom resource for operational metrics### metricsMapping = core.CfnMapping(self, "AnonymousData", mapping={"SendAnonymousData": {"Data": "Yes"}}) metrics_condition = core.CfnCondition( self, "AnonymousDatatoAWS", expression=core.Fn.condition_equals(metricsMapping.find_in_map("SendAnonymousData", "Data"), "Yes"), ) helper_function = lambda_.Function( self, "SolutionHelper", code=lambda_.Code.from_asset("lambdas/solution_helper"), handler="lambda_function.handler", runtime=lambda_.Runtime.PYTHON_3_8, timeout=core.Duration.seconds(60), ) createIdFunction = core.CustomResource( self, "CreateUniqueID", service_token=helper_function.function_arn, properties={"Resource": "UUID"}, resource_type="Custom::CreateUUID", ) sendDataFunction = core.CustomResource( self, "SendAnonymousData", service_token=helper_function.function_arn, properties={ "Resource": "AnonymousMetric", "UUID": createIdFunction.get_att_string("UUID"), "gitSelected": git_address.value_as_string, "Region": core.Aws.REGION, "SolutionId": "SO0136", "Version": "%%VERSION%%", }, resource_type="Custom::AnonymousData", ) core.Aspects.of(helper_function).add(ConditionalResources(metrics_condition)) core.Aspects.of(createIdFunction).add(ConditionalResources(metrics_condition)) core.Aspects.of(sendDataFunction).add(ConditionalResources(metrics_condition)) helper_function.node.default_child.cfn_options.metadata = { "cfn_nag": { "rules_to_suppress": [ { "id": "W58", "reason": "The lambda functions role already has permissions to write cloudwatch logs", } ] } } # If user chooses Git as pipeline provision type, create codepipeline with Git repo as source core.Aspects.of(repo).add(ConditionalResources(git_address_provided)) core.Aspects.of(codecommit_pipeline).add(ConditionalResources(git_address_provided)) core.Aspects.of(codebuild_project).add(ConditionalResources(git_address_provided)) # Create Template Interface self.template_options.metadata = { "AWS::CloudFormation::Interface": { "ParameterGroups": [ { "Label": {"default": "MLOps Framework Settings"}, "Parameters": [ notification_email.logical_id, git_address.logical_id, existing_bucket.logical_id, ], } ], "ParameterLabels": { f"{notification_email.logical_id}": {"default": "Notification Email (Required)"}, f"{git_address.logical_id}": {"default": "CodeCommit Repo URL Address (Optional)"}, f"{existing_bucket.logical_id}": {"default": "Name of an Existing S3 Bucket (Optional)"}, }, } } # Outputs # core.CfnOutput( self, id="BlueprintsBucket", value=f"https://s3.console.aws.amazon.com/s3/buckets/{blueprint_repository_bucket.bucket_name}", description="S3 Bucket to upload MLOps Framework Blueprints", ) core.CfnOutput( self, id="AssetsBucket", 
value=f"https://s3.console.aws.amazon.com/s3/buckets/{assets_s3_bucket_name}", description="S3 Bucket to upload model artifact", )
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) # The code that defines your stack goes here # Creating an empty pipeline # pipeline = codepipeline.Pipeline(self, "MyCDKPipeline", # pipeline_name="MyCDKPipeline" # ) # Creating a source stage with GitHub as source source_output = codepipeline.Artifact() source_action = codepipelineactions.GitHubSourceAction( action_name="GitHub_Source", owner="aman1505", repo="cdk-practice", oauth_token=cdk.SecretValue.secrets_manager( "my-github-token", json_field='my-github-token'), output=source_output, branch="main") # Adding the Source stage created above # pipeline.add_stage( # stage_name="Source", # actions=[source_action] # ) environment = "dev" action_list = [] with open("config/dev.yaml", 'r') as stream: try: config = yaml.safe_load(stream) module_list = config["modules"] except yaml.YAMLError as exc: print(exc) # Iterating over the module list and creating a CodeBuild project per module for module in module_list: module_name = module["stack_name"] project_name = module_name + "-" + environment project = BuildProjects.getProjectDefination( self, module_name, environment, project_name) project.role.add_managed_policy( iam.ManagedPolicy.from_aws_managed_policy_name( 'AdministratorAccess')) action = codepipelineactions.CodeBuildAction( action_name="CodeBuild" + module_name, project=project, input=source_output, # The build action must use the GitHub source output as input. outputs=[codepipeline.Artifact()], run_order=module["run_order"]) action_list.append(action) # Creating the Build stage and adding the build actions to it # pipeline.add_stage( # stage_name="Build", # actions=action_list # ) # codepipeline.Pipeline(self, "MyCDKPipeline", # pipeline_name="MyCDKPipeline", # stages=[{ # "stage_name": "Source", # "actions": [source_action] # }, # { # "stage_name": "Build", # "actions": action_list # } # ] # ) codepipeline.Pipeline(self, "Pipeline", stages=[ codepipeline.StageProps( stage_name="Source", actions=[source_action]), codepipeline.StageProps(stage_name="Build", actions=action_list) ], pipeline_name="MyCDKPipeline")
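# The loop above expects config/dev.yaml to contain a "modules" list whose
# entries provide at least stack_name and run_order. BuildProjects is defined
# elsewhere; a hypothetical sketch of getProjectDefination consistent with
# the call site, assuming each module ships its own buildspec in the repo:
from aws_cdk import aws_codebuild as codebuild


class BuildProjects:
    @staticmethod
    def getProjectDefination(scope, module_name, environment, project_name):
        # Hypothetical: one PipelineProject per module, driven by a
        # per-module buildspec assumed to live at buildspecs/<stack_name>.yaml.
        return codebuild.PipelineProject(
            scope, project_name,
            project_name=project_name,
            build_spec=codebuild.BuildSpec.from_source_filename(
                "buildspecs/" + module_name + ".yaml"),
            environment=codebuild.BuildEnvironment(
                build_image=codebuild.LinuxBuildImage.STANDARD_4_0),
            environment_variables={
                "ENVIRONMENT": codebuild.BuildEnvironmentVariable(
                    value=environment)})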
def __init__(self, scope: core.Construct, id: str, eks, redis, rds_cluster, **kwargs) -> None: super().__init__(scope, id, **kwargs) self.eks = eks self.redis = redis self.rds_cluster = rds_cluster # create ECR ecr_repo = ecr.Repository(self, "ECRRep", repository_name="springboot-multiarch") # create code repo code = codecommit.Repository(self, "CodeRep", repository_name="springboot-multiarch") core.CfnOutput(self, "CodeCommitOutput", value=code.repository_clone_url_http) # create code builds arm_build = codebuild.PipelineProject( self, "ARMBuild", build_spec=codebuild.BuildSpec.from_source_filename( "cdk/pipeline/armbuild.yml"), environment=codebuild.BuildEnvironment( build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_ARM, privileged=True), environment_variables=self.get_build_env_vars(ecr_repo)) self.add_role_access_to_build(arm_build) amd_build = codebuild.PipelineProject( self, "AMDBuild", build_spec=codebuild.BuildSpec.from_source_filename( "cdk/pipeline/amdbuild.yml"), environment=codebuild.BuildEnvironment( build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_3, privileged=True), environment_variables=self.get_build_env_vars(ecr_repo)) self.add_role_access_to_build(amd_build) post_build = codebuild.PipelineProject( self, "PostBuild", build_spec=codebuild.BuildSpec.from_source_filename( "cdk/pipeline/post_build.yml"), environment=codebuild.BuildEnvironment( build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_3, privileged=True), environment_variables=self.get_build_env_vars(ecr_repo)) self.add_role_access_to_build(post_build) # create pipeline source_output = codepipeline.Artifact() arm_build_output = codepipeline.Artifact("ARMBuildOutput") amd_build_output = codepipeline.Artifact("AMDBuildOutput") post_build_output = codepipeline.Artifact("PostBuildOutput") codepipeline.Pipeline( self, "Pipeline", stages=[ codepipeline.StageProps( stage_name="Source", actions=[ codepipeline_actions.CodeCommitSourceAction( action_name="CodeCommit_Source", repository=code, output=source_output) ]), codepipeline.StageProps( stage_name="Build", actions=[ codepipeline_actions.CodeBuildAction( action_name="ARM_Build", project=arm_build, input=source_output, outputs=[arm_build_output]), codepipeline_actions.CodeBuildAction( action_name="AMD_Build", project=amd_build, input=source_output, outputs=[amd_build_output]), ]), codepipeline.StageProps( stage_name="PostBuild", actions=[ codepipeline_actions.CodeBuildAction( action_name="Post_Build", project=post_build, input=source_output, outputs=[post_build_output]) ]), ])
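# get_build_env_vars and add_role_access_to_build are methods of this stack
# that are not shown in this excerpt. Plausible sketches (assumptions)
# consistent with the call sites: the builds push multi-arch images to ECR,
# so the build role needs push access and the buildspecs need the repository
# URI.
from aws_cdk import aws_codebuild as codebuild
from aws_cdk import aws_iam as iam


def add_role_access_to_build(self, build: codebuild.PipelineProject) -> None:
    # Hypothetical: grant the build role push/pull access to ECR.
    build.role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonEC2ContainerRegistryPowerUser"))


def get_build_env_vars(self, ecr_repo):
    # Hypothetical: expose the target repository URI to the buildspecs.
    return {"REPOSITORY_URI": codebuild.BuildEnvironmentVariable(
        value=ecr_repo.repository_uri)}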
def __init__(self, scope: core.Construct, id: str, vpc: aws_ec2.Vpc, ecs_cluster: aws_ecs.Cluster, alb: elbv2.ApplicationLoadBalancer, albTestListener: elbv2.ApplicationListener, albProdListener: elbv2.ApplicationListener, blueGroup: elbv2.ApplicationTargetGroup, greenGroup: elbv2.ApplicationTargetGroup, **kwargs) -> None: super().__init__(scope, id, **kwargs) ECS_DEPLOYMENT_GROUP_NAME = "NginxAppECSBlueGreen" ECS_DEPLOYMENT_CONFIG_NAME = "CodeDeployDefault.ECSLinear10PercentEvery1Minutes" ECS_TASKSET_TERMINATION_WAIT_TIME = 10 ECS_TASK_FAMILY_NAME = "Nginx-microservice" ECS_APP_NAME = "Nginx-microservice" ECS_APP_LOG_GROUP_NAME = "/ecs/Nginx-microservice" DUMMY_TASK_FAMILY_NAME = "sample-Nginx-microservice" DUMMY_APP_NAME = "sample-Nginx-microservice" DUMMY_APP_LOG_GROUP_NAME = "/ecs/sample-Nginx-microservice" DUMMY_CONTAINER_IMAGE = "smuralee/nginx" # ============================================================================= # ECR and CodeCommit repositories for the Blue/ Green deployment # ============================================================================= # ECR repository for the docker images NginxecrRepo = aws_ecr.Repository(self, "NginxRepo", image_scan_on_push=True) NginxCodeCommitrepo = aws_codecommit.Repository( self, "NginxRepository", repository_name=ECS_APP_NAME, description="Oussama application hosted on NGINX") # ============================================================================= # CODE BUILD and ECS TASK ROLES for the Blue/ Green deployment # ============================================================================= # IAM role for the Code Build project codeBuildServiceRole = aws_iam.Role( self, "codeBuildServiceRole", assumed_by=aws_iam.ServicePrincipal('codebuild.amazonaws.com')) inlinePolicyForCodeBuild = aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, actions=[ "ecr:GetAuthorizationToken", "ecr:BatchCheckLayerAvailability", "ecr:InitiateLayerUpload", "ecr:UploadLayerPart", "ecr:CompleteLayerUpload", "ecr:PutImage" ], resources=["*"]) codeBuildServiceRole.add_to_policy(inlinePolicyForCodeBuild) # ECS task role ecsTaskRole = aws_iam.Role( self, "ecsTaskRoleForWorkshop", assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com')) ecsTaskRole.add_managed_policy( aws_iam.ManagedPolicy.from_aws_managed_policy_name( "service-role/AmazonECSTaskExecutionRolePolicy")) # ============================================================================= # CODE DEPLOY APPLICATION for the Blue/ Green deployment # ============================================================================= # Creating the code deploy application codeDeployApplication = codedeploy.EcsApplication( self, "NginxAppCodeDeploy") # Creating the code deploy service role codeDeployServiceRole = aws_iam.Role( self, "codeDeployServiceRole", assumed_by=aws_iam.ServicePrincipal('codedeploy.amazonaws.com')) codeDeployServiceRole.add_managed_policy( aws_iam.ManagedPolicy.from_aws_managed_policy_name( "AWSCodeDeployRoleForECS")) # IAM role for custom lambda function customLambdaServiceRole = aws_iam.Role( self, "codeDeployCustomLambda", assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com')) inlinePolicyForLambda = aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, actions=[ "iam:PassRole", "sts:AssumeRole", "codedeploy:List*", "codedeploy:Get*", "codedeploy:UpdateDeploymentGroup", "codedeploy:CreateDeploymentGroup", "codedeploy:DeleteDeploymentGroup" ], resources=["*"]) customLambdaServiceRole.add_managed_policy(
aws_iam.ManagedPolicy.from_aws_managed_policy_name( 'service-role/AWSLambdaBasicExecutionRole')) customLambdaServiceRole.add_to_policy(inlinePolicyForLambda) # Custom resource to create the deployment group createDeploymentGroupLambda = aws_lambda.Function( self, 'createDeploymentGroupLambda', code=aws_lambda.Code.from_asset("custom_resources"), runtime=aws_lambda.Runtime.PYTHON_3_8, handler='create_deployment_group.handler', role=customLambdaServiceRole, description="Custom resource to create deployment group", memory_size=128, timeout=core.Duration.seconds(60)) # ================================================================================================ # CloudWatch Alarms for 4XX errors blue4xxMetric = aws_cloudwatch.Metric( namespace='AWS/ApplicationELB', metric_name='HTTPCode_Target_4XX_Count', dimensions={ "TargetGroup": blueGroup.target_group_full_name, "LoadBalancer": alb.load_balancer_full_name }, statistic="sum", period=core.Duration.minutes(1)) blueGroupAlarm = aws_cloudwatch.Alarm( self, "blue4xxErrors", alarm_name="Blue_4xx_Alarm", alarm_description= "CloudWatch Alarm for the 4xx errors of Blue target group", metric=blue4xxMetric, threshold=1, evaluation_periods=1) green4xxMetric = aws_cloudwatch.Metric( namespace='AWS/ApplicationELB', metric_name='HTTPCode_Target_4XX_Count', dimensions={ "TargetGroup": greenGroup.target_group_full_name, "LoadBalancer": alb.load_balancer_full_name }, statistic="sum", period=core.Duration.minutes(1)) greenGroupAlarm = aws_cloudwatch.Alarm( self, "green4xxErrors", alarm_name="Green_4xx_Alarm", alarm_description= "CloudWatch Alarm for the 4xx errors of Green target group", metric=green4xxMetric, threshold=1, evaluation_periods=1) # ================================================================================================ # DUMMY TASK DEFINITION for the initial service creation # This is required for the service being made available to create the CodeDeploy Deployment Group # ================================================================================================ sampleTaskDefinition = aws_ecs.FargateTaskDefinition( self, "sampleTaskDefn", family=DUMMY_TASK_FAMILY_NAME, cpu=256, memory_limit_mib=1024, task_role=ecsTaskRole, execution_role=ecsTaskRole) sampleContainerDefn = sampleTaskDefinition.add_container( "sampleAppContainer", image=aws_ecs.ContainerImage.from_registry(DUMMY_CONTAINER_IMAGE), logging=aws_ecs.AwsLogDriver(log_group=aws_logs.LogGroup( self, "sampleAppLogGroup", log_group_name=DUMMY_APP_LOG_GROUP_NAME, removal_policy=core.RemovalPolicy.DESTROY), stream_prefix=DUMMY_APP_NAME), docker_labels={"name": DUMMY_APP_NAME}) port_mapping = aws_ecs.PortMapping(container_port=80, protocol=aws_ecs.Protocol.TCP) sampleContainerDefn.add_port_mappings(port_mapping) # ================================================================================================ # ECS task definition using ECR image # Will be used by the CODE DEPLOY for Blue/Green deployment # ================================================================================================ NginxTaskDefinition = aws_ecs.FargateTaskDefinition( self, "appTaskDefn", family=ECS_TASK_FAMILY_NAME, cpu=256, memory_limit_mib=1024, task_role=ecsTaskRole, execution_role=ecsTaskRole) NginxcontainerDefinition = NginxTaskDefinition.add_container( "NginxAppContainer", image=aws_ecs.ContainerImage.from_ecr_repository( NginxecrRepo, "latest"), logging=aws_ecs.AwsLogDriver(log_group=aws_logs.LogGroup( self, "NginxAppLogGroup", log_group_name=ECS_APP_LOG_GROUP_NAME, 
removal_policy=core.RemovalPolicy.DESTROY), stream_prefix=ECS_APP_NAME), docker_labels={"name": ECS_APP_NAME}) NginxcontainerDefinition.add_port_mappings(port_mapping) # ============================================================================= # ECS SERVICE for the Blue/ Green deployment # ============================================================================= NginxAppService = aws_ecs.FargateService( self, "NginxAppService", cluster=ecs_cluster, task_definition=NginxTaskDefinition, health_check_grace_period=core.Duration.seconds(10), desired_count=3, deployment_controller={ "type": aws_ecs.DeploymentControllerType.CODE_DEPLOY }, service_name=ECS_APP_NAME) NginxAppService.connections.allow_from(alb, aws_ec2.Port.tcp(80)) NginxAppService.connections.allow_from(alb, aws_ec2.Port.tcp(8080)) NginxAppService.attach_to_application_target_group(blueGroup) # ============================================================================= # CODE DEPLOY - Deployment Group CUSTOM RESOURCE for the Blue/ Green deployment # ============================================================================= core.CustomResource( self, 'customEcsDeploymentGroup', service_token=createDeploymentGroupLambda.function_arn, properties={ "ApplicationName": codeDeployApplication.application_name, "DeploymentGroupName": ECS_DEPLOYMENT_GROUP_NAME, "DeploymentConfigName": ECS_DEPLOYMENT_CONFIG_NAME, "ServiceRoleArn": codeDeployServiceRole.role_arn, "BlueTargetGroup": blueGroup.target_group_name, "GreenTargetGroup": greenGroup.target_group_name, "ProdListenerArn": albProdListener.listener_arn, "TestListenerArn": albTestListener.listener_arn, "EcsClusterName": ecs_cluster.cluster_name, "EcsServiceName": NginxAppService.service_name, "TerminationWaitTime": ECS_TASKSET_TERMINATION_WAIT_TIME, "BlueGroupAlarm": blueGroupAlarm.alarm_name, "GreenGroupAlarm": greenGroupAlarm.alarm_name, }) ecsDeploymentGroup = codedeploy.EcsDeploymentGroup.from_ecs_deployment_group_attributes( self, "ecsDeploymentGroup", application=codeDeployApplication, deployment_group_name=ECS_DEPLOYMENT_GROUP_NAME, deployment_config=codedeploy.EcsDeploymentConfig. 
from_ecs_deployment_config_name(self, "ecsDeploymentConfig", ECS_DEPLOYMENT_CONFIG_NAME)) # ============================================================================= # CODE BUILD PROJECT for the Blue/ Green deployment # ============================================================================= # Creating the code build project NginxAppcodebuild = aws_codebuild.Project( self, "NginxAppCodeBuild", role=codeBuildServiceRole, environment=aws_codebuild.BuildEnvironment( build_image=aws_codebuild.LinuxBuildImage.STANDARD_4_0, compute_type=aws_codebuild.ComputeType.SMALL, privileged=True, environment_variables={ 'REPOSITORY_URI': { 'value': NginxecrRepo.repository_uri, 'type': aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT }, 'TASK_EXECUTION_ARN': { 'value': ecsTaskRole.role_arn, 'type': aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT }, 'TASK_FAMILY': { 'value': ECS_TASK_FAMILY_NAME, 'type': aws_codebuild.BuildEnvironmentVariableType.PLAINTEXT } }), source=aws_codebuild.Source.code_commit( repository=NginxCodeCommitrepo)) # ============================================================================= # CODE PIPELINE for Blue/Green ECS deployment # ============================================================================= codePipelineServiceRole = aws_iam.Role( self, "codePipelineServiceRole", assumed_by=aws_iam.ServicePrincipal('codepipeline.amazonaws.com')) inlinePolicyForCodePipeline = aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, actions=[ "iam:PassRole", "sts:AssumeRole", "codecommit:Get*", "codecommit:List*", "codecommit:GitPull", "codecommit:UploadArchive", "codecommit:CancelUploadArchive", "codebuild:BatchGetBuilds", "codebuild:StartBuild", "codedeploy:CreateDeployment", "codedeploy:Get*", "codedeploy:RegisterApplicationRevision", "s3:Get*", "s3:List*", "s3:PutObject" ], resources=["*"]) codePipelineServiceRole.add_to_policy(inlinePolicyForCodePipeline) sourceArtifact = codepipeline.Artifact('sourceArtifact') buildArtifact = codepipeline.Artifact('buildArtifact') # S3 bucket for storing the code pipeline artifacts NginxAppArtifactsBucket = s3.Bucket( self, "NginxAppArtifactsBucket", encryption=s3.BucketEncryption.S3_MANAGED, block_public_access=s3.BlockPublicAccess.BLOCK_ALL) # S3 bucket policy for the code pipeline artifacts denyUnEncryptedObjectUploads = aws_iam.PolicyStatement( effect=aws_iam.Effect.DENY, actions=["s3:PutObject"], principals=[aws_iam.AnyPrincipal()], resources=[NginxAppArtifactsBucket.bucket_arn + "/*"], conditions={ "StringNotEquals": { "s3:x-amz-server-side-encryption": "aws:kms" } }) denyInsecureConnections = aws_iam.PolicyStatement( effect=aws_iam.Effect.DENY, actions=["s3:*"], principals=[aws_iam.AnyPrincipal()], resources=[NginxAppArtifactsBucket.bucket_arn + "/*"], conditions={"Bool": { "aws:SecureTransport": "false" }}) NginxAppArtifactsBucket.add_to_resource_policy( denyUnEncryptedObjectUploads) NginxAppArtifactsBucket.add_to_resource_policy(denyInsecureConnections) # Code Pipeline - CloudWatch trigger event is created by CDK codepipeline.Pipeline( self, "ecsBlueGreen", role=codePipelineServiceRole, artifact_bucket=NginxAppArtifactsBucket, stages=[ codepipeline.StageProps( stage_name='Source', actions=[ aws_codepipeline_actions.CodeCommitSourceAction( action_name='Source', repository=NginxCodeCommitrepo, output=sourceArtifact, ) ]), codepipeline.StageProps( stage_name='Build', actions=[ aws_codepipeline_actions.CodeBuildAction( action_name='Build', project=NginxAppcodebuild, input=sourceArtifact, outputs=[buildArtifact]) ]), 
codepipeline.StageProps( stage_name='Deploy', actions=[ aws_codepipeline_actions.CodeDeployEcsDeployAction( action_name='Deploy', deployment_group=ecsDeploymentGroup, app_spec_template_input=buildArtifact, task_definition_template_input=buildArtifact, ) ]) ]) # ============================================================================= # Export the outputs # ============================================================================= core.CfnOutput(self, "ecsBlueGreenCodeRepo", description="Demo app code commit repository", export_name="ecsBlueGreenDemoAppRepo", value=NginxCodeCommitrepo.repository_clone_url_http) core.CfnOutput(self, "ecsBlueGreenLBDns", description="Load balancer DNS", export_name="ecsBlueGreenLBDns", value=alb.load_balancer_dns_name)
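# The Deploy action above relies on CDK's defaults, so the build artifact
# must contain appspec.yaml and taskdef.json at its root; the buildspecs are
# expected to emit both files. An equivalent, more explicit form of the same
# action that pins the exact paths inside the artifact:
deploy_action = aws_codepipeline_actions.CodeDeployEcsDeployAction(
    action_name='Deploy',
    deployment_group=ecsDeploymentGroup,
    app_spec_template_file=buildArtifact.at_path('appspec.yaml'),
    task_definition_template_file=buildArtifact.at_path('taskdef.json'))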
def __init__(self, scope: core.Construct, id: str, website: WebsiteConstruct, **kwargs) -> None: super().__init__(scope, id, **kwargs) stack = core.Stack.of(self) repo = codecommit.Repository(self, 'Repository', repository_name=stack.stack_name.lower()) project = codebuild.PipelineProject( self, 'Builder', environment=codebuild.BuildEnvironment( build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_3, compute_type=codebuild.ComputeType.LARGE), cache=codebuild.Cache.local(codebuild.LocalCacheMode.CUSTOM), build_spec=codebuild.BuildSpec.from_object({ 'version': 0.2, 'cache': { 'paths': ['node_modules/**/*'], }, 'phases': { 'install': { 'runtime-versions': { 'nodejs': 12 } }, 'pre_build': { 'commands': ['echo Pre-build started on `date`', 'yarn install'] }, 'build': { 'commands': ['echo Build started on `date`', 'yarn build'] } }, 'artifacts': { 'files': ['**/*'], 'base-directory': 'dist' } }), ) source_artifact = codepipeline.Artifact('SourceArtifact') build_artifact = codepipeline.Artifact('BuildArtifact') pipeline = codepipeline.Pipeline( self, 'Pipeline', cross_account_keys=False, restart_execution_on_update=True, stages=[ codepipeline.StageProps( stage_name='Source', actions=[ codepipeline_actions.CodeCommitSourceAction( action_name='Source', repository=repo, output=source_artifact, ) ]), codepipeline.StageProps( stage_name='Build', actions=[ codepipeline_actions.CodeBuildAction( action_name='Build', project=project, input=source_artifact, outputs=[build_artifact], ) ]), codepipeline.StageProps( stage_name='Deploy', actions=[ codepipeline_actions.S3DeployAction( action_name='Deploy', input=build_artifact, bucket=website.bucket, extract=True, ) ]) ])
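# WebsiteConstruct is defined elsewhere; the pipeline only relies on its
# bucket attribute as the S3 deploy target. A hypothetical minimal version
# for context (settings are assumptions):
from aws_cdk import core
from aws_cdk import aws_s3 as s3


class WebsiteConstruct(core.Construct):
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # A public static-website bucket the pipeline deploys into.
        self.bucket = s3.Bucket(
            self, 'Bucket',
            website_index_document='index.html',
            public_read_access=True)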
def __init__(self, scope: Stack, prefix: str, artifacts_bucket: IBucket, source_repository: aws_codecommit.Repository, ecr_repository: aws_ecr.Repository, task_def: str, app_spec: str, main_listener: aws_elasticloadbalancingv2.CfnListener, deployments_listener: aws_elasticloadbalancingv2.CfnListener, ecs_cluster: aws_ecs.Cluster, ecs_service: CfnService, production_target_group, deployment_target_group): self.application = aws_codedeploy.EcsApplication( scope, prefix + 'FargateCodeDeployApplication', application_name=prefix + 'FargateCodeDeployApplication', ) self.deployment_group_custom = DeploymentGroup( stack=scope, prefix=prefix, code_repository=source_repository, task_definition=task_def, app_spec=app_spec, ecs_application=self.application, main_listener=main_listener, deployments_listener=deployments_listener, ecs_cluster=ecs_cluster, production_target_group=production_target_group, deployment_target_group=deployment_target_group).get_resource() self.deployment_group_custom.node.add_dependency(ecs_service) self.deployment_group_custom.node.add_dependency(ecs_cluster) self.deployment_group = aws_codedeploy.EcsDeploymentGroup.from_ecs_deployment_group_attributes( scope, prefix + 'FargateDeploymentGroup', application=self.application, deployment_group_name=prefix + 'FargateDeploymentGroup', ) self.deployment_group.node.add_dependency(self.deployment_group_custom) self.deployment_config_repository = aws_codecommit.Repository( scope, prefix + 'FargateDeploymentConfigRepository', description= 'Repository containing appspec and taskdef files for ecs code-deploy blue/green deployments.', repository_name=prefix.lower() + '-deployment-config') self.commit_custom = DeploymentConfig( stack=scope, prefix=prefix, code_repository=self.deployment_config_repository, task_definition=task_def, app_spec=app_spec).get_resource() self.ecr_repository_output_artifact = aws_codepipeline.Artifact( 'EcsImage') self.config_output_artifact = aws_codepipeline.Artifact('EcsConfig') self.ecr_to_ecs_pipeline = aws_codepipeline.Pipeline( scope, prefix + 'FargateEcrToEcsPipeline', artifact_bucket=artifacts_bucket, pipeline_name=prefix + 'FargateEcrToEcsPipeline', stages=[ aws_codepipeline.StageProps( stage_name='SourceStage', actions=[ aws_codepipeline_actions.EcrSourceAction( action_name='SourceEcrAction', output=self.ecr_repository_output_artifact, repository=ecr_repository, run_order=1, ), aws_codepipeline_actions.CodeCommitSourceAction( action_name='SourceCodeCommitAction', output=self.config_output_artifact, repository=self.deployment_config_repository, branch='master', run_order=1, ) ]), aws_codepipeline.StageProps( stage_name='DeployStage', actions=[ aws_codepipeline_actions.CodeDeployEcsDeployAction( action_name='DeployAction', deployment_group=self.deployment_group, app_spec_template_input=self. config_output_artifact, task_definition_template_input=self. config_output_artifact, container_image_inputs=[ aws_codepipeline_actions. CodeDeployEcsContainerImageInput( input=self.ecr_repository_output_artifact, task_definition_placeholder='IMAGE1_NAME') ], run_order=1) ]) ]) self.ecr_to_ecs_pipeline.node.add_dependency(self.commit_custom)
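# task_definition_placeholder='IMAGE1_NAME' above means the taskdef.json
# committed to the deployment-config repository must reference its image as
# <IMAGE1_NAME>; CodePipeline substitutes the image URI taken from the ECR
# source's imageDetail.json before handing the file to CodeDeploy. An
# illustrative fragment (container name is an assumption), expressed as a
# Python dict:
taskdef_fragment = {
    "family": "app",  # illustrative
    "containerDefinitions": [{
        "name": "app",  # illustrative container name
        "image": "<IMAGE1_NAME>",  # replaced by the pipeline at deploy time
    }],
}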
def __init__(
        self,
        scope: core.Stack,
        prefix: str,
        pipeline_params: PipelineParameters,
        lambda_params: LambdaParameters,
        vpc_params: VpcParameters
):
    """
    AWS CDK package that helps deploy a Lambda function.

    :param scope: A scope in which resources shall be created.
    :param prefix: Prefix for all of your resource IDs and names.
    :param pipeline_params: Parameters letting you supply an SSH key for accessing remote repositories.
    :param lambda_params: Parameters focusing on the Lambda function itself.
    :param vpc_params: Parameters focused on Virtual Private Cloud settings.
    """
    # CodeCommit repository to store your function source code.
    self.project_repository = aws_codecommit.Repository(
        scope, prefix + 'CiCdLambdaCodeCommitRepo',
        repository_name=prefix + 'CiCdLambdaCodeCommitRepo',
    )

    # The Lambda function for which this package is made.
    self.function = aws_lambda.Function(
        scope, prefix + 'Function',
        code=aws_lambda.Code.from_inline(
            'def runner():\n'
            '    return \'Hello, World!\''
        ),
        handler=lambda_params.lambda_handler,
        runtime=lambda_params.lambda_runtime,
        description=f'Lambda function {prefix}.',
        environment=lambda_params.environment,
        function_name=prefix,
        memory_size=lambda_params.lambda_memory,
        reserved_concurrent_executions=5,
        role=lambda_params.execution_role,
        security_groups=vpc_params.security_groups,
        timeout=core.Duration.seconds(lambda_params.lambda_timeout),
        vpc=vpc_params.vpc,
        vpc_subnets=aws_ec2.SubnetSelection(subnets=vpc_params.subnets)
    )

    # Create alarms for the function.
    if lambda_params.alarms_sns_topic:
        self.alarms = LambdaAlarms(scope, prefix, lambda_params.alarms_sns_topic, self.function)
    else:
        self.alarms = None

    # Convert the bucket name to an S3-friendly one.
    bucket_name = self.__convert(prefix + 'CiCdLambdaArtifactsBucket')
    self.bucket = EmptyS3Bucket(
        scope, prefix + 'CiCdLambdaDeploymentBucket',
        bucket_name=bucket_name
    )

    # Create a BuildSpec object for CodeBuild.
    self.buildspec = BuildSpecObject(
        prefix,
        self.bucket,
        pipeline_params.ssh_params.secret_id,
        pipeline_params.ssh_params.private_key,
        pipeline_params.install_args,
        pipeline_params.test_args,
        pipeline_params.custom_pre_build_commands
    )

    # CodeBuild project that installs the function's dependencies, runs tests and deploys it to Lambda.
    self.code_build_project = aws_codebuild.PipelineProject(
        scope, prefix + 'CiCdLambdaCodeBuildProject',
        project_name=prefix + 'CiCdLambdaCodeBuildProject',
        environment=aws_codebuild.BuildEnvironment(
            build_image=aws_codebuild.LinuxBuildImage.STANDARD_3_0,
            compute_type=aws_codebuild.ComputeType.SMALL,
            privileged=True
        ),
        build_spec=aws_codebuild.BuildSpec.from_object(self.buildspec.get_object()),
    )

    # Add permissions that allow CodeBuild to do the aforementioned things.
    self.code_build_project.role.add_to_policy(
        statement=aws_iam.PolicyStatement(
            actions=[
                's3:*',
                'lambda:UpdateFunctionCode',
            ],
            resources=['*'],
            effect=aws_iam.Effect.ALLOW)
    )

    # If a secret is provided, allow CodeBuild to read it.
    if pipeline_params.ssh_params.secret_arn is not None:
        self.code_build_project.role.add_to_policy(
            statement=aws_iam.PolicyStatement(
                actions=[
                    'secretsmanager:GetSecretValue'
                ],
                resources=[pipeline_params.ssh_params.secret_arn],
                effect=aws_iam.Effect.ALLOW)
        )

    # If a KMS key is provided, allow CodeBuild to decrypt using it.
    if pipeline_params.ssh_params.kms_key_arn is not None:
        self.code_build_project.role.add_to_policy(
            statement=aws_iam.PolicyStatement(
                actions=[
                    "kms:Decrypt"
                ],
                effect=aws_iam.Effect.ALLOW,
                resources=[pipeline_params.ssh_params.kms_key_arn]
            )
        )

    # Push the initial commit to CodeCommit.
    self.initial_commit = InitialCommit(
        scope, prefix, self.project_repository
    ).get_resource()

    self.source_artifact = aws_codepipeline.Artifact(artifact_name=prefix + 'CiCdLambdaSourceArtifact')

    # CodePipeline source action to read from CodeCommit.
    self.source_action = aws_codepipeline_actions.CodeCommitSourceAction(
        repository=self.project_repository,
        branch='master',
        action_name='CodeCommitSource',
        run_order=1,
        trigger=aws_codepipeline_actions.CodeCommitTrigger.EVENTS,
        output=self.source_artifact
    )

    # CodePipeline build action that uses the CodeBuild project.
    self.build_action = aws_codepipeline_actions.CodeBuildAction(
        input=self.source_artifact,
        project=self.code_build_project,
        action_name='BuildAction',
        run_order=1
    )

    # CodePipeline pipeline that executes both actions.
    self.codecommit_to_lambda_pipeline = aws_codepipeline.Pipeline(
        scope, prefix + 'CiCdLambdaPipeline',
        pipeline_name=prefix + 'CiCdLambdaPipeline',
        artifact_bucket=self.bucket,
        stages=[
            aws_codepipeline.StageProps(
                stage_name='SourceStage',
                actions=[self.source_action]
            ),
            aws_codepipeline.StageProps(
                stage_name='BuildStage',
                actions=[self.build_action]
            )
        ]
    )
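# Minimal instantiation sketch. "CiCdLambdaPipeline" is a hypothetical name
# for the class above; PipelineParameters, LambdaParameters and VpcParameters
# are the package's own parameter objects, whose constructors are not shown
# in this snippet:
pipeline = CiCdLambdaPipeline(
    scope=my_stack,                       # an existing core.Stack
    prefix='MyService',
    pipeline_params=my_pipeline_params,   # SSH key / secret / build-command settings
    lambda_params=my_lambda_params,       # handler, runtime, memory, timeout, role
    vpc_params=my_vpc_params,             # vpc, subnets, security groups
)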
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # The pipeline requires a versioned bucket.
    bucket = s3.Bucket(
        self, "SourceBucket",
        bucket_name=f"{props['namespace'].lower()}-{core.Aws.ACCOUNT_ID}",
        versioned=True,
        removal_policy=core.RemovalPolicy.DESTROY)

    # ECR repository to push the Docker image into.
    ecr = aws_ecr.Repository(
        self, "ECR",
        repository_name=props['namespace'],
        removal_policy=core.RemovalPolicy.DESTROY
    )

    # CodeBuild project meant to run in the pipeline.
    cb_docker_build = aws_codebuild.PipelineProject(
        self, "DockerBuild",
        project_name=f"{props['namespace']}-Docker-Build",
        build_spec=aws_codebuild.BuildSpec.from_source_filename(
            filename='./docker/buildspec.yml'),
        environment=aws_codebuild.BuildEnvironment(
            privileged=True,
        ),
        environment_variables={
            'IMAGE_REPO_NAME': aws_codebuild.BuildEnvironmentVariable(
                value=ecr.repository_name),
            'IMAGE_TAG': aws_codebuild.BuildEnvironmentVariable(
                value='latest'),
            'AWS_ACCOUNT_ID': aws_codebuild.BuildEnvironmentVariable(
                value=core.Aws.ACCOUNT_ID),
            'AWS_DEFAULT_REGION': aws_codebuild.BuildEnvironmentVariable(
                value=core.Aws.REGION)
        },
        description='Pipeline for CodeBuild',
        timeout=core.Duration.minutes(60),
    )

    # CodeBuild IAM permissions to read/write the bucket.
    bucket.grant_read_write(cb_docker_build)
    # CodeBuild permissions to interact with ECR.
    ecr.grant_pull_push(cb_docker_build)

    # Define the S3 artifact.
    source_output = aws_codepipeline.Artifact(artifact_name='source')

    # Define the pipeline.
    pipeline = aws_codepipeline.Pipeline(
        self, "Pipeline",
        pipeline_name=f"{props['namespace']}-pipeline",
        artifact_bucket=bucket,
        restart_execution_on_update=True,
        stages=[
            aws_codepipeline.StageProps(
                stage_name='Source',
                actions=[
                    aws_codepipeline_actions.GitHubSourceAction(
                        action_name='Checkout',
                        owner=props['github-owner'],
                        repo=props['github-repository'],
                        oauth_token=core.SecretValue.secrets_manager('GitHubToken'),
                        output=source_output,
                        trigger=aws_codepipeline_actions.GitHubTrigger.WEBHOOK,
                    ),
                ]
            ),
            aws_codepipeline.StageProps(
                stage_name='Build',
                actions=[
                    aws_codepipeline_actions.CodeBuildAction(
                        action_name='DockerBuildImage',
                        input=source_output,
                        project=cb_docker_build,
                        run_order=1,
                    )
                ]
            )
        ]
    )

    # Give the pipeline role read/write on the bucket.
    bucket.grant_read_write(pipeline.role)

    self.output_props = props.copy()
    self.output_props['ecr'] = ecr

    # CFN outputs.
    core.CfnOutput(
        self, "PipelineOut",
        description="Pipeline",
        value=pipeline.pipeline_name
    )
    core.CfnOutput(
        self, "ECRURI",
        description="ECR URI",
        value=ecr.repository_uri,
    )
    core.CfnOutput(
        self, "S3Bucket",
        description="S3 Bucket",
        value=bucket.bucket_name
    )
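# The project above loads its build spec from ./docker/buildspec.yml in the
# source repository, which is not shown here. A hedged sketch of what that
# file might contain, expressed as the equivalent BuildSpec.from_object()
# dict and using the environment variables wired in above (the build context
# path is an assumption):
docker_buildspec_sketch = aws_codebuild.BuildSpec.from_object({
    'version': 0.2,
    'phases': {
        'pre_build': {
            'commands': [
                # Log Docker in to the account's ECR registry.
                'aws ecr get-login-password --region $AWS_DEFAULT_REGION | '
                'docker login --username AWS --password-stdin '
                '$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com',
            ]
        },
        'build': {
            'commands': ['docker build -t $IMAGE_REPO_NAME:$IMAGE_TAG .']
        },
        'post_build': {
            'commands': [
                # Tag and push the image to the ECR repository created above.
                'docker tag $IMAGE_REPO_NAME:$IMAGE_TAG '
                '$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com/$IMAGE_REPO_NAME:$IMAGE_TAG',
                'docker push '
                '$AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com/$IMAGE_REPO_NAME:$IMAGE_TAG',
            ]
        },
    },
})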