def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Get stack parameters: email and repo address
    notification_email = core.CfnParameter(
        self,
        "Email Address",
        type="String",
        description="Specify an email to receive notifications about pipeline outcomes.",
        allowed_pattern="^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$",
        min_length=5,
        max_length=320,
        constraint_description="Please enter an email address in a valid format (e.g., name@example.com)",
    )
    git_address = core.CfnParameter(
        self,
        "CodeCommit Repo Address",
        type="String",
        description="AWS CodeCommit repository clone URL to connect to the framework.",
        allowed_pattern=(
            "^(((https:\/\/|ssh:\/\/)(git\-codecommit)\.[a-zA-Z0-9_.+-]+(amazonaws\.com\/)[a-zA-Z0-9-.]"
            "+(\/)[a-zA-Z0-9-.]+(\/)[a-zA-Z0-9-.]+$)|^$)"
        ),
        min_length=0,
        max_length=320,
        constraint_description=(
            "CodeCommit address must follow the pattern: ssh or "
            "https://git-codecommit.REGION.amazonaws.com/version/repos/REPONAME"
        ),
    )
    # Get the optional S3 assets bucket to use
    existing_bucket = core.CfnParameter(
        self,
        "ExistingS3Bucket",
        type="String",
        description=(
            "Name of an existing S3 bucket to be used for ML assets. The S3 bucket must be in the "
            "same region as the deployed stack and have versioning enabled. If not provided, a new "
            "S3 bucket will be created."
        ),
        allowed_pattern=(
            "((?=^.{3,63}$)(?!^(\d+\.)+\d+$)(^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*"
            "([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])$)|^$)"
        ),
        min_length=0,
        max_length=63,
    )

    # Conditions
    git_address_provided = core.CfnCondition(
        self,
        "GitAddressProvided",
        expression=core.Fn.condition_not(core.Fn.condition_equals(git_address, "")),
    )
    # The client provided an existing S3 bucket name to be used for assets
    existing_bucket_provided = core.CfnCondition(
        self,
        "S3BucketProvided",
        expression=core.Fn.condition_not(
            core.Fn.condition_equals(existing_bucket.value_as_string.strip(), "")
        ),
    )
    # An S3 bucket needs to be created for assets
    create_new_bucket = core.CfnCondition(
        self,
        "CreateS3Bucket",
        expression=core.Fn.condition_equals(existing_bucket.value_as_string.strip(), ""),
    )

    # Constants
    pipeline_stack_name = "MLOps-pipeline"

    # CDK Resources setup
    access_logs_bucket = s3.Bucket(
        self,
        "accessLogs",
        encryption=s3.BucketEncryption.S3_MANAGED,
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
    )

    # Apply secure transfer bucket policy
    apply_secure_bucket_policy(access_logs_bucket)

    # This is a logging bucket.
    access_logs_bucket.node.default_child.cfn_options.metadata = suppress_s3_access_policy()

    # Import the user-provided S3 bucket, if any. s3.Bucket.from_bucket_arn is used instead of
    # s3.Bucket.from_bucket_name to allow a cross-account bucket.
    client_existing_bucket = s3.Bucket.from_bucket_arn(
        self,
        "ClientExistingBucket",
        f"arn:aws:s3:::{existing_bucket.value_as_string.strip()}",
    )

    # Create the resource if the existing_bucket_provided condition is True
    core.Aspects.of(client_existing_bucket).add(ConditionalResources(existing_bucket_provided))

    # Create the assets bucket so that users can upload ML models to it.
    assets_bucket = s3.Bucket(
        self,
        "pipeline-assets-" + str(uuid.uuid4()),
        versioned=True,
        encryption=s3.BucketEncryption.S3_MANAGED,
        server_access_logs_bucket=access_logs_bucket,
        server_access_logs_prefix="assets_bucket_access_logs",
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
    )

    # Apply secure transport bucket policy
    apply_secure_bucket_policy(assets_bucket)

    # Create the resource if the create_new_bucket condition is True
    core.Aspects.of(assets_bucket).add(ConditionalResources(create_new_bucket))

    # Get the assets S3 bucket's name/arn, based on the condition
    assets_s3_bucket_name = core.Fn.condition_if(
        existing_bucket_provided.logical_id,
        client_existing_bucket.bucket_name,
        assets_bucket.bucket_name,
    ).to_string()

    blueprints_bucket_name = "blueprint-repository-" + str(uuid.uuid4())
    blueprint_repository_bucket = s3.Bucket(
        self,
        blueprints_bucket_name,
        encryption=s3.BucketEncryption.S3_MANAGED,
        server_access_logs_bucket=access_logs_bucket,
        server_access_logs_prefix=blueprints_bucket_name,
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
    )
    # Apply secure transport bucket policy
    apply_secure_bucket_policy(blueprint_repository_bucket)

    # Custom resource to copy source bucket content to the blueprints bucket
    custom_resource_lambda_fn = lambda_.Function(
        self,
        "CustomResourceLambda",
        code=lambda_.Code.from_asset("lambdas/custom_resource"),
        handler="index.on_event",
        runtime=lambda_.Runtime.PYTHON_3_8,
        environment={
            "source_bucket": "https://%%BUCKET_NAME%%-"
            + core.Aws.REGION
            + ".s3.amazonaws.com/%%SOLUTION_NAME%%/%%VERSION%%",
            "destination_bucket": blueprint_repository_bucket.bucket_name,
            "LOG_LEVEL": "INFO",
        },
        timeout=core.Duration.seconds(60),
    )
    custom_resource_lambda_fn.node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {
                    "id": "W58",
                    "reason": "The lambda function's role already has permissions to write CloudWatch logs",
                }
            ]
        }
    }
    blueprint_repository_bucket.grant_write(custom_resource_lambda_fn)
    custom_resource = core.CustomResource(
        self,
        "CustomResourceCopyAssets",
        service_token=custom_resource_lambda_fn.function_arn,
    )
    custom_resource.node.add_dependency(blueprint_repository_bucket)

    ### IAM policies setup ###
    cloudformation_role = iam.Role(
        self,
        "mlopscloudformationrole",
        assumed_by=iam.ServicePrincipal("cloudformation.amazonaws.com"),
    )
    # CloudFormation policy setup
    orchestrator_policy = iam.Policy(
        self,
        "lambdaOrchestratorPolicy",
        statements=[
            iam.PolicyStatement(
                actions=[
                    "cloudformation:CreateStack",
                    "cloudformation:DeleteStack",
                    "cloudformation:UpdateStack",
                    "cloudformation:ListStackResources",
                ],
                resources=[
                    (
                        f"arn:{core.Aws.PARTITION}:cloudformation:{core.Aws.REGION}:"
                        f"{core.Aws.ACCOUNT_ID}:stack/{pipeline_stack_name}*/*"
                    ),
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "iam:CreateRole",
                    "iam:DeleteRole",
                    "iam:DeleteRolePolicy",
                    "iam:GetRole",
                    "iam:GetRolePolicy",
                    "iam:PassRole",
                    "iam:PutRolePolicy",
                    "iam:AttachRolePolicy",
                    "iam:DetachRolePolicy",
                ],
                resources=[f"arn:{core.Aws.PARTITION}:iam::{core.Aws.ACCOUNT_ID}:role/{pipeline_stack_name}*"],
            ),
            iam.PolicyStatement(
                actions=[
                    "ecr:CreateRepository",
                    "ecr:DeleteRepository",
                    "ecr:DescribeRepositories",
                ],
                resources=[
                    (
                        f"arn:{core.Aws.PARTITION}:ecr:{core.Aws.REGION}:"
                        f"{core.Aws.ACCOUNT_ID}:repository/awsmlopsmodels*"
                    )
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "codebuild:CreateProject",
                    "codebuild:DeleteProject",
                    "codebuild:BatchGetProjects",
                ],
                resources=[
                    (
                        f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                        f"{core.Aws.ACCOUNT_ID}:project/ContainerFactory*"
                    ),
                    (
                        f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                        f"{core.Aws.ACCOUNT_ID}:project/VerifySagemaker*"
                    ),
                    (
                        f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:"
                        f"{core.Aws.ACCOUNT_ID}:report-group/*"
                    ),
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "lambda:CreateFunction",
                    "lambda:DeleteFunction",
                    "lambda:InvokeFunction",
                    "lambda:PublishLayerVersion",
                    "lambda:DeleteLayerVersion",
                    "lambda:GetLayerVersion",
                    "lambda:GetFunctionConfiguration",
                    "lambda:GetFunction",
                    "lambda:AddPermission",
                    "lambda:RemovePermission",
                    "lambda:UpdateFunctionConfiguration",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:layer:*",
                    f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:function:*",
                ],
            ),
            iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[
                    blueprint_repository_bucket.bucket_arn,
                    blueprint_repository_bucket.arn_for_objects("*"),
                    f"arn:{core.Aws.PARTITION}:s3:::{assets_s3_bucket_name}/*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "codepipeline:CreatePipeline",
                    "codepipeline:DeletePipeline",
                    "codepipeline:GetPipeline",
                    "codepipeline:GetPipelineState",
                ],
                resources=[
                    (
                        f"arn:{core.Aws.PARTITION}:codepipeline:{core.Aws.REGION}:"
                        f"{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*"
                    )
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "apigateway:POST",
                    "apigateway:PATCH",
                    "apigateway:DELETE",
                    "apigateway:GET",
                    "apigateway:PUT",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis/*",
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis",
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/account",
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans",
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans/*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "logs:CreateLogGroup",
                    "logs:DescribeLogGroups",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "s3:CreateBucket",
                    "s3:PutEncryptionConfiguration",
                    "s3:PutBucketVersioning",
                    "s3:PutBucketPublicAccessBlock",
                    "s3:PutBucketLogging",
                ],
                resources=["arn:" + core.Aws.PARTITION + ":s3:::*"],
            ),
            iam.PolicyStatement(
                actions=[
                    "sns:CreateTopic",
                    "sns:DeleteTopic",
                    "sns:Subscribe",
                    "sns:Unsubscribe",
                    "sns:GetTopicAttributes",
                    "sns:SetTopicAttributes",
                ],
                resources=[
                    (
                        f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:"
                        f"{pipeline_stack_name}*-PipelineNotification*"
                    ),
                    (
                        f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:"
                        f"{pipeline_stack_name}*-ModelMonitorPipelineNotification*"
                    ),
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "events:PutRule",
                    "events:DescribeRule",
                    "events:PutTargets",
                    "events:RemoveTargets",
                    "events:DeleteRule",
                    "events:PutEvents",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:rule/*",
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            ),
        ],
    )
    orchestrator_policy.attach_to_role(cloudformation_role)

    # Lambda function IAM setup
    lambda_passrole_policy = iam.PolicyStatement(actions=["iam:passrole"], resources=[cloudformation_role.role_arn])
    # API Gateway and Lambda setup to enable provisioning pipelines through API calls
    provisioner_apigw_lambda = aws_apigateway_lambda.ApiGatewayToLambda(
        self,
        "PipelineOrchestration",
        lambda_function_props={
            "runtime": lambda_.Runtime.PYTHON_3_8,
            "handler": "index.handler",
            "code": lambda_.Code.from_asset("lambdas/pipeline_orchestration"),
        },
        api_gateway_props={
            "defaultMethodOptions": {
                "authorizationType": apigw.AuthorizationType.IAM,
            },
            "restApiName": f"{core.Aws.STACK_NAME}-orchestrator",
            "proxy": False,
            "dataTraceEnabled": True,
        },
    )
    provision_resource = provisioner_apigw_lambda.api_gateway.root.add_resource("provisionpipeline")
    provision_resource.add_method("POST")
    status_resource = provisioner_apigw_lambda.api_gateway.root.add_resource("pipelinestatus")
    status_resource.add_method("POST")
    blueprint_repository_bucket.grant_read(provisioner_apigw_lambda.lambda_function)
    provisioner_apigw_lambda.lambda_function.add_to_role_policy(lambda_passrole_policy)
    orchestrator_policy.attach_to_role(provisioner_apigw_lambda.lambda_function.role)
    provisioner_apigw_lambda.lambda_function.add_to_role_policy(
        iam.PolicyStatement(actions=["xray:PutTraceSegments"], resources=["*"])
    )
    lambda_node = provisioner_apigw_lambda.lambda_function.node.default_child
    lambda_node.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {
                    "id": "W12",
                    "reason": "The xray permission PutTraceSegments is not able to be bound to resources.",
                }
            ]
        }
    }
    # Environment variables setup
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="BLUEPRINT_BUCKET_URL",
        value=str(blueprint_repository_bucket.bucket_regional_domain_name),
    )
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="BLUEPRINT_BUCKET", value=str(blueprint_repository_bucket.bucket_name)
    )
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="ACCESS_BUCKET", value=str(access_logs_bucket.bucket_name)
    )
    provisioner_apigw_lambda.lambda_function.add_environment(key="ASSETS_BUCKET", value=str(assets_s3_bucket_name))
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="CFN_ROLE_ARN", value=str(cloudformation_role.role_arn)
    )
    provisioner_apigw_lambda.lambda_function.add_environment(key="PIPELINE_STACK_NAME", value=pipeline_stack_name)
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="NOTIFICATION_EMAIL", value=notification_email.value_as_string
    )
    provisioner_apigw_lambda.lambda_function.add_environment(key="LOG_LEVEL", value="DEBUG")
    cfn_policy_for_lambda = orchestrator_policy.node.default_child
    cfn_policy_for_lambda.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {
                    "id": "W76",
                    "reason": "A complex IAM policy is required for this resource.",
                }
            ]
        }
    }

    ### Codepipeline with Git source definitions ###
    source_output = codepipeline.Artifact()
    # Process git_address to retrieve the repo name
    repo_name_split = core.Fn.split("/", git_address.value_as_string)
    repo_name = core.Fn.select(5, repo_name_split)
    # Get the CodeCommit repo CDK object using 'from_repository_name'
    repo = codecommit.Repository.from_repository_name(self, "AWSMLOpsFrameworkRepository", repo_name)
    codebuild_project = codebuild.PipelineProject(
        self,
        "Take config file",
        build_spec=codebuild.BuildSpec.from_object(
            {
                "version": "0.2",
                "phases": {
                    "build": {
                        "commands": [
                            "ls -a",
                            "aws lambda invoke --function-name "
                            + provisioner_apigw_lambda.lambda_function.function_name
                            + " --payload fileb://mlops-config.json response.json"
                            + " --invocation-type RequestResponse",
                        ]
                    }
                },
            }
        ),
    )
    # Define a CodePipeline with CodeCommit as the source
    codecommit_pipeline = codepipeline.Pipeline(
        self,
        "MLOpsCodeCommitPipeline",
        stages=[
            codepipeline.StageProps(
                stage_name="Source",
                actions=[
                    codepipeline_actions.CodeCommitSourceAction(
                        action_name="CodeCommit",
                        repository=repo,
                        branch="main",
                        output=source_output,
                    )
                ],
            ),
            codepipeline.StageProps(
                stage_name="TakeConfig",
                actions=[
                    codepipeline_actions.CodeBuildAction(
                        action_name="provision_pipeline",
                        input=source_output,
                        outputs=[],
                        project=codebuild_project,
                    )
                ],
            ),
        ],
        cross_account_keys=False,
    )
    codecommit_pipeline.add_to_role_policy(
        iam.PolicyStatement(
            actions=["lambda:InvokeFunction"],
            resources=[provisioner_apigw_lambda.lambda_function.function_arn],
        )
    )
    codebuild_project.add_to_role_policy(
        iam.PolicyStatement(
            actions=["lambda:InvokeFunction"],
            resources=[provisioner_apigw_lambda.lambda_function.function_arn],
        )
    )
    pipeline_child_nodes = codecommit_pipeline.node.find_all()
    pipeline_child_nodes[1].node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {
                    "id": "W35",
                    "reason": "This is a managed bucket generated by CDK for codepipeline.",
                },
                {
                    "id": "W51",
                    "reason": "This is a managed bucket generated by CDK for codepipeline.",
                },
            ]
        }
    }

    ### Custom resource for operational metrics ###
    metricsMapping = core.CfnMapping(self, "AnonymousData", mapping={"SendAnonymousData": {"Data": "Yes"}})
    metrics_condition = core.CfnCondition(
        self,
        "AnonymousDatatoAWS",
        expression=core.Fn.condition_equals(metricsMapping.find_in_map("SendAnonymousData", "Data"), "Yes"),
    )
    helper_function = lambda_.Function(
        self,
        "SolutionHelper",
        code=lambda_.Code.from_asset("lambdas/solution_helper"),
        handler="lambda_function.handler",
        runtime=lambda_.Runtime.PYTHON_3_8,
        timeout=core.Duration.seconds(60),
    )
    createIdFunction = core.CustomResource(
        self,
        "CreateUniqueID",
        service_token=helper_function.function_arn,
        properties={"Resource": "UUID"},
        resource_type="Custom::CreateUUID",
    )
    sendDataFunction = core.CustomResource(
        self,
        "SendAnonymousData",
        service_token=helper_function.function_arn,
        properties={
            "Resource": "AnonymousMetric",
            "UUID": createIdFunction.get_att_string("UUID"),
            "gitSelected": git_address.value_as_string,
            "Region": core.Aws.REGION,
            "SolutionId": "SO0136",
            "Version": "%%VERSION%%",
        },
        resource_type="Custom::AnonymousData",
    )
    core.Aspects.of(helper_function).add(ConditionalResources(metrics_condition))
    core.Aspects.of(createIdFunction).add(ConditionalResources(metrics_condition))
    core.Aspects.of(sendDataFunction).add(ConditionalResources(metrics_condition))
    helper_function.node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {
                    "id": "W58",
                    "reason": "The lambda function's role already has permissions to write CloudWatch logs",
                }
            ]
        }
    }

    # If the user chooses Git as the pipeline provision type, create a CodePipeline with the Git repo as source
    core.Aspects.of(repo).add(ConditionalResources(git_address_provided))
    core.Aspects.of(codecommit_pipeline).add(ConditionalResources(git_address_provided))
    core.Aspects.of(codebuild_project).add(ConditionalResources(git_address_provided))

    # Create Template Interface
    self.template_options.metadata = {
        "AWS::CloudFormation::Interface": {
            "ParameterGroups": [
                {
                    "Label": {"default": "MLOps Framework Settings"},
                    "Parameters": [
                        notification_email.logical_id,
                        git_address.logical_id,
                        existing_bucket.logical_id,
                    ],
                }
            ],
            "ParameterLabels": {
                f"{notification_email.logical_id}": {"default": "Notification Email (Required)"},
                f"{git_address.logical_id}": {"default": "CodeCommit Repo URL Address (Optional)"},
                f"{existing_bucket.logical_id}": {"default": "Name of an Existing S3 Bucket (Optional)"},
            },
        }
    }
    # Outputs #
    core.CfnOutput(
        self,
        id="BlueprintsBucket",
        value=f"https://s3.console.aws.amazon.com/s3/buckets/{blueprint_repository_bucket.bucket_name}",
        description="S3 Bucket to upload MLOps Framework Blueprints",
    )
    core.CfnOutput(
        self,
        id="AssetsBucket",
        value=f"https://s3.console.aws.amazon.com/s3/buckets/{assets_s3_bucket_name}",
        description="S3 Bucket to upload model artifact",
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Get stack parameters: email and repo address
    notification_email = core.CfnParameter(
        self,
        "Email Address",
        type="String",
        description="Specify an email to receive notifications about pipeline outcomes.",
        allowed_pattern='^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$',
        min_length=5,
        max_length=320,
        constraint_description="Please enter an email address in a valid format (e.g., name@example.com)",
    )
    git_address = core.CfnParameter(
        self,
        "CodeCommit Repo Address",
        type="String",
        description="AWS CodeCommit repository clone URL to connect to the framework.",
        allowed_pattern=(
            '^(((https:\/\/|ssh:\/\/)(git\-codecommit)\.[a-zA-Z0-9_.+-]+(amazonaws\.com\/)'
            '[a-zA-Z0-9-.]+(\/)[a-zA-Z0-9-.]+(\/)[a-zA-Z0-9-.]+$)|)'
        ),
        min_length=0,
        max_length=320,
        constraint_description=(
            "CodeCommit address must follow the pattern: ssh or "
            "https://git-codecommit.REGION.amazonaws.com/version/repos/REPONAME"
        ),
    )

    # Conditions
    git_address_provided = core.CfnCondition(
        self,
        "GitAddressProvided",
        expression=core.Fn.condition_not(core.Fn.condition_equals(git_address, "")),
    )

    # Constants
    pipeline_stack_name = "MLOps-pipeline"

    # CDK Resources setup
    access_logs_bucket = s3.Bucket(
        self,
        "accessLogs",
        encryption=s3.BucketEncryption.S3_MANAGED,
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
    )
    access_logs_bucket.node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {"id": "W35", "reason": "This is the access bucket."},
                {
                    "id": "W51",
                    "reason": "This S3 bucket does not need a bucket policy.",
                },
            ]
        }
    }

    source_bucket = s3.Bucket.from_bucket_name(self, "BucketByName", "%%BUCKET_NAME%%")

    blueprints_bucket_name = "blueprint-repository-" + str(uuid.uuid4())
    blueprint_repository_bucket = s3.Bucket(
        self,
        blueprints_bucket_name,
        encryption=s3.BucketEncryption.S3_MANAGED,
        server_access_logs_bucket=access_logs_bucket,
        server_access_logs_prefix=blueprints_bucket_name,
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
    )
    blueprint_repository_bucket.node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {
                    "id": "W51",
                    "reason": (
                        "This S3 bucket does not need a bucket policy. All access to this "
                        "bucket is restricted by IAM (CDK grant_read method)"
                    ),
                }
            ]
        }
    }

    # Custom resource to copy source bucket content to the blueprints bucket
    custom_resource_lambda_fn = lambda_.Function(
        self,
        "CustomResourceLambda",
        code=lambda_.Code.from_asset("lambdas/custom_resource"),
        handler="index.on_event",
        runtime=lambda_.Runtime.PYTHON_3_8,
        environment={
            "source_bucket": "https://%%BUCKET_NAME%%-"
            + core.Aws.REGION
            + ".s3.amazonaws.com/%%SOLUTION_NAME%%/%%VERSION%%",
            "destination_bucket": blueprint_repository_bucket.bucket_name,
            "LOG_LEVEL": "INFO",
        },
        timeout=core.Duration.seconds(60),
    )
    custom_resource_lambda_fn.node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {
                    "id": "W58",
                    "reason": "The lambda function's role already has permissions to write CloudWatch logs",
                }
            ]
        }
    }
    blueprint_repository_bucket.grant_write(custom_resource_lambda_fn)
    custom_resource = core.CustomResource(
        self,
        "CustomResourceCopyAssets",
        service_token=custom_resource_lambda_fn.function_arn,
    )
    custom_resource.node.add_dependency(blueprint_repository_bucket)

    ### IAM policies setup ###
    cloudformation_role = iam.Role(
        self,
        "mlopscloudformationrole",
        assumed_by=iam.ServicePrincipal("cloudformation.amazonaws.com"),
    )
    # CloudFormation policy setup
    orchestrator_policy = iam.Policy(
        self,
        "lambdaOrchestratorPolicy",
        statements=[
            iam.PolicyStatement(
                actions=[
                    "cloudformation:CreateStack",
                    "cloudformation:DeleteStack",
                    "cloudformation:UpdateStack",
                    "cloudformation:ListStackResources",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:cloudformation:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:stack/{pipeline_stack_name}*/*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "iam:CreateRole",
                    "iam:DeleteRole",
                    "iam:DeleteRolePolicy",
                    "iam:GetRole",
                    "iam:GetRolePolicy",
                    "iam:PassRole",
                    "iam:PutRolePolicy",
                    "iam:AttachRolePolicy",
                    "iam:DetachRolePolicy",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:iam::{core.Aws.ACCOUNT_ID}:role/{pipeline_stack_name}*"
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "ecr:CreateRepository",
                    "ecr:DeleteRepository",
                    "ecr:DescribeRepositories",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:ecr:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:repository/awsmlopsmodels*"
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "codebuild:CreateProject",
                    "codebuild:DeleteProject",
                    "codebuild:BatchGetProjects",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:project/ContainerFactory*",
                    f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:project/VerifySagemaker*",
                    f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:report-group/*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "lambda:CreateFunction",
                    "lambda:DeleteFunction",
                    "lambda:InvokeFunction",
                    "lambda:PublishLayerVersion",
                    "lambda:DeleteLayerVersion",
                    "lambda:GetLayerVersion",
                    "lambda:GetFunctionConfiguration",
                    "lambda:GetFunction",
                    "lambda:AddPermission",
                    "lambda:RemovePermission",
                    "lambda:UpdateFunctionConfiguration",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:layer:*",
                    f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:function:*",
                ],
            ),
            iam.PolicyStatement(
                actions=["s3:GetObject"],
                resources=[
                    blueprint_repository_bucket.bucket_arn,
                    blueprint_repository_bucket.arn_for_objects("*"),
                    f"arn:{core.Aws.PARTITION}:s3:::pipeline-assets-*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "codepipeline:CreatePipeline",
                    "codepipeline:DeletePipeline",
                    "codepipeline:GetPipeline",
                    "codepipeline:GetPipelineState",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:codepipeline:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*"
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "apigateway:POST",
                    "apigateway:PATCH",
                    "apigateway:DELETE",
                    "apigateway:GET",
                    "apigateway:PUT",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis/*",
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis",
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/account",
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans",
                    f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans/*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "logs:CreateLogGroup",
                    "logs:DescribeLogGroups",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "s3:CreateBucket",
                    "s3:PutEncryptionConfiguration",
                    "s3:PutBucketVersioning",
                    "s3:PutBucketPublicAccessBlock",
                    "s3:PutBucketLogging",
                ],
                resources=["arn:" + core.Aws.PARTITION + ":s3:::*"],
            ),
            iam.PolicyStatement(
                actions=[
                    "sns:CreateTopic",
                    "sns:DeleteTopic",
                    "sns:Subscribe",
                    "sns:Unsubscribe",
                    "sns:GetTopicAttributes",
                    "sns:SetTopicAttributes",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*-PipelineNotification*",
                ],
            ),
            iam.PolicyStatement(
                actions=[
                    "events:PutRule",
                    "events:DescribeRule",
                    "events:PutTargets",
                    "events:RemoveTargets",
                    "events:DeleteRule",
                    "events:PutEvents",
                ],
                resources=[
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:rule/*",
                    f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*",
                ],
            ),
        ],
    )
    orchestrator_policy.attach_to_role(cloudformation_role)

    # Lambda function IAM setup
    lambda_passrole_policy = iam.PolicyStatement(
        actions=["iam:passrole"], resources=[cloudformation_role.role_arn])
    # API Gateway and Lambda setup to enable provisioning pipelines through API calls
    provisioner_apigw_lambda = aws_apigateway_lambda.ApiGatewayToLambda(
        self,
        "PipelineOrchestration",
        lambda_function_props={
            "runtime": lambda_.Runtime.PYTHON_3_8,
            "handler": "index.handler",
            "code": lambda_.Code.from_asset("lambdas/pipeline_orchestration"),
        },
        api_gateway_props={
            "defaultMethodOptions": {
                "authorizationType": apigw.AuthorizationType.IAM,
            },
            "restApiName": f"{core.Aws.STACK_NAME}-orchestrator",
            "proxy": False,
        },
    )
    provision_resource = provisioner_apigw_lambda.api_gateway.root.add_resource(
        'provisionpipeline')
    provision_resource.add_method('POST')
    status_resource = provisioner_apigw_lambda.api_gateway.root.add_resource(
        'pipelinestatus')
    status_resource.add_method('POST')
    blueprint_repository_bucket.grant_read(provisioner_apigw_lambda.lambda_function)
    provisioner_apigw_lambda.lambda_function.add_to_role_policy(lambda_passrole_policy)
    orchestrator_policy.attach_to_role(provisioner_apigw_lambda.lambda_function.role)
    provisioner_apigw_lambda.lambda_function.add_to_role_policy(
        iam.PolicyStatement(actions=["xray:PutTraceSegments"], resources=["*"]))
    lambda_node = provisioner_apigw_lambda.lambda_function.node.default_child
    lambda_node.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {
                    "id": "W12",
                    "reason": "The xray permission PutTraceSegments is not able to be bound to resources.",
                }
            ]
        }
    }
    # Environment variables setup
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="BLUEPRINT_BUCKET_URL",
        value=str(blueprint_repository_bucket.bucket_regional_domain_name),
    )
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="BLUEPRINT_BUCKET", value=str(blueprint_repository_bucket.bucket_name))
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="ACCESS_BUCKET", value=str(access_logs_bucket.bucket_name))
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="CFN_ROLE_ARN", value=str(cloudformation_role.role_arn))
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="PIPELINE_STACK_NAME", value=pipeline_stack_name)
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="NOTIFICATION_EMAIL", value=notification_email.value_as_string)
    provisioner_apigw_lambda.lambda_function.add_environment(
        key="LOG_LEVEL", value="DEBUG")
    cfn_policy_for_lambda = orchestrator_policy.node.default_child
    cfn_policy_for_lambda.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {
                    "id": "W76",
                    "reason": "A complex IAM policy is required for this resource.",
                }
            ]
        }
    }

    ### Codepipeline with Git source definitions ###
    source_output = codepipeline.Artifact()
    # Process git_address to retrieve the repo name
    repo_name_split = core.Fn.split("/", git_address.value_as_string)
    repo_name = core.Fn.select(5, repo_name_split)
    # Get the CodeCommit repo CDK object using 'from_repository_name'
    repo = codecommit.Repository.from_repository_name(
        self, "AWSMLOpsFrameworkRepository", repo_name)
    codebuild_project = codebuild.PipelineProject(
        self,
        "Take config file",
        build_spec=codebuild.BuildSpec.from_object({
            "version": "0.2",
            "phases": {
                "build": {
                    "commands": [
                        "ls -a",
                        "aws lambda invoke --function-name "
                        + provisioner_apigw_lambda.lambda_function.function_name
                        + " --payload fileb://mlops-config.json response.json"
                        + " --invocation-type RequestResponse",
                    ]
                }
            },
        }),
    )
    # Define a CodePipeline with CodeCommit as the source
    codecommit_pipeline = codepipeline.Pipeline(
        self,
        "MLOpsCodeCommitPipeline",
        stages=[
            codepipeline.StageProps(
                stage_name="Source",
                actions=[
                    codepipeline_actions.CodeCommitSourceAction(
                        action_name="CodeCommit",
                        repository=repo,
                        output=source_output,
                    )
                ],
            ),
            codepipeline.StageProps(
                stage_name="TakeConfig",
                actions=[
                    codepipeline_actions.CodeBuildAction(
                        action_name="provision_pipeline",
                        input=source_output,
                        outputs=[],
                        project=codebuild_project,
                    )
                ],
            ),
        ],
        cross_account_keys=False,
    )
    codecommit_pipeline.add_to_role_policy(
        iam.PolicyStatement(
            actions=["lambda:InvokeFunction"],
            resources=[provisioner_apigw_lambda.lambda_function.function_arn],
        ))
    codebuild_project.add_to_role_policy(
        iam.PolicyStatement(
            actions=["lambda:InvokeFunction"],
            resources=[provisioner_apigw_lambda.lambda_function.function_arn],
        ))
    pipeline_child_nodes = codecommit_pipeline.node.find_all()
    pipeline_child_nodes[1].node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {
                    "id": "W35",
                    "reason": "This is a managed bucket generated by CDK for codepipeline.",
                },
                {
                    "id": "W51",
                    "reason": "This is a managed bucket generated by CDK for codepipeline.",
                },
            ]
        }
    }

    ### Custom resource for operational metrics ###
    metricsMapping = core.CfnMapping(
        self, 'AnonymousData', mapping={'SendAnonymousData': {'Data': 'Yes'}})
    metrics_condition = core.CfnCondition(
        self, 'AnonymousDatatoAWS',
        expression=core.Fn.condition_equals(
            metricsMapping.find_in_map('SendAnonymousData', 'Data'), 'Yes'))
    helper_function = lambda_.Function(
        self,
        "SolutionHelper",
        code=lambda_.Code.from_asset("lambdas/solution_helper"),
        handler="lambda_function.handler",
        runtime=lambda_.Runtime.PYTHON_3_8,
        timeout=core.Duration.seconds(60),
    )
    createIdFunction = core.CustomResource(
        self,
        'CreateUniqueID',
        service_token=helper_function.function_arn,
        properties={'Resource': 'UUID'},
        resource_type='Custom::CreateUUID')
    sendDataFunction = core.CustomResource(
        self,
        'SendAnonymousData',
        service_token=helper_function.function_arn,
        properties={
            'Resource': 'AnonymousMetric',
            'UUID': createIdFunction.get_att_string('UUID'),
            'gitSelected': git_address.value_as_string,
            'Region': core.Aws.REGION,
            'SolutionId': 'SO0136',
            'Version': '%%VERSION%%',
        },
        resource_type='Custom::AnonymousData')
    core.Aspects.of(helper_function).add(ConditionalResources(metrics_condition))
    core.Aspects.of(createIdFunction).add(ConditionalResources(metrics_condition))
    core.Aspects.of(sendDataFunction).add(ConditionalResources(metrics_condition))
    helper_function.node.default_child.cfn_options.metadata = {
        "cfn_nag": {
            "rules_to_suppress": [
                {
                    "id": "W58",
                    "reason": "The lambda function's role already has permissions to write CloudWatch logs",
                }
            ]
        }
    }

    # If the user chooses Git as the pipeline provision type, create a CodePipeline with the Git repo as source
    core.Aspects.of(repo).add(ConditionalResources(git_address_provided))
    core.Aspects.of(codecommit_pipeline).add(ConditionalResources(git_address_provided))
    core.Aspects.of(codebuild_project).add(ConditionalResources(git_address_provided))
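
# --- Usage sketch (not part of the stack) -----------------------------------------------
# The ApiGatewayToLambda construct above exposes POST /provisionpipeline and
# POST /pipelinestatus with IAM authorization, so requests must be SigV4-signed.
# A minimal sketch, assuming the deployed API's base URL and a JSON body whose shape is
# defined by the orchestrator Lambda (both are assumptions, not shown in this stack):
import json

import boto3
import requests  # third-party HTTP client, assumed available
from botocore.auth import SigV4Auth
from botocore.awsrequest import AWSRequest


def post_signed(url: str, body: dict, region: str) -> requests.Response:
    """POST a JSON body to an IAM-protected API Gateway endpoint, signing with SigV4."""
    credentials = boto3.Session().get_credentials()
    aws_request = AWSRequest(
        method="POST",
        url=url,
        data=json.dumps(body),
        headers={"Content-Type": "application/json"},
    )
    # Sign for the execute-api service; add_auth injects the Authorization headers
    SigV4Auth(credentials, "execute-api", region).add_auth(aws_request)
    return requests.post(url, data=aws_request.body, headers=dict(aws_request.headers))


# e.g. post_signed(f"{api_base_url}/provisionpipeline", pipeline_config, "us-east-1"),
# where api_base_url and pipeline_config are supplied by the caller.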
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) if self.node.try_get_context('vpc_type'): validate_cdk_json(self) ES_LOADER_TIMEOUT = 600 ###################################################################### # REGION mapping / ELB & Lambda Arch ###################################################################### elb_id_temp = region_info.FactName.ELBV2_ACCOUNT elb_map_temp = region_info.RegionInfo.region_map(elb_id_temp) region_dict = {} for region in elb_map_temp: # ELB account ID region_dict[region] = {'ElbV2AccountId': elb_map_temp[region]} # Lambda Arch if region in ('us-east-1', 'us-east-2', 'us-west-2', 'ap-south-1', 'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1', 'eu-central-1', 'eu-west-1', 'eu-west-2'): region_dict[region]['LambdaArch'] = ( aws_lambda.Architecture.ARM_64.name) else: region_dict[region]['LambdaArch'] = ( aws_lambda.Architecture.X86_64.name) region_mapping = core.CfnMapping( scope=self, id='RegionMap', mapping=region_dict) ###################################################################### # get params ###################################################################### allow_source_address = core.CfnParameter( self, 'AllowedSourceIpAddresses', allowed_pattern=r'^[0-9./\s]*', description='Space-delimited list of CIDR blocks', default='10.0.0.0/8 172.16.0.0/12 192.168.0.0/16') sns_email = core.CfnParameter( self, 'SnsEmail', allowed_pattern=r'^[0-9a-zA-Z@_\-\+\.]*', description=('Input your email as SNS topic, where Amazon ' 'OpenSearch Service will send alerts to'), default='*****@*****.**') geoip_license_key = core.CfnParameter( self, 'GeoLite2LicenseKey', allowed_pattern=r'^[0-9a-zA-Z]{16}$', default='xxxxxxxxxxxxxxxx', description=("If you wolud like to enrich geoip locaiton such as " "IP address's country, get a license key form MaxMind" " and input the key. If you not, keep " "xxxxxxxxxxxxxxxx")) reserved_concurrency = core.CfnParameter( self, 'ReservedConcurrency', default=10, type='Number', description=('Input reserved concurrency. 
Increase this value if ' 'there are steady logs delay despite no errors')) aes_domain_name = self.node.try_get_context('aes_domain_name') bucket = f'{aes_domain_name}-{core.Aws.ACCOUNT_ID}' s3bucket_name_geo = f'{bucket}-geo' s3bucket_name_log = f'{bucket}-log' s3bucket_name_snapshot = f'{bucket}-snapshot' # organizations / multiaccount org_id = self.node.try_get_context('organizations').get('org_id') org_mgmt_id = self.node.try_get_context( 'organizations').get('management_id') org_member_ids = self.node.try_get_context( 'organizations').get('member_ids') no_org_ids = self.node.try_get_context( 'no_organizations').get('aws_accounts') # Overwrite default S3 bucket name as customer name temp_geo = self.node.try_get_context('s3_bucket_name').get('geo') if temp_geo: s3bucket_name_geo = temp_geo else: print('Using default bucket names') temp_log = self.node.try_get_context('s3_bucket_name').get('log') if temp_log: s3bucket_name_log = temp_log elif org_id or no_org_ids: s3bucket_name_log = f'{aes_domain_name}-{self.account}-log' else: print('Using default bucket names') temp_snap = self.node.try_get_context('s3_bucket_name').get('snapshot') if temp_snap: s3bucket_name_snapshot = temp_snap else: print('Using default bucket names') kms_cmk_alias = self.node.try_get_context('kms_cmk_alias') if not kms_cmk_alias: kms_cmk_alias = 'aes-siem-key' print('Using default key alais') ###################################################################### # deploy VPC when context is defined as using VPC ###################################################################### # vpc_type is 'new' or 'import' or None vpc_type = self.node.try_get_context('vpc_type') if vpc_type == 'new': is_vpc = True vpc_cidr = self.node.try_get_context('new_vpc_nw_cidr_block') subnet_cidr_mask = int( self.node.try_get_context('new_vpc_subnet_cidr_mask')) is_vpc = True # VPC vpc_aes_siem = aws_ec2.Vpc( self, 'VpcAesSiem', cidr=vpc_cidr, max_azs=3, nat_gateways=0, subnet_configuration=[ aws_ec2.SubnetConfiguration( subnet_type=aws_ec2.SubnetType.ISOLATED, name='aes-siem-subnet', cidr_mask=subnet_cidr_mask)]) subnet1 = vpc_aes_siem.isolated_subnets[0] subnets = [{'subnet_type': aws_ec2.SubnetType.ISOLATED}] vpc_subnets = aws_ec2.SubnetSelection( subnet_type=aws_ec2.SubnetType.ISOLATED) vpc_aes_siem_opt = vpc_aes_siem.node.default_child.cfn_options vpc_aes_siem_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN for subnet in vpc_aes_siem.isolated_subnets: subnet_opt = subnet.node.default_child.cfn_options subnet_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN elif vpc_type == 'import': vpc_id = self.node.try_get_context('imported_vpc_id') vpc_aes_siem = aws_ec2.Vpc.from_lookup( self, 'VpcAesSiem', vpc_id=vpc_id) subnet_ids = get_subnet_ids(self) subnets = [] for number, subnet_id in enumerate(subnet_ids, 1): obj_id = 'Subenet' + str(number) subnet = aws_ec2.Subnet.from_subnet_id(self, obj_id, subnet_id) subnets.append(subnet) subnet1 = subnets[0] vpc_subnets = aws_ec2.SubnetSelection(subnets=subnets) if vpc_type: is_vpc = True # Security Group sg_vpc_noinbound_aes_siem = aws_ec2.SecurityGroup( self, 'AesSiemVpcNoinboundSecurityGroup', security_group_name='aes-siem-noinbound-vpc-sg', vpc=vpc_aes_siem) sg_vpc_aes_siem = aws_ec2.SecurityGroup( self, 'AesSiemVpcSecurityGroup', security_group_name='aes-siem-vpc-sg', vpc=vpc_aes_siem) sg_vpc_aes_siem.add_ingress_rule( peer=aws_ec2.Peer.ipv4(vpc_aes_siem.vpc_cidr_block), connection=aws_ec2.Port.tcp(443),) sg_vpc_opt = sg_vpc_aes_siem.node.default_child.cfn_options 
sg_vpc_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN # VPC Endpoint vpc_aes_siem.add_gateway_endpoint( 'S3Endpoint', service=aws_ec2.GatewayVpcEndpointAwsService.S3, subnets=subnets) vpc_aes_siem.add_interface_endpoint( 'SQSEndpoint', security_groups=[sg_vpc_aes_siem], service=aws_ec2.InterfaceVpcEndpointAwsService.SQS,) vpc_aes_siem.add_interface_endpoint( 'KMSEndpoint', security_groups=[sg_vpc_aes_siem], service=aws_ec2.InterfaceVpcEndpointAwsService.KMS,) else: is_vpc = False is_vpc = core.CfnCondition( self, 'IsVpc', expression=core.Fn.condition_equals(is_vpc, True)) """ CloudFormation実行時の条件式の書き方 ClassのBasesが aws_cdk.core.Resource の時は、 node.default_child.cfn_options.condition = is_vpc ClassのBasesが aws_cdk.core.CfnResource の時は、 cfn_options.condition = is_vpc """ ###################################################################### # create cmk of KMS to encrypt S3 bucket ###################################################################### kms_aes_siem = aws_kms.Key( self, 'KmsAesSiemLog', description='CMK for SIEM solution', removal_policy=core.RemovalPolicy.RETAIN) aws_kms.Alias( self, 'KmsAesSiemLogAlias', alias_name=kms_cmk_alias, target_key=kms_aes_siem, removal_policy=core.RemovalPolicy.RETAIN) kms_aes_siem.add_to_resource_policy( aws_iam.PolicyStatement( sid='Allow GuardDuty to use the key', actions=['kms:GenerateDataKey'], principals=[aws_iam.ServicePrincipal( 'guardduty.amazonaws.com')], resources=['*'],),) kms_aes_siem.add_to_resource_policy( aws_iam.PolicyStatement( sid='Allow VPC Flow Logs to use the key', actions=['kms:Encrypt', 'kms:Decrypt', 'kms:ReEncrypt*', 'kms:GenerateDataKey*', 'kms:DescribeKey'], principals=[aws_iam.ServicePrincipal( 'delivery.logs.amazonaws.com')], resources=['*'],),) # basic policy key_policy_basic1 = aws_iam.PolicyStatement( sid='Allow principals in the account to decrypt log files', actions=['kms:DescribeKey', 'kms:ReEncryptFrom'], principals=[aws_iam.AccountPrincipal( account_id=core.Aws.ACCOUNT_ID)], resources=['*'],) kms_aes_siem.add_to_resource_policy(key_policy_basic1) # for Athena key_policy_athena = aws_iam.PolicyStatement( sid='Allow Athena to query s3 objects with this key', actions=['kms:Decrypt', 'kms:DescribeKey', 'kms:Encrypt', 'kms:GenerateDataKey*', 'kms:ReEncrypt*'], principals=[aws_iam.AccountPrincipal( account_id=core.Aws.ACCOUNT_ID)], resources=['*'], conditions={'ForAnyValue:StringEquals': { 'aws:CalledVia': 'athena.amazonaws.com'}}) kms_aes_siem.add_to_resource_policy(key_policy_athena) # for CloudTrail key_policy_trail1 = aws_iam.PolicyStatement( sid='Allow CloudTrail to describe key', actions=['kms:DescribeKey'], principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')], resources=['*'],) kms_aes_siem.add_to_resource_policy(key_policy_trail1) key_policy_trail2 = aws_iam.PolicyStatement( sid=('Allow CloudTrail to encrypt logs'), actions=['kms:GenerateDataKey*'], principals=[aws_iam.ServicePrincipal( 'cloudtrail.amazonaws.com')], resources=['*'], conditions={'StringLike': { 'kms:EncryptionContext:aws:cloudtrail:arn': [ f'arn:aws:cloudtrail:*:{core.Aws.ACCOUNT_ID}:trail/*']}}) kms_aes_siem.add_to_resource_policy(key_policy_trail2) ###################################################################### # create s3 bucket ###################################################################### block_pub = aws_s3.BlockPublicAccess( block_public_acls=True, ignore_public_acls=True, block_public_policy=True, restrict_public_buckets=True ) s3_geo = aws_s3.Bucket( self, 'S3BucketForGeoip', block_public_access=block_pub, 
bucket_name=s3bucket_name_geo, # removal_policy=core.RemovalPolicy.DESTROY, ) # create s3 bucket for log collector s3_log = aws_s3.Bucket( self, 'S3BucketForLog', block_public_access=block_pub, bucket_name=s3bucket_name_log, versioned=True, encryption=aws_s3.BucketEncryption.S3_MANAGED, # removal_policy=core.RemovalPolicy.DESTROY, ) # create s3 bucket for aes snapshot s3_snapshot = aws_s3.Bucket( self, 'S3BucketForSnapshot', block_public_access=block_pub, bucket_name=s3bucket_name_snapshot, # removal_policy=core.RemovalPolicy.DESTROY, ) ###################################################################### # IAM Role ###################################################################### # delopyment policy for lambda deploy-aes arn_prefix = f'arn:aws:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}' loggroup_aes = f'log-group:/aws/aes/domains/{aes_domain_name}/*' loggroup_opensearch = ( f'log-group:/aws/OpenSearchService/domains/{aes_domain_name}/*') loggroup_lambda = 'log-group:/aws/lambda/aes-siem-*' policydoc_create_loggroup = aws_iam.PolicyDocument( statements=[ aws_iam.PolicyStatement( actions=[ 'logs:PutResourcePolicy', 'logs:DescribeLogGroups', 'logs:DescribeLogStreams' ], resources=[f'{arn_prefix}:*', ] ), aws_iam.PolicyStatement( actions=[ 'logs:CreateLogGroup', 'logs:CreateLogStream', 'logs:PutLogEvents', 'logs:PutRetentionPolicy'], resources=[ f'{arn_prefix}:{loggroup_aes}', f'{arn_prefix}:{loggroup_opensearch}', f'{arn_prefix}:{loggroup_lambda}', ], ) ] ) policydoc_crhelper = aws_iam.PolicyDocument( statements=[ aws_iam.PolicyStatement( actions=[ 'lambda:AddPermission', 'lambda:RemovePermission', 'events:ListRules', 'events:PutRule', 'events:DeleteRule', 'events:PutTargets', 'events:RemoveTargets'], resources=['*'] ) ] ) # snaphot rule for AES policydoc_snapshot = aws_iam.PolicyDocument( statements=[ aws_iam.PolicyStatement( actions=['s3:ListBucket'], resources=[s3_snapshot.bucket_arn] ), aws_iam.PolicyStatement( actions=['s3:GetObject', 's3:PutObject', 's3:DeleteObject'], resources=[s3_snapshot.bucket_arn + '/*'] ) ] ) aes_siem_snapshot_role = aws_iam.Role( self, 'AesSiemSnapshotRole', role_name='aes-siem-snapshot-role', inline_policies=[policydoc_snapshot, ], assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com') ) policydoc_assume_snapshrole = aws_iam.PolicyDocument( statements=[ aws_iam.PolicyStatement( actions=['iam:PassRole'], resources=[aes_siem_snapshot_role.role_arn] ), ] ) aes_siem_deploy_role_for_lambda = aws_iam.Role( self, 'AesSiemDeployRoleForLambda', role_name='aes-siem-deploy-role-for-lambda', managed_policies=[ aws_iam.ManagedPolicy.from_aws_managed_policy_name( 'AmazonOpenSearchServiceFullAccess'), aws_iam.ManagedPolicy.from_aws_managed_policy_name( 'service-role/AWSLambdaBasicExecutionRole'), ], inline_policies=[policydoc_assume_snapshrole, policydoc_snapshot, policydoc_create_loggroup, policydoc_crhelper], assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com') ) if vpc_type: aes_siem_deploy_role_for_lambda.add_managed_policy( aws_iam.ManagedPolicy.from_aws_managed_policy_name( 'service-role/AWSLambdaVPCAccessExecutionRole') ) # for alert from Amazon OpenSearch Service aes_siem_sns_role = aws_iam.Role( self, 'AesSiemSnsRole', role_name='aes-siem-sns-role', assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com') ) # EC2 role aes_siem_es_loader_ec2_role = aws_iam.Role( self, 'AesSiemEsLoaderEC2Role', role_name='aes-siem-es-loader-for-ec2', assumed_by=aws_iam.ServicePrincipal('ec2.amazonaws.com'), ) aws_iam.CfnInstanceProfile( self, 
'AesSiemEsLoaderEC2InstanceProfile', instance_profile_name=aes_siem_es_loader_ec2_role.role_name, roles=[aes_siem_es_loader_ec2_role.role_name] ) ###################################################################### # in VPC ###################################################################### aes_role_exist = check_iam_role('/aws-service-role/es.amazonaws.com/') if vpc_type and not aes_role_exist: slr_aes = aws_iam.CfnServiceLinkedRole( self, 'AWSServiceRoleForAmazonOpenSearchService', aws_service_name='es.amazonaws.com', description='Created by cloudformation of siem stack' ) slr_aes.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN ###################################################################### # SQS for es-laoder's DLQ ###################################################################### sqs_aes_siem_dlq = aws_sqs.Queue( self, 'AesSiemDlq', queue_name='aes-siem-dlq', retention_period=core.Duration.days(14)) sqs_aes_siem_splitted_logs = aws_sqs.Queue( self, 'AesSiemSqsSplitLogs', queue_name='aes-siem-sqs-splitted-logs', dead_letter_queue=aws_sqs.DeadLetterQueue( max_receive_count=2, queue=sqs_aes_siem_dlq), visibility_timeout=core.Duration.seconds(ES_LOADER_TIMEOUT), retention_period=core.Duration.days(14)) ###################################################################### # Setup Lambda ###################################################################### # setup lambda of es_loader lambda_es_loader_vpc_kwargs = {} if vpc_type: lambda_es_loader_vpc_kwargs = { 'security_group': sg_vpc_noinbound_aes_siem, 'vpc': vpc_aes_siem, 'vpc_subnets': vpc_subnets, } lambda_es_loader = aws_lambda.Function( self, 'LambdaEsLoader', **lambda_es_loader_vpc_kwargs, function_name='aes-siem-es-loader', description=f'{SOLUTION_NAME} / es-loader', runtime=aws_lambda.Runtime.PYTHON_3_8, architecture=aws_lambda.Architecture.X86_64, # architecture=region_mapping.find_in_map( # core.Aws.REGION, 'LambdaArm'), # code=aws_lambda.Code.asset('../lambda/es_loader.zip'), code=aws_lambda.Code.asset('../lambda/es_loader'), handler='index.lambda_handler', memory_size=2048, timeout=core.Duration.seconds(ES_LOADER_TIMEOUT), reserved_concurrent_executions=( reserved_concurrency.value_as_number), dead_letter_queue_enabled=True, dead_letter_queue=sqs_aes_siem_dlq, environment={ 'GEOIP_BUCKET': s3bucket_name_geo, 'LOG_LEVEL': 'info', 'POWERTOOLS_LOGGER_LOG_EVENT': 'false', 'POWERTOOLS_SERVICE_NAME': 'es-loader', 'POWERTOOLS_METRICS_NAMESPACE': 'SIEM'}) es_loader_newver = lambda_es_loader.add_version( name=__version__, description=__version__) es_loader_opt = es_loader_newver.node.default_child.cfn_options es_loader_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN # send only # sqs_aes_siem_dlq.grant(lambda_es_loader, 'sqs:SendMessage') # send and reieve. 
but it must be loop sqs_aes_siem_dlq.grant( lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage', 'sqs:DeleteMessage', 'sqs:GetQueueAttributes') sqs_aes_siem_splitted_logs.grant( lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage', 'sqs:DeleteMessage', 'sqs:GetQueueAttributes') lambda_es_loader.add_event_source( aws_lambda_event_sources.SqsEventSource( sqs_aes_siem_splitted_logs, batch_size=1)) # es-loaer on EC2 role sqs_aes_siem_dlq.grant( aes_siem_es_loader_ec2_role, 'sqs:GetQueue*', 'sqs:ListQueues*', 'sqs:ReceiveMessage*', 'sqs:DeleteMessage*') lambda_geo = aws_lambda.Function( self, 'LambdaGeoipDownloader', function_name='aes-siem-geoip-downloader', description=f'{SOLUTION_NAME} / geoip-downloader', runtime=aws_lambda.Runtime.PYTHON_3_8, architecture=aws_lambda.Architecture.X86_64, # architecture=region_mapping.find_in_map( # core.Aws.REGION, 'LambdaArm'), code=aws_lambda.Code.asset('../lambda/geoip_downloader'), handler='index.lambda_handler', memory_size=320, timeout=core.Duration.seconds(300), environment={ 's3bucket_name': s3bucket_name_geo, 'license_key': geoip_license_key.value_as_string, } ) lambda_geo_newver = lambda_geo.add_version( name=__version__, description=__version__) lamba_geo_opt = lambda_geo_newver.node.default_child.cfn_options lamba_geo_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN ###################################################################### # setup OpenSearch Service ###################################################################### lambda_deploy_es = aws_lambda.Function( self, 'LambdaDeployAES', function_name='aes-siem-deploy-aes', description=f'{SOLUTION_NAME} / opensearch domain deployment', runtime=aws_lambda.Runtime.PYTHON_3_8, architecture=aws_lambda.Architecture.X86_64, # architecture=region_mapping.find_in_map( # core.Aws.REGION, 'LambdaArm'), # code=aws_lambda.Code.asset('../lambda/deploy_es.zip'), code=aws_lambda.Code.asset('../lambda/deploy_es'), handler='index.aes_domain_handler', memory_size=128, timeout=core.Duration.seconds(300), environment={ 'accountid': core.Aws.ACCOUNT_ID, 'aes_domain_name': aes_domain_name, 'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn, 'es_loader_role': lambda_es_loader.role.role_arn, 'allow_source_address': allow_source_address.value_as_string, }, role=aes_siem_deploy_role_for_lambda, ) lambda_deploy_es.add_environment( 's3_snapshot', s3_snapshot.bucket_name) if vpc_type: lambda_deploy_es.add_environment( 'vpc_subnet_id', subnet1.subnet_id) lambda_deploy_es.add_environment( 'security_group_id', sg_vpc_aes_siem.security_group_id) else: lambda_deploy_es.add_environment('vpc_subnet_id', 'None') lambda_deploy_es.add_environment('security_group_id', 'None') deploy_es_newver = lambda_deploy_es.add_version( name=__version__, description=__version__) deploy_es_opt = deploy_es_newver.node.default_child.cfn_options deploy_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN # execute lambda_deploy_es to deploy Amaozon ES Domain aes_domain = aws_cloudformation.CfnCustomResource( self, 'AesSiemDomainDeployedR2', service_token=lambda_deploy_es.function_arn,) aes_domain.add_override('Properties.ConfigVersion', __version__) es_endpoint = aes_domain.get_att('es_endpoint').to_string() lambda_es_loader.add_environment('ES_ENDPOINT', es_endpoint) lambda_es_loader.add_environment( 'SQS_SPLITTED_LOGS_URL', sqs_aes_siem_splitted_logs.queue_url) lambda_configure_es_vpc_kwargs = {} if vpc_type: lambda_configure_es_vpc_kwargs = { 'security_group': sg_vpc_noinbound_aes_siem, 'vpc': vpc_aes_siem, 
'vpc_subnets': aws_ec2.SubnetSelection(subnets=[subnet1, ]), } lambda_configure_es = aws_lambda.Function( self, 'LambdaConfigureAES', **lambda_configure_es_vpc_kwargs, function_name='aes-siem-configure-aes', description=f'{SOLUTION_NAME} / opensearch configuration', runtime=aws_lambda.Runtime.PYTHON_3_8, architecture=aws_lambda.Architecture.X86_64, # architecture=region_mapping.find_in_map( # core.Aws.REGION, 'LambdaArm'), code=aws_lambda.Code.asset('../lambda/deploy_es'), handler='index.aes_config_handler', memory_size=128, timeout=core.Duration.seconds(300), environment={ 'accountid': core.Aws.ACCOUNT_ID, 'aes_domain_name': aes_domain_name, 'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn, 'es_loader_role': lambda_es_loader.role.role_arn, 'allow_source_address': allow_source_address.value_as_string, 'es_endpoint': es_endpoint, }, role=aes_siem_deploy_role_for_lambda, ) lambda_configure_es.add_environment( 's3_snapshot', s3_snapshot.bucket_name) if vpc_type: lambda_configure_es.add_environment( 'vpc_subnet_id', subnet1.subnet_id) lambda_configure_es.add_environment( 'security_group_id', sg_vpc_aes_siem.security_group_id) else: lambda_configure_es.add_environment('vpc_subnet_id', 'None') lambda_configure_es.add_environment('security_group_id', 'None') configure_es_newver = lambda_configure_es.add_version( name=__version__, description=__version__) configure_es_opt = configure_es_newver.node.default_child.cfn_options configure_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN aes_config = aws_cloudformation.CfnCustomResource( self, 'AesSiemDomainConfiguredR2', service_token=lambda_configure_es.function_arn,) aes_config.add_override('Properties.ConfigVersion', __version__) aes_config.add_depends_on(aes_domain) aes_config.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN es_arn = (f'arn:aws:es:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}' f':domain/{aes_domain_name}') # grant permission to es_loader role inline_policy_to_load_entries_into_es = aws_iam.Policy( self, 'aes-siem-policy-to-load-entries-to-es', policy_name='aes-siem-policy-to-load-entries-to-es', statements=[ aws_iam.PolicyStatement( actions=['es:*'], resources=[es_arn + '/*', ]), ] ) lambda_es_loader.role.attach_inline_policy( inline_policy_to_load_entries_into_es) aes_siem_es_loader_ec2_role.attach_inline_policy( inline_policy_to_load_entries_into_es) # grant additional permission to es_loader role additional_kms_cmks = self.node.try_get_context('additional_kms_cmks') if additional_kms_cmks: inline_policy_access_to_additional_cmks = aws_iam.Policy( self, 'access_to_additional_cmks', policy_name='access_to_additional_cmks', statements=[ aws_iam.PolicyStatement( actions=['kms:Decrypt'], resources=sorted(set(additional_kms_cmks)) ) ] ) lambda_es_loader.role.attach_inline_policy( inline_policy_access_to_additional_cmks) aes_siem_es_loader_ec2_role.attach_inline_policy( inline_policy_access_to_additional_cmks) additional_buckets = self.node.try_get_context('additional_s3_buckets') if additional_buckets: buckets_list = [] for bucket in additional_buckets: buckets_list.append(f'arn:aws:s3:::{bucket}') buckets_list.append(f'arn:aws:s3:::{bucket}/*') inline_policy_access_to_additional_buckets = aws_iam.Policy( self, 'access_to_additional_buckets', policy_name='access_to_additional_buckets', statements=[ aws_iam.PolicyStatement( actions=['s3:GetObject*', 's3:GetBucket*', 's3:List*'], resources=sorted(set(buckets_list)) ) ] ) lambda_es_loader.role.attach_inline_policy( inline_policy_access_to_additional_buckets) 
aes_siem_es_loader_ec2_role.attach_inline_policy(
    inline_policy_access_to_additional_buckets)
kms_aes_siem.grant_decrypt(lambda_es_loader)
kms_aes_siem.grant_decrypt(aes_siem_es_loader_ec2_role)

######################################################################
# s3 notification and grant permission
######################################################################
s3_geo.grant_read_write(lambda_geo)
s3_geo.grant_read(lambda_es_loader)
s3_geo.grant_read(aes_siem_es_loader_ec2_role)
s3_log.grant_read(lambda_es_loader)
s3_log.grant_read(aes_siem_es_loader_ec2_role)

# create s3 notification for es_loader
notification = aws_s3_notifications.LambdaDestination(lambda_es_loader)

# assign notification for the s3 PUT event type
# most log systems use PUT, but CLB also uses POST & multipart upload
s3_log.add_event_notification(
    aws_s3.EventType.OBJECT_CREATED, notification,
    aws_s3.NotificationKeyFilter(prefix='AWSLogs/'))
# For user logs, not AWS logs
s3_log.add_event_notification(
    aws_s3.EventType.OBJECT_CREATED, notification,
    aws_s3.NotificationKeyFilter(prefix='UserLogs/'))

# Download geoip to S3 once by executing lambda_geo
get_geodb = aws_cloudformation.CfnCustomResource(
    self, 'ExecLambdaGeoipDownloader',
    service_token=lambda_geo.function_arn,)
get_geodb.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

# Download the geoip database every 12 hours
rule = aws_events.Rule(
    self, 'CwlRuleLambdaGeoipDownloaderDilly',
    schedule=aws_events.Schedule.rate(core.Duration.hours(12)))
rule.add_target(aws_events_targets.LambdaFunction(lambda_geo))

######################################################################
# bucket policy
######################################################################
s3_awspath = s3_log.bucket_arn + '/AWSLogs/' + core.Aws.ACCOUNT_ID
bucket_policy_common1 = aws_iam.PolicyStatement(
    sid='ELB Policy',
    principals=[aws_iam.AccountPrincipal(
        account_id=region_mapping.find_in_map(
            core.Aws.REGION, 'ElbV2AccountId'))],
    actions=['s3:PutObject'], resources=[s3_awspath + '/*'],)
# NLB / ALB / R53resolver / VPC Flow Logs
bucket_policy_elb1 = aws_iam.PolicyStatement(
    sid='AWSLogDeliveryAclCheck For ALB NLB R53Resolver Flowlogs',
    principals=[aws_iam.ServicePrincipal('delivery.logs.amazonaws.com')],
    actions=['s3:GetBucketAcl', 's3:ListBucket'],
    resources=[s3_log.bucket_arn],)
bucket_policy_elb2 = aws_iam.PolicyStatement(
    sid='AWSLogDeliveryWrite For ALB NLB R53Resolver Flowlogs',
    principals=[aws_iam.ServicePrincipal('delivery.logs.amazonaws.com')],
    actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
    conditions={
        'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
s3_log.add_to_resource_policy(bucket_policy_common1)
s3_log.add_to_resource_policy(bucket_policy_elb1)
s3_log.add_to_resource_policy(bucket_policy_elb2)

# CloudTrail
bucket_policy_trail1 = aws_iam.PolicyStatement(
    sid='AWSLogDeliveryAclCheck For Cloudtrail',
    principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
    actions=['s3:GetBucketAcl'], resources=[s3_log.bucket_arn],)
bucket_policy_trail2 = aws_iam.PolicyStatement(
    sid='AWSLogDeliveryWrite For CloudTrail',
    principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
    actions=['s3:PutObject'], resources=[s3_awspath + '/*'],
    conditions={
        'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
s3_log.add_to_resource_policy(bucket_policy_trail1)
s3_log.add_to_resource_policy(bucket_policy_trail2)

# GuardDuty
bucket_policy_gd1 = aws_iam.PolicyStatement(
    sid='Allow GuardDuty to use the getBucketLocation operation',
    principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
    actions=['s3:GetBucketLocation'], resources=[s3_log.bucket_arn],)
bucket_policy_gd2 = aws_iam.PolicyStatement(
    sid='Allow GuardDuty to upload objects to the bucket',
    principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
    actions=['s3:PutObject'], resources=[s3_log.bucket_arn + '/*'],)
bucket_policy_gd5 = aws_iam.PolicyStatement(
    sid='Deny non-HTTPS access', effect=aws_iam.Effect.DENY,
    actions=['s3:*'], resources=[s3_log.bucket_arn + '/*'],
    conditions={'Bool': {'aws:SecureTransport': 'false'}})
bucket_policy_gd5.add_any_principal()
s3_log.add_to_resource_policy(bucket_policy_gd1)
s3_log.add_to_resource_policy(bucket_policy_gd2)
s3_log.add_to_resource_policy(bucket_policy_gd5)

# Config
bucket_policy_config1 = aws_iam.PolicyStatement(
    sid='AWSConfig BucketPermissionsCheck and BucketExistenceCheck',
    principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
    actions=['s3:GetBucketAcl', 's3:ListBucket'],
    resources=[s3_log.bucket_arn],)
bucket_policy_config2 = aws_iam.PolicyStatement(
    sid='AWSConfigBucketDelivery',
    principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
    actions=['s3:PutObject'], resources=[s3_awspath + '/Config/*'],
    conditions={
        'StringEquals': {'s3:x-amz-acl': 'bucket-owner-full-control'}})
s3_log.add_to_resource_policy(bucket_policy_config1)
s3_log.add_to_resource_policy(bucket_policy_config2)

# geoip
bucket_policy_geo1 = aws_iam.PolicyStatement(
    sid='Allow geoip downloader and es-loader to read/write',
    principals=[lambda_es_loader.role, lambda_geo.role],
    actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
    resources=[s3_geo.bucket_arn + '/*'],)
s3_geo.add_to_resource_policy(bucket_policy_geo1)

# ES Snapshot
bucket_policy_snapshot = aws_iam.PolicyStatement(
    sid='Allow ES to store snapshot',
    principals=[aes_siem_snapshot_role],
    actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
    resources=[s3_snapshot.bucket_arn + '/*'],)
s3_snapshot.add_to_resource_policy(bucket_policy_snapshot)

######################################################################
# for multiaccount / organizations
######################################################################
if org_id or no_org_ids:
    ##################################################################
    # KMS key policy for multiaccount / organizations
    ##################################################################
    # for CloudTrail
    cond_tail2 = self.make_resource_list(
        path='arn:aws:cloudtrail:*:', tail=':trail/*',
        keys=self.list_without_none(org_mgmt_id, no_org_ids))
    key_policy_mul_trail2 = aws_iam.PolicyStatement(
        sid=('Allow CloudTrail to encrypt logs for multiaccounts'),
        actions=['kms:GenerateDataKey*'],
        principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
        resources=['*'],
        conditions={'StringLike': {
            'kms:EncryptionContext:aws:cloudtrail:arn': cond_tail2}})
    kms_aes_siem.add_to_resource_policy(key_policy_mul_trail2)

    # for replication
    key_policy_rep1 = aws_iam.PolicyStatement(
        sid=('Enable cross account encrypt access for S3 Cross Region '
             'Replication'),
        actions=['kms:Encrypt'],
        principals=self.make_account_principals(
            org_mgmt_id, org_member_ids, no_org_ids),
        resources=['*'],)
    kms_aes_siem.add_to_resource_policy(key_policy_rep1)

    ##################################################################
    # Bucket Policy for multiaccount / organizations
    ##################################################################
    s3_log_bucket_arn = 'arn:aws:s3:::' + s3bucket_name_log
    # for CloudTrail
    s3_mulpaths =
self.make_resource_list( path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/*', keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids)) bucket_policy_org_trail = aws_iam.PolicyStatement( sid='AWSCloudTrailWrite for Multiaccounts / Organizations', principals=[ aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')], actions=['s3:PutObject'], resources=s3_mulpaths, conditions={'StringEquals': { 's3:x-amz-acl': 'bucket-owner-full-control'}}) s3_log.add_to_resource_policy(bucket_policy_org_trail) # config s3_conf_multpaths = self.make_resource_list( path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/Config/*', keys=self.list_without_none(org_id, org_mgmt_id, no_org_ids)) bucket_policy_mul_config2 = aws_iam.PolicyStatement( sid='AWSConfigBucketDelivery', principals=[aws_iam.ServicePrincipal('config.amazonaws.com')], actions=['s3:PutObject'], resources=s3_conf_multpaths, conditions={'StringEquals': { 's3:x-amz-acl': 'bucket-owner-full-control'}}) s3_log.add_to_resource_policy(bucket_policy_mul_config2) # for replication bucket_policy_rep1 = aws_iam.PolicyStatement( sid='PolicyForDestinationBucket / Permissions on objects', principals=self.make_account_principals( org_mgmt_id, org_member_ids, no_org_ids), actions=['s3:ReplicateDelete', 's3:ReplicateObject', 's3:ReplicateTags', 's3:GetObjectVersionTagging', 's3:ObjectOwnerOverrideToBucketOwner'], resources=[f'{s3_log_bucket_arn}/*']) bucket_policy_rep2 = aws_iam.PolicyStatement( sid='PolicyForDestinationBucket / Permissions on bucket', principals=self.make_account_principals( org_mgmt_id, org_member_ids, no_org_ids), actions=['s3:List*', 's3:GetBucketVersioning', 's3:PutBucketVersioning'], resources=[f'{s3_log_bucket_arn}']) s3_log.add_to_resource_policy(bucket_policy_rep1) s3_log.add_to_resource_policy(bucket_policy_rep2) ###################################################################### # SNS topic for Amazon OpenSearch Service Alert ###################################################################### sns_topic = aws_sns.Topic( self, 'SnsTopic', topic_name='aes-siem-alert', display_name='AES SIEM') sns_topic.add_subscription(aws_sns_subscriptions.EmailSubscription( email_address=sns_email.value_as_string)) sns_topic.grant_publish(aes_siem_sns_role) ###################################################################### # output of CFn ###################################################################### kibanaurl = f'https://{es_endpoint}/_dashboards/' kibanaadmin = aes_domain.get_att('kibanaadmin').to_string() kibanapass = aes_domain.get_att('kibanapass').to_string() core.CfnOutput(self, 'RoleDeploy', export_name='role-deploy', value=aes_siem_deploy_role_for_lambda.role_arn) core.CfnOutput(self, 'DashboardsUrl', export_name='dashboards-url', value=kibanaurl) core.CfnOutput(self, 'DashboardsPassword', export_name='dashboards-pass', value=kibanapass, description=('Please change the password in OpenSearch ' 'Dashboards ASAP')) core.CfnOutput(self, 'DashboardsAdminID', export_name='dashboards-admin', value=kibanaadmin)
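# ----------------------------------------------------------------------
# Editor's note: list_without_none(), make_resource_list() and
# make_account_principals() are helper methods defined elsewhere in this
# stack class and not shown in the excerpt. A minimal sketch consistent
# with how they are called above might look like this:
def list_without_none(self, *args):
    """Flatten ids and lists of ids, dropping None/empty entries."""
    ids = []
    for arg in args:
        if isinstance(arg, (list, tuple)):
            ids.extend(item for item in arg if item)
        elif arg:
            ids.append(arg)
    return ids

def make_resource_list(self, path='', tail='', keys=None):
    """Build '<path><key><tail>' for every account / organization id."""
    return [f'{path}{key}{tail}' for key in (keys or [])]

def make_account_principals(self, *id_groups):
    """Wrap every account id in an aws_iam.AccountPrincipal."""
    return [aws_iam.AccountPrincipal(account_id=account_id)
            for account_id in self.list_without_none(*id_groups)]
# ----------------------------------------------------------------------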
def __init__(self, scope: core.Construct, id: str, *, multi_account=False, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    # Get stack parameters:
    notification_email = create_notification_email_parameter(self)
    git_address = create_git_address_parameter(self)
    # Get the optional S3 assets bucket to use
    existing_bucket = create_existing_bucket_parameter(self)
    # Get the optional Amazon ECR repository to use
    existing_ecr_repo = create_existing_ecr_repo_parameter(self)

    # create only if multi_account template
    if multi_account:
        # create development parameters
        account_type = "development"
        dev_account_id = create_account_id_parameter(
            self, "DEV_ACCOUNT_ID", account_type)
        dev_org_id = create_org_id_parameter(self, "DEV_ORG_ID", account_type)
        # create staging parameters
        account_type = "staging"
        staging_account_id = create_account_id_parameter(
            self, "STAGING_ACCOUNT_ID", account_type)
        staging_org_id = create_org_id_parameter(self, "STAGING_ORG_ID", account_type)
        # create production parameters
        account_type = "production"
        prod_account_id = create_account_id_parameter(
            self, "PROD_ACCOUNT_ID", account_type)
        prod_org_id = create_org_id_parameter(self, "PROD_ORG_ID", account_type)

    # Conditions
    git_address_provided = create_git_address_provided_condition(
        self, git_address)
    # client provided an existing S3 bucket name, to be used for assets
    existing_bucket_provided = create_existing_bucket_provided_condition(
        self, existing_bucket)
    # client provided an existing Amazon ECR repository name
    existing_ecr_provided = create_existing_ecr_provided_condition(
        self, existing_ecr_repo)
    # S3 bucket needs to be created for assets
    create_new_bucket = create_new_bucket_condition(self, existing_bucket)
    # Amazon ECR repo needs to be created for custom algorithms
    create_new_ecr_repo = create_new_ecr_repo_condition(
        self, existing_ecr_repo)

    # Constants
    pipeline_stack_name = "mlops-pipeline"

    # CDK Resources setup
    access_logs_bucket = s3.Bucket(
        self, "accessLogs",
        encryption=s3.BucketEncryption.S3_MANAGED,
        block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
    )

    # Apply secure transfer bucket policy
    apply_secure_bucket_policy(access_logs_bucket)

    # This is a logging bucket.
    access_logs_bucket.node.default_child.cfn_options.metadata = suppress_s3_access_policy()

    # Import user-provided S3 bucket, if any. s3.Bucket.from_bucket_arn is used instead of
    # s3.Bucket.from_bucket_name to allow cross account bucket.
    client_existing_bucket = s3.Bucket.from_bucket_arn(
        self, "ClientExistingBucket",
        f"arn:aws:s3:::{existing_bucket.value_as_string.strip()}",
    )
    # Create the resource if existing_bucket_provided condition is True
    core.Aspects.of(client_existing_bucket).add(
        ConditionalResources(existing_bucket_provided))

    # Import user-provided Amazon ECR repository
    client_erc_repo = ecr.Repository.from_repository_name(
        self, "ClientExistingECRReo", existing_ecr_repo.value_as_string)
    # Create the resource if existing_ecr_provided condition is True
    core.Aspects.of(client_erc_repo).add(
        ConditionalResources(existing_ecr_provided))
    # Creating assets bucket so that users can upload ML Models to it.
assets_bucket = s3.Bucket( self, "pipeline-assets-" + str(uuid.uuid4()), versioned=True, encryption=s3.BucketEncryption.S3_MANAGED, server_access_logs_bucket=access_logs_bucket, server_access_logs_prefix="assets_bucket_access_logs", block_public_access=s3.BlockPublicAccess.BLOCK_ALL, ) # Apply secure transport bucket policy apply_secure_bucket_policy(assets_bucket) s3_actions = ["s3:GetObject", "s3:ListBucket"] # if multi account if multi_account: # add permissions for other accounts to access the assets bucket assets_bucket.add_to_resource_policy( iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=s3_actions, principals=[ iam.AccountPrincipal(dev_account_id.value_as_string), iam.AccountPrincipal( staging_account_id.value_as_string), iam.AccountPrincipal(prod_account_id.value_as_string), ], resources=[ assets_bucket.bucket_arn, f"{assets_bucket.bucket_arn}/*" ], )) # Create the resource if create_new_bucket condition is True core.Aspects.of(assets_bucket).add( ConditionalResources(create_new_bucket)) # Get assets S3 bucket's name/arn, based on the condition assets_s3_bucket_name = core.Fn.condition_if( existing_bucket_provided.logical_id, client_existing_bucket.bucket_name, assets_bucket.bucket_name, ).to_string() # Creating Amazon ECR repository ecr_repo = ecr.Repository(self, "ECRRepo", image_scan_on_push=True) # if multi account if multi_account: # add permissios to other account to pull images ecr_repo.add_to_resource_policy( iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "ecr:DescribeImages", "ecr:DescribeRepositories", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage", "ecr:BatchCheckLayerAvailability", ], principals=[ iam.AccountPrincipal(dev_account_id.value_as_string), iam.AccountPrincipal( staging_account_id.value_as_string), iam.AccountPrincipal(prod_account_id.value_as_string), ], )) # Create the resource if create_new_ecr condition is True core.Aspects.of(ecr_repo).add( ConditionalResources(create_new_ecr_repo)) # Get ECR repo's name based on the condition ecr_repo_name = core.Fn.condition_if( existing_ecr_provided.logical_id, client_erc_repo.repository_name, ecr_repo.repository_name, ).to_string() # Get ECR repo's arn based on the condition ecr_repo_arn = core.Fn.condition_if( existing_ecr_provided.logical_id, client_erc_repo.repository_arn, ecr_repo.repository_arn, ).to_string() blueprints_bucket_name = "blueprint-repository-" + str(uuid.uuid4()) blueprint_repository_bucket = s3.Bucket( self, blueprints_bucket_name, encryption=s3.BucketEncryption.S3_MANAGED, server_access_logs_bucket=access_logs_bucket, server_access_logs_prefix=blueprints_bucket_name, block_public_access=s3.BlockPublicAccess.BLOCK_ALL, ) # Apply secure transport bucket policy apply_secure_bucket_policy(blueprint_repository_bucket) # if multi account if multi_account: # add permissions for other accounts to access the blueprint bucket blueprint_repository_bucket.add_to_resource_policy( iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=s3_actions, principals=[ iam.AccountPrincipal(dev_account_id.value_as_string), iam.AccountPrincipal( staging_account_id.value_as_string), iam.AccountPrincipal(prod_account_id.value_as_string), ], resources=[ blueprint_repository_bucket.bucket_arn, f"{blueprint_repository_bucket.bucket_arn}/*" ], )) # Custom resource to copy source bucket content to blueprints bucket custom_resource_lambda_fn = lambda_.Function( self, "CustomResourceLambda", code=lambda_.Code.from_asset("lambdas/custom_resource"), handler="index.on_event", runtime=lambda_.Runtime.PYTHON_3_8, 
environment={ "source_bucket": "https://%%BUCKET_NAME%%-" + core.Aws.REGION + ".s3.amazonaws.com/%%SOLUTION_NAME%%/%%VERSION%%", "destination_bucket": blueprint_repository_bucket.bucket_name, "LOG_LEVEL": "INFO", }, timeout=core.Duration.seconds(60), ) custom_resource_lambda_fn.node.default_child.cfn_options.metadata = suppress_lambda_policies( ) blueprint_repository_bucket.grant_write(custom_resource_lambda_fn) custom_resource = core.CustomResource( self, "CustomResourceCopyAssets", service_token=custom_resource_lambda_fn.function_arn, ) custom_resource.node.add_dependency(blueprint_repository_bucket) # IAM policies setup ### cloudformation_role = iam.Role( self, "mlopscloudformationrole", assumed_by=iam.ServicePrincipal("cloudformation.amazonaws.com"), ) lambda_invoke_action = "lambda:InvokeFunction" # Cloudformation policy setup orchestrator_policy = iam.Policy( self, "lambdaOrchestratorPolicy", statements=[ iam.PolicyStatement( actions=[ "cloudformation:CreateStack", "cloudformation:DeleteStack", "cloudformation:UpdateStack", "cloudformation:ListStackResources", ], resources=[ (f"arn:{core.Aws.PARTITION}:cloudformation:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:stack/{pipeline_stack_name}*/*" ), ], ), iam.PolicyStatement( actions=[ "iam:CreateRole", "iam:DeleteRole", "iam:DeleteRolePolicy", "iam:GetRole", "iam:GetRolePolicy", "iam:PassRole", "iam:PutRolePolicy", "iam:AttachRolePolicy", "iam:DetachRolePolicy", ], resources=[ f"arn:{core.Aws.PARTITION}:iam::{core.Aws.ACCOUNT_ID}:role/{pipeline_stack_name}*" ], ), iam.PolicyStatement( actions=[ "ecr:CreateRepository", "ecr:DescribeRepositories", ], resources=[ (f"arn:{core.Aws.PARTITION}:ecr:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:repository/{ecr_repo_name}") ], ), iam.PolicyStatement( actions=[ "codebuild:CreateProject", "codebuild:DeleteProject", "codebuild:BatchGetProjects", ], resources=[ (f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:project/ContainerFactory*"), (f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:project/VerifySagemaker*"), (f"arn:{core.Aws.PARTITION}:codebuild:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:report-group/*"), ], ), iam.PolicyStatement( actions=[ "lambda:CreateFunction", "lambda:DeleteFunction", lambda_invoke_action, "lambda:PublishLayerVersion", "lambda:DeleteLayerVersion", "lambda:GetLayerVersion", "lambda:GetFunctionConfiguration", "lambda:GetFunction", "lambda:AddPermission", "lambda:RemovePermission", "lambda:UpdateFunctionConfiguration", ], resources=[ f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:layer:*", f"arn:{core.Aws.PARTITION}:lambda:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:function:*", ], ), iam.PolicyStatement( actions=s3_actions, resources=[ blueprint_repository_bucket.bucket_arn, blueprint_repository_bucket.arn_for_objects("*"), f"arn:{core.Aws.PARTITION}:s3:::{assets_s3_bucket_name}/*", ], ), iam.PolicyStatement( actions=[ "codepipeline:CreatePipeline", "codepipeline:UpdatePipeline", "codepipeline:DeletePipeline", "codepipeline:GetPipeline", "codepipeline:GetPipelineState", ], resources= [(f"arn:{core.Aws.PARTITION}:codepipeline:{core.Aws.REGION}:" f"{core.Aws.ACCOUNT_ID}:{pipeline_stack_name}*")], ), iam.PolicyStatement( actions=[ "apigateway:POST", "apigateway:PATCH", "apigateway:DELETE", "apigateway:GET", "apigateway:PUT", ], resources=[ f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis/*", f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/restapis", 
f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/account", f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans", f"arn:{core.Aws.PARTITION}:apigateway:{core.Aws.REGION}::/usageplans/*", ], ), iam.PolicyStatement( actions=[ "logs:CreateLogGroup", "logs:DescribeLogGroups", ], resources=[ f"arn:{core.Aws.PARTITION}:logs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:log-group:*", ], ), iam.PolicyStatement( actions=[ "s3:CreateBucket", "s3:PutEncryptionConfiguration", "s3:PutBucketVersioning", "s3:PutBucketPublicAccessBlock", "s3:PutBucketLogging", ], resources=[f"arn:{core.Aws.PARTITION}:s3:::*"], ), iam.PolicyStatement( actions=[ "s3:PutObject", ], resources=[ f"arn:{core.Aws.PARTITION}:s3:::{assets_s3_bucket_name}/*" ], ), iam.PolicyStatement( actions=[ "sns:CreateTopic", "sns:DeleteTopic", "sns:Subscribe", "sns:Unsubscribe", "sns:GetTopicAttributes", "sns:SetTopicAttributes", ], resources= [(f"arn:{core.Aws.PARTITION}:sns:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:" f"{pipeline_stack_name}*-*PipelineNotification*")], ), iam.PolicyStatement( actions=[ "events:PutRule", "events:DescribeRule", "events:PutTargets", "events:RemoveTargets", "events:DeleteRule", "events:PutEvents", ], resources=[ f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:rule/*", f"arn:{core.Aws.PARTITION}:events:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:event-bus/*", ], ), ], ) orchestrator_policy.attach_to_role(cloudformation_role) # Lambda function IAM setup lambda_passrole_policy = iam.PolicyStatement( actions=["iam:passrole"], resources=[cloudformation_role.role_arn]) # create sagemaker layer sm_layer = sagemaker_layer(self, blueprint_repository_bucket) # make sure the sagemaker code is uploaded first to the blueprints bucket sm_layer.node.add_dependency(custom_resource) # API Gateway and lambda setup to enable provisioning pipelines through API calls provisioner_apigw_lambda = aws_apigateway_lambda.ApiGatewayToLambda( self, "PipelineOrchestration", lambda_function_props={ "runtime": lambda_.Runtime.PYTHON_3_8, "handler": "index.handler", "code": lambda_.Code.from_asset("lambdas/pipeline_orchestration"), "layers": [sm_layer], "timeout": core.Duration.minutes(10), }, api_gateway_props={ "defaultMethodOptions": { "authorizationType": apigw.AuthorizationType.IAM, }, "restApiName": f"{core.Aws.STACK_NAME}-orchestrator", "proxy": False, "dataTraceEnabled": True, }, ) # add lambda supressions provisioner_apigw_lambda.lambda_function.node.default_child.cfn_options.metadata = suppress_lambda_policies( ) provision_resource = provisioner_apigw_lambda.api_gateway.root.add_resource( "provisionpipeline") provision_resource.add_method("POST") status_resource = provisioner_apigw_lambda.api_gateway.root.add_resource( "pipelinestatus") status_resource.add_method("POST") blueprint_repository_bucket.grant_read( provisioner_apigw_lambda.lambda_function) provisioner_apigw_lambda.lambda_function.add_to_role_policy( lambda_passrole_policy) orchestrator_policy.attach_to_role( provisioner_apigw_lambda.lambda_function.role) # Environment variables setup provisioner_apigw_lambda.lambda_function.add_environment( key="BLUEPRINT_BUCKET_URL", value=str(blueprint_repository_bucket.bucket_regional_domain_name), ) provisioner_apigw_lambda.lambda_function.add_environment( key="BLUEPRINT_BUCKET", value=str(blueprint_repository_bucket.bucket_name)) provisioner_apigw_lambda.lambda_function.add_environment( key="ACCESS_BUCKET", value=str(access_logs_bucket.bucket_name)) 
provisioner_apigw_lambda.lambda_function.add_environment( key="ASSETS_BUCKET", value=str(assets_s3_bucket_name)) provisioner_apigw_lambda.lambda_function.add_environment( key="CFN_ROLE_ARN", value=str(cloudformation_role.role_arn)) provisioner_apigw_lambda.lambda_function.add_environment( key="PIPELINE_STACK_NAME", value=pipeline_stack_name) provisioner_apigw_lambda.lambda_function.add_environment( key="NOTIFICATION_EMAIL", value=notification_email.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="REGION", value=core.Aws.REGION) provisioner_apigw_lambda.lambda_function.add_environment( key="IS_MULTI_ACCOUNT", value=str(multi_account)) # if multi account if multi_account: provisioner_apigw_lambda.lambda_function.add_environment( key="DEV_ACCOUNT_ID", value=dev_account_id.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="DEV_ORG_ID", value=dev_org_id.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="STAGING_ACCOUNT_ID", value=staging_account_id.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="STAGING_ORG_ID", value=staging_org_id.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="PROD_ACCOUNT_ID", value=prod_account_id.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="PROD_ORG_ID", value=prod_org_id.value_as_string) provisioner_apigw_lambda.lambda_function.add_environment( key="ECR_REPO_NAME", value=ecr_repo_name) provisioner_apigw_lambda.lambda_function.add_environment( key="ECR_REPO_ARN", value=ecr_repo_arn) provisioner_apigw_lambda.lambda_function.add_environment( key="LOG_LEVEL", value="DEBUG") cfn_policy_for_lambda = orchestrator_policy.node.default_child cfn_policy_for_lambda.cfn_options.metadata = { "cfn_nag": { "rules_to_suppress": [{ "id": "W76", "reason": "A complex IAM policy is required for this resource.", }] } } # Codepipeline with Git source definitions ### source_output = codepipeline.Artifact() # processing git_address to retrieve repo name repo_name_split = core.Fn.split("/", git_address.value_as_string) repo_name = core.Fn.select(5, repo_name_split) # getting codecommit repo cdk object using 'from_repository_name' repo = codecommit.Repository.from_repository_name( self, "AWSMLOpsFrameworkRepository", repo_name) codebuild_project = codebuild.PipelineProject( self, "Take config file", build_spec=codebuild.BuildSpec.from_object({ "version": "0.2", "phases": { "build": { "commands": [ "ls -a", "aws lambda invoke --function-name " + provisioner_apigw_lambda.lambda_function. 
function_name + " --payload fileb://mlops-config.json response.json" + " --invocation-type RequestResponse", ] } }, }), ) # Defining a Codepipeline project with CodeCommit as source codecommit_pipeline = codepipeline.Pipeline( self, "MLOpsCodeCommitPipeline", stages=[ codepipeline.StageProps( stage_name="Source", actions=[ codepipeline_actions.CodeCommitSourceAction( action_name="CodeCommit", repository=repo, branch="main", output=source_output, ) ], ), codepipeline.StageProps( stage_name="TakeConfig", actions=[ codepipeline_actions.CodeBuildAction( action_name="provision_pipeline", input=source_output, outputs=[], project=codebuild_project, ) ], ), ], cross_account_keys=False, ) codecommit_pipeline.add_to_role_policy( iam.PolicyStatement( actions=[lambda_invoke_action], resources=[ provisioner_apigw_lambda.lambda_function.function_arn ], )) codebuild_project.add_to_role_policy( iam.PolicyStatement( actions=[lambda_invoke_action], resources=[ provisioner_apigw_lambda.lambda_function.function_arn ], )) pipeline_child_nodes = codecommit_pipeline.node.find_all() pipeline_child_nodes[1].node.default_child.cfn_options.metadata = { "cfn_nag": { "rules_to_suppress": [ { "id": "W35", "reason": "This is a managed bucket generated by CDK for codepipeline.", }, { "id": "W51", "reason": "This is a managed bucket generated by CDK for codepipeline.", }, ] } } # custom resource for operational metrics### metrics_mapping = core.CfnMapping( self, "AnonymousData", mapping={"SendAnonymousData": { "Data": "Yes" }}) metrics_condition = core.CfnCondition( self, "AnonymousDatatoAWS", expression=core.Fn.condition_equals( metrics_mapping.find_in_map("SendAnonymousData", "Data"), "Yes"), ) helper_function = lambda_.Function( self, "SolutionHelper", code=lambda_.Code.from_asset("lambdas/solution_helper"), handler="lambda_function.handler", runtime=lambda_.Runtime.PYTHON_3_8, timeout=core.Duration.seconds(60), ) helper_function.node.default_child.cfn_options.metadata = suppress_lambda_policies( ) create_id_function = core.CustomResource( self, "CreateUniqueID", service_token=helper_function.function_arn, properties={"Resource": "UUID"}, resource_type="Custom::CreateUUID", ) send_data_function = core.CustomResource( self, "SendAnonymousData", service_token=helper_function.function_arn, properties={ "Resource": "AnonymousMetric", "UUID": create_id_function.get_att_string("UUID"), "gitSelected": git_address.value_as_string, "Region": core.Aws.REGION, "SolutionId": "SO0136", "Version": "%%VERSION%%", }, resource_type="Custom::AnonymousData", ) core.Aspects.of(helper_function).add( ConditionalResources(metrics_condition)) core.Aspects.of(create_id_function).add( ConditionalResources(metrics_condition)) core.Aspects.of(send_data_function).add( ConditionalResources(metrics_condition)) # If user chooses Git as pipeline provision type, create codepipeline with Git repo as source core.Aspects.of(repo).add(ConditionalResources(git_address_provided)) core.Aspects.of(codecommit_pipeline).add( ConditionalResources(git_address_provided)) core.Aspects.of(codebuild_project).add( ConditionalResources(git_address_provided)) # Create Template Interface paramaters_list = [ notification_email.logical_id, git_address.logical_id, existing_bucket.logical_id, existing_ecr_repo.logical_id, ] # if multi account if multi_account: paramaters_list.extend([ dev_account_id.logical_id, dev_org_id.logical_id, staging_account_id.logical_id, staging_org_id.logical_id, prod_account_id.logical_id, prod_org_id.logical_id, ]) paramaters_labels = { 
f"{notification_email.logical_id}": { "default": "Notification Email (Required)" }, f"{git_address.logical_id}": { "default": "CodeCommit Repo URL Address (Optional)" }, f"{existing_bucket.logical_id}": { "default": "Name of an Existing S3 Bucket (Optional)" }, f"{existing_ecr_repo.logical_id}": { "default": "Name of an Existing Amazon ECR repository (Optional)" }, } if multi_account: paramaters_labels.update({ f"{dev_account_id.logical_id}": { "default": "Development Account ID (Required)" }, f"{dev_org_id.logical_id}": { "default": "Development Account Organizational Unit ID (Required)" }, f"{staging_account_id.logical_id}": { "default": "Staging Account ID (Required)" }, f"{staging_org_id.logical_id}": { "default": "Staging Account Organizational Unit ID (Required)" }, f"{prod_account_id.logical_id}": { "default": "Production Account ID (Required)" }, f"{prod_org_id.logical_id}": { "default": "Production Account Organizational Unit ID (Required)" }, }) self.template_options.metadata = { "AWS::CloudFormation::Interface": { "ParameterGroups": [{ "Label": { "default": "MLOps Framework Settings" }, "Parameters": paramaters_list, }], "ParameterLabels": paramaters_labels, } } # Outputs # core.CfnOutput( self, id="BlueprintsBucket", value= f"https://s3.console.aws.amazon.com/s3/buckets/{blueprint_repository_bucket.bucket_name}", description="S3 Bucket to upload MLOps Framework Blueprints", ) core.CfnOutput( self, id="AssetsBucket", value= f"https://s3.console.aws.amazon.com/s3/buckets/{assets_s3_bucket_name}", description="S3 Bucket to upload model artifact", ) core.CfnOutput( self, id="ECRRepoName", value=ecr_repo_name, description="Amazon ECR repository's name", ) core.CfnOutput( self, id="ECRRepoArn", value=ecr_repo_arn, description="Amazon ECR repository's arn", )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) sns_principals_mapping = core.CfnMapping( scope=self, id="Inspector SNS Principals", mapping={ "eu-central-1": { "ARN": "arn:aws:iam::537503971621:root" }, "us-east-1": { "ARN": "arn:aws:iam::316112463485:root" }, "eu-west-1": { "ARN": "arn:aws:iam::357557129151:root" }, "us-east-2": { "ARN": "arn:aws:iam::646659390643:root" }, } ) inspector_rules_mapping = core.CfnMapping( scope=self, id="Inspector Rule packages", mapping={ "eu-central-1": { "CVE": "arn:aws:inspector:eu-central-1:537503971621:rulespackage/0-wNqHa8M9", "CIS": "arn:aws:inspector:eu-central-1:537503971621:rulespackage/0-nZrAVuv8", "securityBestPractices": "arn:aws:inspector:eu-central-1:537503971621:rulespackage/0-ZujVHEPB", "runtimeBehaviorAnalysis": "arn:aws:inspector:eu-central-1:537503971621:rulespackage/0-0GMUM6fg" }, "eu-west-1": { "CVE": "arn:aws:inspector:eu-west-1:357557129151:rulespackage/0-ubA5XvBh", "CIS": "arn:aws:inspector:eu-west-1:357557129151:rulespackage/0-sJBhCr0F", "securityBestPractices": "arn:aws:inspector:eu-west-1:357557129151:rulespackage/0-SnojL3Z6", "runtimeBehaviorAnalysis": "arn:aws:inspector:eu-west-1:357557129151:rulespackage/0-lLmwe1zd" }, "us-east-1": { "CVE": "arn:aws:inspector:us-east-1:316112463485:rulespackage/0-gEjTy7T7", "CIS": "arn:aws:inspector:us-east-1:316112463485:rulespackage/0-rExsr2X8", "securityBestPractices": "arn:aws:inspector:us-east-1:316112463485:rulespackage/0-R01qwB5Q", "runtimeBehaviorAnalysis": "arn:aws:inspector:us-east-1:316112463485:rulespackage/0-gBONHN9h" }, "us-east-2": { "CVE": "arn:aws:inspector:us-east-2:646659390643:rulespackage/0-JnA8Zp85", "CIS": "arn:aws:inspector:us-east-2:646659390643:rulespackage/0-m8r61nnh", "securityBestPractices": "arn:aws:inspector:us-east-2:646659390643:rulespackage/0-AxKmMHPX", "runtimeBehaviorAnalysis": "arn:aws:inspector:us-east-2:646659390643:rulespackage/0-UCYZFKPV" }, } ) resource_group = inspector.CfnResourceGroup( scope=self, id="CDK test resource group", resource_group_tags=[core.CfnTag(key="Inspector", value="true")] ) assessment_target = inspector.CfnAssessmentTarget( scope=self, id="CDK test assessment target", resource_group_arn=resource_group.attr_arn ) assessment_template = inspector.CfnAssessmentTemplate( scope=self, id="CDK test assessment template", assessment_target_arn=assessment_target.attr_arn, duration_in_seconds=300, rules_package_arns=[ inspector_rules_mapping.find_in_map(self.region, package) for package in ( "CVE", "CIS", "securityBestPractices", "runtimeBehaviorAnalysis" ) ] ) report_function = aws_lambda.Function( scope=self, id="CDK Inspector test report processor", code=aws_lambda.Code.from_asset("report_function"), handler="report.lambda_handler", runtime=aws_lambda.Runtime.PYTHON_3_7 ) topic = sns.Topic( scope=self, id="CDK Inspector topic" ) topic.add_to_resource_policy( statement=iam.PolicyStatement( actions=["SNS:Publish"], principals=[iam.ArnPrincipal(arn=sns_principals_mapping.find_in_map(self.region, "ARN"))], resources=[topic.topic_arn] ) ) topic.add_subscription( subscription=sns_subs.LambdaSubscription(fn=report_function) ) subscriber = InspectorSubscriberCustomResource( scope=self, id="Inspector SNS Subscriber", Template=assessment_template.attr_arn, Topic=topic.topic_arn )
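# ----------------------------------------------------------------------
# Editor's note: InspectorSubscriberCustomResource is defined outside
# this excerpt. Its backing Lambda presumably wires the assessment
# template to the SNS topic through the Inspector Classic
# SubscribeToEvent API; a minimal sketch of that call (assumption: the
# function name and the chosen events are illustrative):
import boto3

def subscribe_template_to_topic(template_arn, topic_arn):
    inspector_client = boto3.client("inspector")
    for event in ("ASSESSMENT_RUN_STARTED", "ASSESSMENT_RUN_COMPLETED"):
        inspector_client.subscribe_to_event(
            resourceArn=template_arn, event=event, topicArn=topic_arn)
# ----------------------------------------------------------------------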
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) current_directory = os.path.realpath( os.path.join(os.getcwd(), os.path.dirname(__file__))) allowed_values = yaml.load(open( os.path.join(current_directory, "..", "..", "allowed_values.yaml")), Loader=yaml.SafeLoader) ami_mapping = {"AMI": {"OEJITSI": AMI_NAME}} for region in generated_ami_ids.keys(): ami_mapping[region] = {"OEJITSI": generated_ami_ids[region]} aws_ami_region_map = core.CfnMapping(self, "AWSAMIRegionMap", mapping=ami_mapping) # utility function to parse the unique id from the stack id for # shorter resource names using cloudformation functions def append_stack_uuid(name): return core.Fn.join("-", [ name, core.Fn.select( 0, core.Fn.split( "-", core.Fn.select(2, core.Fn.split( "/", core.Aws.STACK_ID)))) ]) # # PARAMETERS # cidr_block_param = core.CfnParameter( self, "IngressCidrBlock", allowed_pattern="((\d{1,3})\.){3}\d{1,3}/\d{1,2}", default="0.0.0.0/0", description= "Required: A CIDR block to restrict access to the Jitsi application. Leave as 0.0.0.0/0 to allow public access from internet." ) ec2_instance_type_param = core.CfnParameter( self, "InstanceType", allowed_values=allowed_values["allowed_instance_types"], default="t3.xlarge", description= "Required: The EC2 instance type for the application Auto Scaling Group." ) jitsi_hostname_param = core.CfnParameter( self, "JitsiHostname", description= "Required: The hostname to access Jitsi. E.G. 'jitsi.internal.mycompany.com'" ) jitsi_interface_app_name_param = core.CfnParameter( self, "JitsiInterfaceAppName", default="Jitsi Meet", description= "Optional: Customize the app name on the Jitsi interface.") jitsi_interface_default_remote_display_name_param = core.CfnParameter( self, "JitsiInterfaceDefaultRemoteDisplayName", default="Fellow Jitster", description= "Optional: Customize the default display name for Jitsi users.") jitsi_interface_native_app_name_param = core.CfnParameter( self, "JitsiInterfaceNativeAppName", default="Jitsi Meet", description= "Optional: Customize the native app name on the Jitsi interface.") jitsi_interface_show_brand_watermark_param = core.CfnParameter( self, "JitsiInterfaceShowBrandWatermark", allowed_values=["true", "false"], default="true", description= "Optional: Display the watermark logo image in the upper left corner." ) jitsi_interface_show_watermark_for_guests_param = core.CfnParameter( self, "JitsiInterfaceShowWatermarkForGuests", allowed_values=["true", "false"], default="true", description= "Optional: Display the watermark logo image in the upper left corner for guest users. This can be set to override the general setting behavior for guest users." ) jitsi_interface_brand_watermark_param = core.CfnParameter( self, "JitsiInterfaceBrandWatermark", default="", description= "Optional: Provide a URL to a PNG image to be used as the brand watermark logo image in the upper right corner. File should be publically available for download." ) jitsi_interface_brand_watermark_link_param = core.CfnParameter( self, "JitsiInterfaceBrandWatermarkLink", default="http://jitsi.org", description= "Optional: Provide a link destination for the brand watermark logo image in the upper right corner." ) jitsi_interface_watermark_param = core.CfnParameter( self, "JitsiInterfaceWatermark", default="", description= "Optional: Provide a URL to a PNG image to be used as the watermark logo image in the upper left corner. File should be publically available for download." 
) jitsi_interface_watermark_link_param = core.CfnParameter( self, "JitsiInterfaceWatermarkLink", default="http://jitsi.org", description= "Optional: Provide a link destination for the Jitsi watermark logo image in the upper left corner." ) route_53_hosted_zone_name_param = core.CfnParameter( self, "Route53HostedZoneName", description= "Required: Route 53 Hosted Zone name in which a DNS record will be created by this template. Must already exist and be the domain part of the Jitsi Hostname parameter, without trailing dot. E.G. 'internal.mycompany.com'" ) notification_email_param = core.CfnParameter( self, "NotificationEmail", default="", description= "Optional: Specify an email address to get emails about deploys, Let's Encrypt, and other system events." ) # # CONDITIONS # notification_email_exists_condition = core.CfnCondition( self, "NotificationEmailExistsCondition", expression=core.Fn.condition_not( core.Fn.condition_equals(notification_email_param.value, ""))) # # RESOURCES # # vpc vpc = Vpc(self, "Vpc") # sns sns_notification_topic = aws_sns.CfnTopic( self, "NotificationTopic", topic_name="{}-notifications".format(core.Aws.STACK_NAME)) sns_notification_subscription = aws_sns.CfnSubscription( self, "NotificationSubscription", protocol="email", topic_arn=sns_notification_topic.ref, endpoint=notification_email_param.value_as_string) sns_notification_subscription.cfn_options.condition = notification_email_exists_condition iam_notification_publish_policy = aws_iam.PolicyDocument(statements=[ aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW, actions=["sns:Publish"], resources=[sns_notification_topic.ref]) ]) # cloudwatch app_log_group = aws_logs.CfnLogGroup( self, "JitsiAppLogGroup", retention_in_days=TWO_YEARS_IN_DAYS) app_log_group.cfn_options.update_replace_policy = core.CfnDeletionPolicy.RETAIN app_log_group.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN system_log_group = aws_logs.CfnLogGroup( self, "JitsiSystemLogGroup", retention_in_days=TWO_YEARS_IN_DAYS) system_log_group.cfn_options.update_replace_policy = core.CfnDeletionPolicy.RETAIN system_log_group.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN # iam iam_jitsi_instance_role = aws_iam.CfnRole( self, "JitsiInstanceRole", assume_role_policy_document=aws_iam.PolicyDocument(statements=[ aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, actions=["sts:AssumeRole"], principals=[aws_iam.ServicePrincipal("ec2.amazonaws.com")]) ]), policies=[ aws_iam.CfnRole.PolicyProperty( policy_document=aws_iam.PolicyDocument(statements=[ aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, actions=[ "logs:CreateLogStream", "logs:DescribeLogStreams", "logs:PutLogEvents" ], resources=[ app_log_group.attr_arn, system_log_group.attr_arn ]) ]), policy_name="AllowStreamLogsToCloudWatch"), aws_iam.CfnRole.PolicyProperty( policy_document=aws_iam.PolicyDocument(statements=[ aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, actions=[ "ec2:AssociateAddress", "ec2:DescribeVolumes", "ec2:DescribeTags", "cloudwatch:GetMetricStatistics", "cloudwatch:ListMetrics", "cloudwatch:PutMetricData" ], resources=["*"]) ]), policy_name="AllowStreamMetricsToCloudWatch"), aws_iam.CfnRole.PolicyProperty( policy_document=aws_iam.PolicyDocument(statements=[ aws_iam.PolicyStatement( effect=aws_iam.Effect.ALLOW, actions=["autoscaling:Describe*"], resources=["*"]) ]), policy_name="AllowDescribeAutoScaling"), ], managed_policy_arns=[ "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" ]) # ec2 jitsi_sg = aws_ec2.CfnSecurityGroup( self, 
"JitsiSg", group_description="Jitsi security group", vpc_id=vpc.id()) eip = aws_ec2.CfnEIP(self, "Eip", domain="vpc") core.Tags.of(eip).add("Name", "{}/Eip".format(core.Aws.STACK_NAME)) ec2_instance_profile = aws_iam.CfnInstanceProfile( self, "JitsiInstanceProfile", roles=[iam_jitsi_instance_role.ref]) with open("jitsi/jitsi_launch_config_user_data.sh") as f: jitsi_launch_config_user_data = f.read() ec2_launch_config = aws_autoscaling.CfnLaunchConfiguration( self, "JitsiLaunchConfig", image_id=core.Fn.find_in_map("AWSAMIRegionMap", core.Aws.REGION, "OEJITSI"), instance_type=ec2_instance_type_param.value_as_string, iam_instance_profile=ec2_instance_profile.ref, security_groups=[jitsi_sg.ref], user_data=(core.Fn.base64( core.Fn.sub( jitsi_launch_config_user_data, { "JitsiHostname": jitsi_hostname_param.value_as_string, "JitsiPublicIP": eip.ref, "LetsEncryptCertificateEmail": notification_email_param.value_as_string })))) # autoscaling asg = aws_autoscaling.CfnAutoScalingGroup( self, "JitsiAsg", launch_configuration_name=ec2_launch_config.ref, desired_capacity="1", max_size="1", min_size="1", vpc_zone_identifier=vpc.public_subnet_ids()) asg.cfn_options.creation_policy = core.CfnCreationPolicy( resource_signal=core.CfnResourceSignal(count=1, timeout="PT15M")) asg.cfn_options.update_policy = core.CfnUpdatePolicy( auto_scaling_rolling_update=core.CfnAutoScalingRollingUpdate( max_batch_size=1, min_instances_in_service=0, pause_time="PT15M", wait_on_resource_signals=True)) core.Tags.of(asg).add("Name", "{}/JitsiAsg".format(core.Aws.STACK_NAME)) jitsi_http_ingress = aws_ec2.CfnSecurityGroupIngress( self, "JitsiHttpSgIngress", cidr_ip=cidr_block_param.value_as_string, from_port=80, group_id=jitsi_sg.ref, ip_protocol="tcp", to_port=80) jitsi_https_ingress = aws_ec2.CfnSecurityGroupIngress( self, "JitsiHttpsSgIngress", cidr_ip=cidr_block_param.value_as_string, from_port=443, group_id=jitsi_sg.ref, ip_protocol="tcp", to_port=443) jitsi_fallback_network_audio_video_ingress = aws_ec2.CfnSecurityGroupIngress( self, "JitsiFallbackNetworkAudioVideoSgIngress", cidr_ip=cidr_block_param.value_as_string, from_port=4443, group_id=jitsi_sg.ref, ip_protocol="tcp", to_port=4443) jitsi_general_network_audio_video_ingress = aws_ec2.CfnSecurityGroupIngress( self, "JitsiGeneralNetworkAudioVideoSgIngress", cidr_ip=cidr_block_param.value_as_string, from_port=10000, group_id=jitsi_sg.ref, ip_protocol="udp", to_port=10000) # route 53 record_set = aws_route53.CfnRecordSet( self, "RecordSet", hosted_zone_name= f"{route_53_hosted_zone_name_param.value_as_string}.", name=jitsi_hostname_param.value_as_string, resource_records=[eip.ref], type="A") # https://github.com/aws/aws-cdk/issues/8431 record_set.add_property_override("TTL", 60) # AWS::CloudFormation::Interface self.template_options.metadata = { "OE::Patterns::TemplateVersion": template_version, "AWS::CloudFormation::Interface": { "ParameterGroups": [{ "Label": { "default": "Infrastructure Config" }, "Parameters": [ jitsi_hostname_param.logical_id, route_53_hosted_zone_name_param.logical_id, cidr_block_param.logical_id, ec2_instance_type_param.logical_id, notification_email_param.logical_id ] }, { "Label": { "default": "Jitsi Config" }, "Parameters": [ jitsi_interface_app_name_param.logical_id, jitsi_interface_default_remote_display_name_param. logical_id, jitsi_interface_native_app_name_param.logical_id, jitsi_interface_show_brand_watermark_param.logical_id, jitsi_interface_show_watermark_for_guests_param. 
logical_id, jitsi_interface_brand_watermark_param.logical_id, jitsi_interface_brand_watermark_link_param.logical_id, jitsi_interface_watermark_param.logical_id, jitsi_interface_watermark_link_param.logical_id, ] }, *vpc.metadata_parameter_group()], "ParameterLabels": { cidr_block_param.logical_id: { "default": "Ingress CIDR Block" }, ec2_instance_type_param.logical_id: { "default": "EC2 instance type" }, jitsi_hostname_param.logical_id: { "default": "Jitsi Hostname" }, jitsi_interface_app_name_param.logical_id: { "default": "Jitsi Interface App Name" }, jitsi_interface_default_remote_display_name_param.logical_id: { "default": "Jitsi Interface Default Remote Display Name" }, jitsi_interface_native_app_name_param.logical_id: { "default": "Jitsi Interface Native App Name" }, jitsi_interface_show_brand_watermark_param.logical_id: { "default": "Jitsi Interface Show Watermark" }, jitsi_interface_show_watermark_for_guests_param.logical_id: { "default": "Jitsi Interface Show Watermark For Guests" }, jitsi_interface_brand_watermark_param.logical_id: { "default": "Jitsi Interface Watermark" }, jitsi_interface_brand_watermark_link_param.logical_id: { "default": "Jitsi Interface Watermark Link" }, jitsi_interface_watermark_param.logical_id: { "default": "Jitsi Interface Watermark" }, jitsi_interface_watermark_link_param.logical_id: { "default": "Jitsi Interface Watermark Link" }, notification_email_param.logical_id: { "default": "Notification Email" }, route_53_hosted_zone_name_param.logical_id: { "default": "AWS Route 53 Hosted Zone Name" }, **vpc.metadata_parameter_labels() } } } # # OUTPUTS # eip_output = core.CfnOutput( self, "EipOutput", description= "The Elastic IP address dynamically mapped to the autoscaling group instance.", value=eip.ref) endpoint_output = core.CfnOutput( self, "JitsiUrl", description="The URL for the Jitsi instance.", value=core.Fn.join( "", ["https://", jitsi_hostname_param.value_as_string]))
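# ----------------------------------------------------------------------
# Editor's note: a plain-Python illustration (not deployed code) of what
# the append_stack_uuid() CloudFormation expression defined above
# evaluates to at deploy time; the stack-id format is fixed by
# CloudFormation:
def append_stack_uuid_local(name, stack_id):
    # stack id: arn:aws:cloudformation:REGION:ACCOUNT:stack/NAME/UUID
    uuid_part = stack_id.split("/")[2].split("-")[0]
    return f"{name}-{uuid_part}"

assert append_stack_uuid_local(
    "jitsi",
    "arn:aws:cloudformation:us-east-1:123456789012:stack/my-stack/"
    "1a2b3c4d-5e6f-7a8b-9c0d-1e2f3a4b5c6d") == "jitsi-1a2b3c4d"
# ----------------------------------------------------------------------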
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    ######################################################################
    # ELB mapping
    ######################################################################
    elb_id_temp = region_info.FactName.ELBV2_ACCOUNT
    elb_map_temp = region_info.RegionInfo.region_map(elb_id_temp)
    elb_mapping = {}
    for key in elb_map_temp:
        elb_mapping[key] = {'accountid': elb_map_temp[key]}
    elb_accounts = core.CfnMapping(
        scope=self, id='ELBv2AccountMap', mapping=elb_mapping)

    ######################################################################
    # get params
    ######################################################################
    allow_source_address = core.CfnParameter(
        self, 'AllowedSourceIpAddresses', allowed_pattern=r'^[0-9./\s]*',
        description='Space-delimited list of CIDR blocks',
        default='10.0.0.0/8 172.16.0.0/12 192.168.0.0/16')
    sns_email = core.CfnParameter(
        self, 'SnsEmail', allowed_pattern=r'^[0-9a-zA-Z@_\-\+\.]*',
        description=('Input the email address that will receive alerts '
                     'from Amazon ES via this SNS topic'),
        default='*****@*****.**')
    geoip_license_key = core.CfnParameter(
        self, 'GeoLite2LicenseKey', allowed_pattern=r'^[0-9a-zA-Z]{16}$',
        default='xxxxxxxxxxxxxxxx',
        description=("If you would like to enrich logs with GeoIP "
                     "location data, such as the country of an IP "
                     "address, get a license key from MaxMind and input "
                     "it here. If not, keep xxxxxxxxxxxxxxxx"))

    aes_domain_name = self.node.try_get_context('aes_domain_name')
    bucket = f'{aes_domain_name}-{core.Aws.ACCOUNT_ID}'
    s3bucket_name_geo = f'{bucket}-geo'
    s3bucket_name_log = f'{bucket}-log'
    s3bucket_name_snapshot = f'{bucket}-snapshot'

    # organizations / multiaccount
    org_id = self.node.try_get_context('organizations').get('org_id')
    org_mgmt_id = self.node.try_get_context('organizations').get(
        'management_id')
    org_member_ids = self.node.try_get_context('organizations').get(
        'member_ids')
    no_org_ids = self.node.try_get_context('no_organizations').get(
        'aws_accounts')

    temp_geo = self.node.try_get_context('s3_bucket_name').get('geo')
    if temp_geo:
        s3bucket_name_geo = temp_geo
    temp_log = self.node.try_get_context('s3_bucket_name').get('log')
    if temp_log:
        s3bucket_name_log = temp_log
    elif org_id or no_org_ids:
        s3bucket_name_log = f'{aes_domain_name}-{self.account}-log'
    temp_snap = self.node.try_get_context('s3_bucket_name').get('snapshot')
    if temp_snap:
        s3bucket_name_snapshot = temp_snap
    kms_cmk_alias = self.node.try_get_context('kms_cmk_alias')
    if not kms_cmk_alias:
        kms_cmk_alias = 'aes-siem-key'

    ######################################################################
    # deploy VPC when context is defined as using VPC
    ######################################################################
    # vpc_type is 'new' or 'import' or None
    vpc_type = self.node.try_get_context('vpc_type')

    if vpc_type == 'new':
        is_vpc = True
        vpc_cidr = self.node.try_get_context('new_vpc_nw_cidr_block')
        subnet_cidr_mask = int(
            self.node.try_get_context('new_vpc_subnet_cidr_mask'))
        # VPC
        vpc_aes_siem = aws_ec2.Vpc(
            self, 'VpcAesSiem', cidr=vpc_cidr,
            max_azs=3, nat_gateways=0,
            subnet_configuration=[
                aws_ec2.SubnetConfiguration(
                    subnet_type=aws_ec2.SubnetType.ISOLATED,
                    name='aes-siem-subnet', cidr_mask=subnet_cidr_mask)])
        subnet1 = vpc_aes_siem.isolated_subnets[0]
        subnets = [{'subnet_type': aws_ec2.SubnetType.ISOLATED}]
        vpc_subnets = aws_ec2.SubnetSelection(
            subnet_type=aws_ec2.SubnetType.ISOLATED)
        vpc_aes_siem_opt = vpc_aes_siem.node.default_child.cfn_options
        vpc_aes_siem_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
        for subnet in vpc_aes_siem.isolated_subnets:
            subnet_opt = subnet.node.default_child.cfn_options
            subnet_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN
    elif vpc_type == 'import':
        vpc_id = self.node.try_get_context('imported_vpc_id')
        _subnet1 = self.node.try_get_context('imported_vpc_subnet1')
        _subnet2 = self.node.try_get_context('imported_vpc_subnet2')
        _subnet3 = self.node.try_get_context('imported_vpc_subnet3')
        vpc_aes_siem = aws_ec2.Vpc.from_lookup(
            self, 'VpcAesSiem', vpc_id=vpc_id)
        subnet1 = aws_ec2.Subnet.from_subnet_attributes(
            self, 'Subnet1', **_subnet1)
        subnet2 = aws_ec2.Subnet.from_subnet_attributes(
            self, 'Subnet2', **_subnet2)
        subnet3 = aws_ec2.Subnet.from_subnet_attributes(
            self, 'Subnet3', **_subnet3)
        subnets = [subnet1, subnet2, subnet3]
        vpc_subnets = aws_ec2.SubnetSelection(subnets=subnets)

    if vpc_type:
        is_vpc = True
        # Security Group
        sg_vpc_noinbound_aes_siem = aws_ec2.SecurityGroup(
            self, 'AesSiemVpcNoinboundSecurityGroup',
            security_group_name='aes-siem-noinbound-vpc-sg',
            vpc=vpc_aes_siem)
        sg_vpc_aes_siem = aws_ec2.SecurityGroup(
            self, 'AesSiemVpcSecurityGroup',
            security_group_name='aes-siem-vpc-sg',
            vpc=vpc_aes_siem)
        sg_vpc_aes_siem.add_ingress_rule(
            peer=aws_ec2.Peer.ipv4(vpc_aes_siem.vpc_cidr_block),
            connection=aws_ec2.Port.tcp(443),)
        sg_vpc_opt = sg_vpc_aes_siem.node.default_child.cfn_options
        sg_vpc_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # VPC Endpoint
        vpc_aes_siem.add_gateway_endpoint(
            'S3Endpoint', service=aws_ec2.GatewayVpcEndpointAwsService.S3,
            subnets=subnets)
        vpc_aes_siem.add_interface_endpoint(
            'SQSEndpoint', security_groups=[sg_vpc_aes_siem],
            service=aws_ec2.InterfaceVpcEndpointAwsService.SQS,)
        vpc_aes_siem.add_interface_endpoint(
            'KMSEndpoint', security_groups=[sg_vpc_aes_siem],
            service=aws_ec2.InterfaceVpcEndpointAwsService.KMS,)
        vpc_aes_siem.add_interface_endpoint(
            'SNSEndpoint', security_groups=[sg_vpc_aes_siem],
            service=aws_ec2.InterfaceVpcEndpointAwsService.SNS,)
    else:
        is_vpc = False

    is_vpc = core.CfnCondition(
        self, 'IsVpc',
        expression=core.Fn.condition_equals(is_vpc, True))
    """
    How to attach the condition when synthesizing CloudFormation:
    when a class's base is aws_cdk.core.Resource, use
        node.default_child.cfn_options.condition = is_vpc
    when a class's base is aws_cdk.core.CfnResource, use
        cfn_options.condition = is_vpc
    (see the apply_condition sketch after this code)
    """

    ######################################################################
    # create cmk of KMS to encrypt S3 bucket
    ######################################################################
    kms_aes_siem = aws_kms.Key(
        self, 'KmsAesSiemLog', description='CMK for SIEM solution',
        removal_policy=core.RemovalPolicy.RETAIN)
    aws_kms.Alias(
        self, 'KmsAesSiemLogAlias', alias_name=kms_cmk_alias,
        target_key=kms_aes_siem,
        removal_policy=core.RemovalPolicy.RETAIN)
    kms_aes_siem.add_to_resource_policy(
        aws_iam.PolicyStatement(
            sid='Allow GuardDuty to use the key',
            actions=['kms:GenerateDataKey'],
            principals=[
                aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            resources=['*'],),)
    kms_aes_siem.add_to_resource_policy(
        aws_iam.PolicyStatement(
            sid='Allow VPC Flow Logs to use the key',
            actions=['kms:Encrypt', 'kms:Decrypt', 'kms:ReEncrypt*',
                     'kms:GenerateDataKey*', 'kms:DescribeKey'],
            principals=[
                aws_iam.ServicePrincipal('delivery.logs.amazonaws.com')],
            resources=['*'],),)
    # basic policy
    key_policy_basic1 = aws_iam.PolicyStatement(
        sid='Allow principals in the account to decrypt log files',
        actions=['kms:DescribeKey', 'kms:ReEncryptFrom'],
        principals=[
            aws_iam.AccountPrincipal(account_id=core.Aws.ACCOUNT_ID)],
        resources=['*'],)
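# ----------------------------------------------------------------------
# Editor's note: a sketch of applying the IsVpc condition as described
# in the note above; it works for both L1 (Cfn*) and L2 constructs
# (assumption: "resource" is any CDK construct already in scope):
def apply_condition(resource, condition):
    # L1 constructs expose cfn_options directly; L2 constructs wrap an
    # L1 resource that is reachable via node.default_child.
    target = (resource if hasattr(resource, "cfn_options")
              else resource.node.default_child)
    target.cfn_options.condition = condition
# ----------------------------------------------------------------------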
kms_aes_siem.add_to_resource_policy(key_policy_basic1) # for CloudTrail key_policy_trail1 = aws_iam.PolicyStatement( sid='Allow CloudTrail to describe key', actions=['kms:DescribeKey'], principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')], resources=['*'], ) kms_aes_siem.add_to_resource_policy(key_policy_trail1) key_policy_trail2 = aws_iam.PolicyStatement( sid=('Allow CloudTrail to encrypt logs'), actions=['kms:GenerateDataKey*'], principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')], resources=['*'], conditions={ 'StringLike': { 'kms:EncryptionContext:aws:cloudtrail:arn': [f'arn:aws:cloudtrail:*:{core.Aws.ACCOUNT_ID}:trail/*'] } }) kms_aes_siem.add_to_resource_policy(key_policy_trail2) ###################################################################### # create s3 bucket ###################################################################### block_pub = aws_s3.BlockPublicAccess(block_public_acls=True, ignore_public_acls=True, block_public_policy=True, restrict_public_buckets=True) s3_geo = aws_s3.Bucket( self, 'S3BucketForGeoip', block_public_access=block_pub, bucket_name=s3bucket_name_geo, # removal_policy=core.RemovalPolicy.DESTROY, ) # create s3 bucket for log collector s3_log = aws_s3.Bucket( self, 'S3BucketForLog', block_public_access=block_pub, bucket_name=s3bucket_name_log, versioned=True, encryption=aws_s3.BucketEncryption.S3_MANAGED, # removal_policy=core.RemovalPolicy.DESTROY, ) # create s3 bucket for aes snapshot s3_snapshot = aws_s3.Bucket( self, 'S3BucketForSnapshot', block_public_access=block_pub, bucket_name=s3bucket_name_snapshot, # removal_policy=core.RemovalPolicy.DESTROY, ) ###################################################################### # IAM Role ###################################################################### # snaphot rule for AES policydoc_snapshot = aws_iam.PolicyDocument(statements=[ aws_iam.PolicyStatement(actions=['s3:ListBucket'], resources=[s3_snapshot.bucket_arn]), aws_iam.PolicyStatement( actions=['s3:GetObject', 's3:PutObject', 's3:DeleteObject'], resources=[s3_snapshot.bucket_arn + '/*']) ]) aes_siem_snapshot_role = aws_iam.Role( self, 'AesSiemSnapshotRole', role_name='aes-siem-snapshot-role', inline_policies=[ policydoc_snapshot, ], assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')) policydoc_assume_snapshrole = aws_iam.PolicyDocument(statements=[ aws_iam.PolicyStatement( actions=['iam:PassRole'], resources=[aes_siem_snapshot_role.role_arn]), ]) aes_siem_deploy_role_for_lambda = aws_iam.Role( self, 'AesSiemDeployRoleForLambda', role_name='aes-siem-deploy-role-for-lambda', managed_policies=[ aws_iam.ManagedPolicy.from_aws_managed_policy_name( 'AmazonESFullAccess'), aws_iam.ManagedPolicy.from_aws_managed_policy_name( 'service-role/AWSLambdaBasicExecutionRole'), ], inline_policies=[policydoc_assume_snapshrole, policydoc_snapshot], assumed_by=aws_iam.ServicePrincipal('lambda.amazonaws.com')) if vpc_type: aes_siem_deploy_role_for_lambda.add_managed_policy( aws_iam.ManagedPolicy.from_aws_managed_policy_name( 'service-role/AWSLambdaVPCAccessExecutionRole')) # for alert from Amazon ES aes_siem_sns_role = aws_iam.Role( self, 'AesSiemSnsRole', role_name='aes-siem-sns-role', assumed_by=aws_iam.ServicePrincipal('es.amazonaws.com')) ###################################################################### # in VPC ###################################################################### if vpc_type: slr_aes = aws_iam.CfnServiceLinkedRole( self, 'AWSServiceRoleForAmazonElasticsearchService', aws_service_name='es.amazonaws.com', 
description='Created by cloudformation of aes-siem stack') slr_aes.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN ###################################################################### # SQS for es-laoder's DLQ ###################################################################### sqs_aes_siem_dlq = aws_sqs.Queue( self, 'AesSiemDlq', queue_name='aes-siem-dlq', retention_period=core.Duration.days(14)) ###################################################################### # Setup Lambda ###################################################################### # setup lambda of es_loader lambda_es_loader_vpc_kwargs = {} if vpc_type: lambda_es_loader_vpc_kwargs = { 'security_group': sg_vpc_noinbound_aes_siem, 'vpc': vpc_aes_siem, 'vpc_subnets': vpc_subnets, } lambda_es_loader = aws_lambda.Function( self, 'LambdaEsLoader', **lambda_es_loader_vpc_kwargs, function_name='aes-siem-es-loader', runtime=aws_lambda.Runtime.PYTHON_3_8, # code=aws_lambda.Code.asset('../lambda/es_loader.zip'), code=aws_lambda.Code.asset('../lambda/es_loader'), handler='index.lambda_handler', memory_size=512, timeout=core.Duration.seconds(600), dead_letter_queue_enabled=True, dead_letter_queue=sqs_aes_siem_dlq, environment={'GEOIP_BUCKET': s3bucket_name_geo}) es_loader_newver = lambda_es_loader.add_version( name=__version__, description=__version__) es_loader_opt = es_loader_newver.node.default_child.cfn_options es_loader_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN # send only # sqs_aes_siem_dlq.grant(lambda_es_loader, 'sqs:SendMessage') # send and reieve. but it must be loop sqs_aes_siem_dlq.grant(lambda_es_loader, 'sqs:SendMessage', 'sqs:ReceiveMessage', 'sqs:DeleteMessage', 'sqs:GetQueueAttributes') lambda_geo = aws_lambda.Function( self, 'LambdaGeoipDownloader', function_name='aes-siem-geoip-downloader', runtime=aws_lambda.Runtime.PYTHON_3_8, code=aws_lambda.Code.asset('../lambda/geoip_downloader'), handler='index.lambda_handler', memory_size=320, timeout=core.Duration.seconds(300), environment={ 's3bucket_name': s3bucket_name_geo, 'license_key': geoip_license_key.value_as_string, }) lambda_geo_newver = lambda_geo.add_version(name=__version__, description=__version__) lamba_geo_opt = lambda_geo_newver.node.default_child.cfn_options lamba_geo_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN ###################################################################### # setup elasticsearch ###################################################################### lambda_deploy_es = aws_lambda.Function( self, 'LambdaDeployAES', function_name='aes-siem-deploy-aes', runtime=aws_lambda.Runtime.PYTHON_3_8, # code=aws_lambda.Code.asset('../lambda/deploy_es.zip'), code=aws_lambda.Code.asset('../lambda/deploy_es'), handler='index.aes_domain_handler', memory_size=128, timeout=core.Duration.seconds(720), environment={ 'accountid': core.Aws.ACCOUNT_ID, 'aes_domain_name': aes_domain_name, 'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn, 'es_loader_role': lambda_es_loader.role.role_arn, 'allow_source_address': allow_source_address.value_as_string, }, role=aes_siem_deploy_role_for_lambda, ) if vpc_type: lambda_deploy_es.add_environment('vpc_subnet_id', subnet1.subnet_id) lambda_deploy_es.add_environment('security_group_id', sg_vpc_aes_siem.security_group_id) else: lambda_deploy_es.add_environment('vpc_subnet_id', 'None') lambda_deploy_es.add_environment('security_group_id', 'None') deploy_es_newver = lambda_deploy_es.add_version( name=__version__, description=__version__) deploy_es_opt = 
        # execute lambda_deploy_es to deploy Amazon ES Domain
        aes_domain = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainDeployedR2',
            service_token=lambda_deploy_es.function_arn,
        )
        aes_domain.add_override('Properties.ConfigVersion', __version__)
        aes_domain.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        es_endpoint = aes_domain.get_att('es_endpoint').to_string()
        lambda_es_loader.add_environment('ES_ENDPOINT', es_endpoint)

        lambda_configure_es_vpc_kwargs = {}
        if vpc_type:
            lambda_configure_es_vpc_kwargs = {
                'security_group': sg_vpc_noinbound_aes_siem,
                'vpc': vpc_aes_siem,
                'vpc_subnets': aws_ec2.SubnetSelection(subnets=[subnet1, ]),
            }
        lambda_configure_es = aws_lambda.Function(
            self, 'LambdaConfigureAES', **lambda_configure_es_vpc_kwargs,
            function_name='aes-siem-configure-aes',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.asset('../lambda/deploy_es'),
            handler='index.aes_config_handler',
            memory_size=128,
            timeout=core.Duration.seconds(300),
            environment={
                'accountid': core.Aws.ACCOUNT_ID,
                'aes_domain_name': aes_domain_name,
                'aes_admin_role': aes_siem_deploy_role_for_lambda.role_arn,
                'es_loader_role': lambda_es_loader.role.role_arn,
                'allow_source_address': allow_source_address.value_as_string,
                'es_endpoint': es_endpoint,
            },
            role=aes_siem_deploy_role_for_lambda,
        )
        if vpc_type:
            lambda_configure_es.add_environment(
                'vpc_subnet_id', subnet1.subnet_id)
            lambda_configure_es.add_environment(
                'security_group_id', sg_vpc_aes_siem.security_group_id)
        else:
            lambda_configure_es.add_environment('vpc_subnet_id', 'None')
            lambda_configure_es.add_environment('security_group_id', 'None')
        configure_es_newver = lambda_configure_es.add_version(
            name=__version__, description=__version__)
        configure_es_opt = configure_es_newver.node.default_child.cfn_options
        configure_es_opt.deletion_policy = core.CfnDeletionPolicy.RETAIN

        aes_config = aws_cloudformation.CfnCustomResource(
            self, 'AesSiemDomainConfiguredR2',
            service_token=lambda_configure_es.function_arn,
        )
        aes_config.add_override('Properties.ConfigVersion', __version__)
        aes_config.add_depends_on(aes_domain)
        aes_config.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        es_arn = (f'arn:aws:es:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}'
                  f':domain/{aes_domain_name}')
        # grant permission to es_loader role
        lambda_es_loader.role.attach_inline_policy(
            aws_iam.Policy(
                self, 'aes-siem-policy-to-load-entries-to-es',
                policy_name='aes-siem-policy-to-load-entries-to-es',
                statements=[
                    aws_iam.PolicyStatement(
                        actions=['es:*'],
                        resources=[es_arn + '/*', ]),
                ]))

        # grant additional permission to es_loader role
        additional_kms_cmks = self.node.try_get_context('additional_kms_cmks')
        if additional_kms_cmks:
            lambda_es_loader.role.attach_inline_policy(
                aws_iam.Policy(
                    self, 'access_to_additional_cmks',
                    policy_name='access_to_additional_cmks',
                    statements=[
                        aws_iam.PolicyStatement(
                            actions=['kms:Decrypt'],
                            resources=sorted(set(additional_kms_cmks)))
                    ]))
        additional_buckets = self.node.try_get_context('additional_s3_buckets')
        if additional_buckets:
            buckets_list = []
            for bucket in additional_buckets:
                buckets_list.append(f'arn:aws:s3:::{bucket}')
                buckets_list.append(f'arn:aws:s3:::{bucket}/*')
            lambda_es_loader.role.attach_inline_policy(
                aws_iam.Policy(
                    self, 'access_to_additional_buckets',
                    policy_name='access_to_additional_buckets',
                    statements=[
                        aws_iam.PolicyStatement(
                            actions=['s3:GetObject*', 's3:GetBucket*',
                                     's3:List*'],
                            resources=sorted(set(buckets_list)))
                    ]))
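        # The two try_get_context() lookups above are optional and resolve
        # from the CDK context. A sketch of the corresponding cdk.json
        # entries, with placeholder ARNs and bucket names:
        #
        #   "context": {
        #     "additional_kms_cmks": [
        #       "arn:aws:kms:REGION:ACCOUNT_ID:key/KEY_ID"
        #     ],
        #     "additional_s3_buckets": ["example-log-bucket"]
        #   }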
        kms_aes_siem.grant_decrypt(lambda_es_loader)

        ######################################################################
        # s3 notification and grant permission
        ######################################################################
        s3_geo.grant_read_write(lambda_geo)
        s3_geo.grant_read(lambda_es_loader)
        s3_log.grant_read(lambda_es_loader)

        # create s3 notification for es_loader
        notification = aws_s3_notifications.LambdaDestination(lambda_es_loader)

        # assign notification for the s3 PUT event type
        # most log systems use PUT, but CLB uses POST & multipart upload
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='AWSLogs/'))

        # for user logs, not AWS logs
        s3_log.add_event_notification(
            aws_s3.EventType.OBJECT_CREATED, notification,
            aws_s3.NotificationKeyFilter(prefix='UserLogs/'))

        # download geoip to S3 once by executing lambda_geo
        get_geodb = aws_cloudformation.CfnCustomResource(
            self, 'ExecLambdaGeoipDownloader',
            service_token=lambda_geo.function_arn,
        )
        get_geodb.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

        # download geoip every day at 00:20 UTC
        rule = aws_events.Rule(
            self, 'CwlRuleLambdaGeoipDownloaderDilly',
            schedule=aws_events.Schedule.cron(
                minute='20', hour='0', month='*', week_day='*', year='*'),
        )
        rule.add_target(aws_events_targets.LambdaFunction(lambda_geo))

        ######################################################################
        # bucket policy
        ######################################################################
        s3_awspath = s3_log.bucket_arn + '/AWSLogs/' + core.Aws.ACCOUNT_ID
        bucket_policy_common1 = aws_iam.PolicyStatement(
            sid='ELB Policy',
            principals=[aws_iam.AccountPrincipal(
                account_id=elb_accounts.find_in_map(
                    core.Aws.REGION, 'accountid'))],
            actions=['s3:PutObject'],
            resources=[s3_awspath + '/*'],
        )
        # NLB / ALB / R53resolver / VPC Flow Logs
        bucket_policy_elb1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For ALB NLB R53Resolver Flowlogs',
            principals=[
                aws_iam.ServicePrincipal('delivery.logs.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],
        )
        bucket_policy_elb2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For ALB NLB R53Resolver Flowlogs',
            principals=[
                aws_iam.ServicePrincipal('delivery.logs.amazonaws.com')],
            actions=['s3:PutObject'],
            resources=[s3_awspath + '/*'],
            conditions={'StringEquals': {
                's3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_common1)
        s3_log.add_to_resource_policy(bucket_policy_elb1)
        s3_log.add_to_resource_policy(bucket_policy_elb2)

        # CloudTrail
        bucket_policy_trail1 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryAclCheck For Cloudtrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:GetBucketAcl'],
            resources=[s3_log.bucket_arn],
        )
        bucket_policy_trail2 = aws_iam.PolicyStatement(
            sid='AWSLogDeliveryWrite For CloudTrail',
            principals=[aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
            actions=['s3:PutObject'],
            resources=[s3_awspath + '/*'],
            conditions={'StringEquals': {
                's3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_trail1)
        s3_log.add_to_resource_policy(bucket_policy_trail2)
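        # CloudTrail's documented delivery flow matches the two statements
        # above: it first calls s3:GetBucketAcl on the bucket, then writes
        # objects with the bucket-owner-full-control ACL under
        #   s3://<log bucket>/AWSLogs/<account-id>/CloudTrail/<region>/...
        # which is why bucket_policy_trail2 is scoped to s3_awspath + '/*'.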
        # GuardDuty
        bucket_policy_gd1 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to use the getBucketLocation operation',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:GetBucketLocation'],
            resources=[s3_log.bucket_arn],
        )
        bucket_policy_gd2 = aws_iam.PolicyStatement(
            sid='Allow GuardDuty to upload objects to the bucket',
            principals=[aws_iam.ServicePrincipal('guardduty.amazonaws.com')],
            actions=['s3:PutObject'],
            resources=[s3_log.bucket_arn + '/*'],
        )
        bucket_policy_gd5 = aws_iam.PolicyStatement(
            sid='Deny non-HTTPS access',
            effect=aws_iam.Effect.DENY,
            actions=['s3:*'],
            resources=[s3_log.bucket_arn + '/*'],
            conditions={'Bool': {'aws:SecureTransport': 'false'}})
        bucket_policy_gd5.add_any_principal()
        s3_log.add_to_resource_policy(bucket_policy_gd1)
        s3_log.add_to_resource_policy(bucket_policy_gd2)
        s3_log.add_to_resource_policy(bucket_policy_gd5)

        # Config
        bucket_policy_config1 = aws_iam.PolicyStatement(
            sid='AWSConfig BucketPermissionsCheck and BucketExistenceCheck',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:GetBucketAcl', 's3:ListBucket'],
            resources=[s3_log.bucket_arn],
        )
        bucket_policy_config2 = aws_iam.PolicyStatement(
            sid='AWSConfigBucketDelivery',
            principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
            actions=['s3:PutObject'],
            resources=[s3_awspath + '/Config/*'],
            conditions={'StringEquals': {
                's3:x-amz-acl': 'bucket-owner-full-control'}})
        s3_log.add_to_resource_policy(bucket_policy_config1)
        s3_log.add_to_resource_policy(bucket_policy_config2)

        # geoip
        bucket_policy_geo1 = aws_iam.PolicyStatement(
            sid='Allow geoip downloader and es-loader to read/write',
            principals=[lambda_es_loader.role, lambda_geo.role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_geo.bucket_arn + '/*'],
        )
        s3_geo.add_to_resource_policy(bucket_policy_geo1)

        # ES Snapshot
        bucket_policy_snapshot = aws_iam.PolicyStatement(
            sid='Allow ES to store snapshot',
            principals=[aes_siem_snapshot_role],
            actions=['s3:PutObject', 's3:GetObject', 's3:DeleteObject'],
            resources=[s3_snapshot.bucket_arn + '/*'],
        )
        s3_snapshot.add_to_resource_policy(bucket_policy_snapshot)

        ######################################################################
        # for multiaccount / organizations
        ######################################################################
        if org_id or no_org_ids:
            ##################################################################
            # KMS key policy for multiaccount / organizations
            ##################################################################
            # for CloudTrail
            cond_tail2 = self.make_resource_list(
                path='arn:aws:cloudtrail:*:', tail=':trail/*',
                keys=[org_mgmt_id, no_org_ids])
            key_policy_mul_trail2 = aws_iam.PolicyStatement(
                sid='Allow CloudTrail to encrypt logs for multiaccounts',
                actions=['kms:GenerateDataKey*'],
                principals=[
                    aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
                resources=['*'],
                conditions={'StringLike': {
                    'kms:EncryptionContext:aws:cloudtrail:arn': cond_tail2}})
            kms_aes_siem.add_to_resource_policy(key_policy_mul_trail2)

            # for replication
            key_policy_rep1 = aws_iam.PolicyStatement(
                sid=('Enable cross account encrypt access for S3 Cross '
                     'Region Replication'),
                actions=['kms:Encrypt'],
                principals=self.make_account_plincipals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                resources=['*'],
            )
            kms_aes_siem.add_to_resource_policy(key_policy_rep1)

            ##################################################################
            # Bucket Policy for multiaccount / organizations
            ##################################################################
            s3_log_bucket_arn = 'arn:aws:s3:::' + s3bucket_name_log
            # for CloudTrail
            s3_mulpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/*',
                keys=[org_id, org_mgmt_id, no_org_ids])
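            # make_resource_list() (defined elsewhere in this stack) expands
            # each org/account id in keys into one ARN per entry, so
            # s3_mulpaths resolves to roughly the following (ids are
            # illustrative):
            #   [f'{s3_log_bucket_arn}/AWSLogs/o-EXAMPLE/*',
            #    f'{s3_log_bucket_arn}/AWSLogs/111122223333/*', ...]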
            bucket_policy_org_trail = aws_iam.PolicyStatement(
                sid='AWSCloudTrailWrite for Multiaccounts / Organizations',
                principals=[
                    aws_iam.ServicePrincipal('cloudtrail.amazonaws.com')],
                actions=['s3:PutObject'],
                resources=s3_mulpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_org_trail)

            # config
            s3_conf_multpaths = self.make_resource_list(
                path=f'{s3_log_bucket_arn}/AWSLogs/', tail='/Config/*',
                keys=[org_id, org_mgmt_id, no_org_ids])
            bucket_policy_mul_config2 = aws_iam.PolicyStatement(
                sid='AWSConfigBucketDelivery',
                principals=[aws_iam.ServicePrincipal('config.amazonaws.com')],
                actions=['s3:PutObject'],
                resources=s3_conf_multpaths,
                conditions={'StringEquals': {
                    's3:x-amz-acl': 'bucket-owner-full-control'}})
            s3_log.add_to_resource_policy(bucket_policy_mul_config2)

            # for replication
            bucket_policy_rep1 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on objects',
                principals=self.make_account_plincipals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:ReplicateDelete', 's3:ReplicateObject',
                         's3:ReplicateTags', 's3:GetObjectVersionTagging',
                         's3:ObjectOwnerOverrideToBucketOwner'],
                resources=[f'{s3_log_bucket_arn}/*'])
            bucket_policy_rep2 = aws_iam.PolicyStatement(
                sid='PolicyForDestinationBucket / Permissions on bucket',
                principals=self.make_account_plincipals(
                    org_mgmt_id, org_member_ids, no_org_ids),
                actions=['s3:List*', 's3:GetBucketVersioning',
                         's3:PutBucketVersioning'],
                resources=[f'{s3_log_bucket_arn}'])
            s3_log.add_to_resource_policy(bucket_policy_rep1)
            s3_log.add_to_resource_policy(bucket_policy_rep2)

        ######################################################################
        # SNS topic for Amazon ES Alert
        ######################################################################
        sns_topic = aws_sns.Topic(
            self, 'SnsTopic', topic_name='aes-siem-alert',
            display_name='AES SIEM')
        sns_topic.add_subscription(
            aws_sns_subscriptions.EmailSubscription(
                email_address=sns_email.value_as_string))
        sns_topic.grant_publish(aes_siem_sns_role)

        ######################################################################
        # output of CFn
        ######################################################################
        kibanaurl = f'https://{es_endpoint}/_plugin/kibana/'
        kibanaadmin = aes_domain.get_att('kibanaadmin').to_string()
        kibanapass = aes_domain.get_att('kibanapass').to_string()

        core.CfnOutput(self, 'RoleDeploy', export_name='role-deploy',
                       value=aes_siem_deploy_role_for_lambda.role_arn)
        core.CfnOutput(self, 'KibanaUrl', export_name='kibana-url',
                       value=kibanaurl)
        core.CfnOutput(self, 'KibanaPassword', export_name='kibana-pass',
                       value=kibanapass,
                       description='Please change the password in Kibana ASAP')
        core.CfnOutput(self, 'KibanaAdmin', export_name='kibana-admin',
                       value=kibanaadmin)
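        # After deployment, the outputs above can be retrieved with the AWS
        # CLI (the stack name here is illustrative; use the name this stack
        # was deployed under):
        #   aws cloudformation describe-stacks --stack-name aes-siem \
        #       --query 'Stacks[0].Outputs'
        # As the KibanaPassword description says, change the initial
        # password in Kibana immediately after the first login.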