def __init__(self, app: core.App, id: str, props, **kwargs) -> None:
    super().__init__(app, id, **kwargs)

    # CP S3 bucket
    bucket = s3.Bucket(
        self,
        "ArtifactBucket",
        bucket_name=f"{props['namespace'].lower()}-cp-{core.Aws.ACCOUNT_ID}",
        versioned=True,
        removal_policy=core.RemovalPolicy.DESTROY)

    # CFN Deployer role
    cfn_role = iam.Role(
        self,
        "Role",
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal('cloudformation.amazonaws.com'),
            iam.AccountPrincipal('560360184571')))
    cfn_role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "cloudformation:*", "lambda:*", "iam:*",
                "apigateway:*", "dynamodb:*"
            ],
            resources=["*"]))

    self.output_props = props.copy()
    self.output_props['bucket'] = bucket
    self.output_props['cfn_role'] = cfn_role
def _create_role_fargate_profile(self) -> iam.Role:
    name: str = f"orbit-{self.context.name}-eks-fargate-profile-role"
    return iam.Role(
        scope=self,
        id=name,
        role_name=name,
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal("eks.amazonaws.com"),
            iam.ServicePrincipal("eks-fargate-pods.amazonaws.com"),
        ),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonEC2ContainerRegistryReadOnly"
            ),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonEKSFargatePodExecutionRolePolicy"
            ),
        ],
        inline_policies={
            "Logging": iam.PolicyDocument(
                statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            "logs:CreateLogStream",
                            "logs:CreateLogGroup",
                            "logs:DescribeLogStreams",
                            "logs:PutLogEvents",
                        ],
                        resources=["*"],
                    )
                ]
            )
        },
    )
def _create_role_cluster(self) -> iam.Role:
    name: str = f"orbit-{self.context.name}-eks-cluster-role"
    role = iam.Role(
        scope=self,
        id=name,
        role_name=name,
        assumed_by=cast(
            iam.IPrincipal,
            iam.CompositePrincipal(
                iam.ServicePrincipal("eks.amazonaws.com"),
                iam.ServicePrincipal("eks-fargate-pods.amazonaws.com"),
            ),
        ),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(managed_policy_name="AmazonEKSClusterPolicy"),
            iam.ManagedPolicy.from_aws_managed_policy_name(managed_policy_name="AmazonEKSServicePolicy"),
            iam.ManagedPolicy.from_aws_managed_policy_name(managed_policy_name="AmazonEKSVPCResourceController"),
        ],
        inline_policies={
            "Extras": iam.PolicyDocument(
                statements=[
                    iam.PolicyStatement(
                        actions=[
                            "elasticloadbalancing:*",
                            "ec2:CreateSecurityGroup",
                            "ec2:Describe*",
                            "cloudwatch:PutMetricData",
                            "iam:ListAttachedRolePolicies",
                        ],
                        resources=["*"],
                    ),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["iam:AttachRolePolicy", "iam:PutRolePolicy", "s3:*"],
                        resources=[
                            "arn:aws:iam::*:role/aws-service-role/s3.data-source.lustre.fsx.amazonaws.com/",
                            f"{self.context.scratch_bucket_arn}",
                            f"{self.context.scratch_bucket_arn}/*",
                        ],
                    ),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            "iam:CreateServiceLinkedRole",
                            "s3:ListBucket",
                            "fsx:CreateFileSystem",
                            "fsx:DeleteFileSystem",
                            "fsx:DescribeFileSystems",
                        ],
                        resources=["*"],
                    ),
                ]
            )
        },
    )
    return role
def create_role(self):
    return iam.Role(
        self,
        id='ssl-sqs-kms-role',
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal('s3.amazonaws.com'),
            iam.ServicePrincipal('sqs.amazonaws.com'),
            iam.ServicePrincipal('kms.amazonaws.com')),
        role_name='ssl-sqs-kms-role')
def generate_pipeline_service_role(scope):
    role = iam.Role(
        scope=scope,
        id="JVSANTOSTier1PipelineServiceRole",
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal("codepipeline.amazonaws.com"),
            iam.ServicePrincipal("codebuild.amazonaws.com")),
        role_name="JVSANTOSTier1PipelineServiceRole")
    role.attach_inline_policy(generate_pipeline_policy(scope))
    return role
def __init__(self, scope: core.Stack, id: str, **kwargs):
    super().__init__(scope, id, **kwargs)

    # create an iam role to be assumed later by codebuild
    self.role = iam.Role(
        self,
        "CodeBuildRole",
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal('codebuild.amazonaws.com'),
            iam.ServicePrincipal('ec2.amazonaws.com')))

    # TODO: Don't need admin, let's make this least privilege
    self.role.add_to_policy(
        iam.PolicyStatement(
            actions=['*'],
            resources=['*'],
        ))

    # create a pipeline
    self.pipeline = codepipeline.Pipeline(self, "Pipeline", pipeline_name='EKS')

    # add a source stage
    self.source_stage = self.pipeline.add_stage(stage_name="Source")
    self.source_artifact = codepipeline.Artifact()

    # codebuild projects
    self.codebuild_deploy = CodeBuildProjects(
        self,
        "CodebuildDeploy",
        buildspec='buildspec.yml',
        codepipelinerole=self.role)

    # add source action
    self.source_stage.add_action(
        codepipeline_actions.GitHubSourceAction(
            oauth_token=core.SecretValue.secrets_manager(
                secret_id='prod/github_oauth_token',
                json_field='github_oauth_token'),
            output=self.source_artifact,
            owner=config['CODEPIPELINE']['GITHUB_OWNER'],
            repo=config['CODEPIPELINE']['GITHUB_REPO'],
            action_name='Pull_Source',
            run_order=1,
        ))

    # add deploy stage
    self.deploy_stage = self.pipeline.add_stage(stage_name='Deploy')

    # add deploy codebuild action
    self.deploy_stage.add_action(
        codepipeline_actions.CodeBuildAction(
            input=self.source_artifact,
            project=self.codebuild_deploy.project,
            action_name='Deploy_EKS_Cluster'))
def _assumed_by(self, principals: typing.List[typing.Union[aws_iam.IPrincipal, str]]) -> aws_iam.IPrincipal:
    pps = []
    for principal in principals:
        if isinstance(principal, str):
            if principal.startswith("arn:aws"):
                pps.append(aws_iam.ArnPrincipal(arn=principal))
            else:
                pps.append(aws_iam.AccountPrincipal(account_id=principal))
        else:
            pps.append(principal)
    if len(pps) > 1:
        return aws_iam.CompositePrincipal(*pps)
    return pps[0]
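A quick usage sketch for the helper above; the construct id, role ARN, and account id are illustrative, not from the source:

# Illustrative only: mixing an ARN string, a bare account id, and a
# ready-made principal. The helper wraps them in a CompositePrincipal
# whenever more than one principal is supplied.
role = aws_iam.Role(
    self, "CrossAccountRole",
    assumed_by=self._assumed_by([
        "arn:aws:iam::111122223333:role/deployer",         # becomes ArnPrincipal
        "444455556666",                                    # becomes AccountPrincipal
        aws_iam.ServicePrincipal("lambda.amazonaws.com"),  # passed through as-is
    ]),
)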
def _create_role_cluster(self) -> iam.Role:
    name: str = f"orbit-{self.context.name}-eks-cluster-role"
    role = iam.Role(
        scope=self,
        id=name,
        role_name=name,
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal("eks.amazonaws.com"),
            iam.ServicePrincipal("eks-fargate-pods.amazonaws.com"),
        ),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(managed_policy_name="AmazonEKSClusterPolicy"),
            iam.ManagedPolicy.from_aws_managed_policy_name(managed_policy_name="AmazonEKSServicePolicy"),
            iam.ManagedPolicy.from_aws_managed_policy_name(managed_policy_name="AmazonEKSVPCResourceController"),
        ],
        inline_policies={
            "Extras": iam.PolicyDocument(
                statements=[
                    iam.PolicyStatement(
                        actions=[
                            "elasticloadbalancing:*",
                            "ec2:CreateSecurityGroup",
                            "ec2:Describe*",
                            "cloudwatch:PutMetricData",
                            "iam:ListAttachedRolePolicies",
                        ],
                        resources=["*"],
                    ),
                    # FIXME can this be moved to a service role and only be allowed to access the
                    # team key after chamcca@ changes
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=[
                            "kms:CreateGrant",
                            "kms:ListGrants",
                            "kms:RevokeGrant",
                            "kms:DescribeKey",
                            "kms:Encrypt",
                            "kms:Decrypt",
                            "kms:ReEncrypt*",
                            "kms:GenerateDataKey*",
                        ],
                        resources=["*"],
                    ),
                ]
            )
        },
    )
    return role
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # security group
    self.webapp_ec2_security_grp = ec2.SecurityGroup(
        self,
        "healthlake_webapp_ec2_security_grp",
        vpc=vpc,
        description="security group for the EC2 instance hosting the webapp",
        allow_all_outbound=True,
    )

    code_server_role = iam.Role(
        self,
        "CodeServerRole",
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal("ec2.amazonaws.com")),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess")
        ])

    # Open ports 22, 80, and 443
    self.webapp_ec2_security_grp.add_ingress_rule(
        ec2.Peer.any_ipv4(), ec2.Port.tcp(22), "ssh")
    self.webapp_ec2_security_grp.add_ingress_rule(
        ec2.Peer.any_ipv4(), ec2.Port.tcp(80), "http")
    self.webapp_ec2_security_grp.add_ingress_rule(
        ec2.Peer.any_ipv4(), ec2.Port.tcp(443), "https")

    core.Tags.of(self.webapp_ec2_security_grp).add(
        "Name", "webapp_ec2_security_grp")

    ## EC2 instance to host the webapp
    self.webAppInstance = ec2.Instance(
        self,
        "healthlake-knowledge-webapp-ec2",
        instance_type=ec2.InstanceType(EC2_INSTANCE_TYPE),
        machine_image=ec2.AmazonLinuxImage(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
        role=code_server_role,
        vpc=vpc,
        vpc_subnets={"subnet_type": ec2.SubnetType.PUBLIC},
        key_name=KEY_PAIR_NAME,
        security_group=self.webapp_ec2_security_grp)
def __init__(self, scope: core.Construct, id: str, TargetS3="default", **kwargs):
    super().__init__(scope, id, **kwargs)
    self.function_list = {}

    self.lambda_compute_role = _iam.Role(
        self,
        'lambda_compute_role',
        assumed_by=_iam.CompositePrincipal(
            _iam.ServicePrincipal('lambda.amazonaws.com'),
        ),
        managed_policies=[
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchLogsFullAccess")
        ])
    TargetS3.grant_read_write(self.lambda_compute_role)

    self.Get_Job_List = _lambda.Function(
        self,
        'Get_Job_List',
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler='get_job_list.handler',
        code=_lambda.Code.asset('workshop/lambda/get_job_list'),
        # environment={
        #     'BUCKET': "",
        #     'KEY': "",
        # },
        timeout=core.Duration.seconds(15),
        role=self.lambda_compute_role)
    self.function_list["Get_Job_List"] = self.Get_Job_List

    self.Get_Output_size = _lambda.Function(
        self,
        'Get_Output_size',
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler='get_output_size.handler',
        code=_lambda.Code.asset('workshop/lambda/get_output_size'),
        timeout=core.Duration.seconds(15),
        role=self.lambda_compute_role)
    self.function_list["Get_Output_size"] = self.Get_Output_size
def _build_kms_key_for_env(self) -> None:
    administrator_arns: List[str] = []  # A place to add other admins if needed for KMS
    admin_principals = iam.CompositePrincipal(
        *[iam.ArnPrincipal(arn) for arn in administrator_arns],
        iam.ArnPrincipal(f"arn:aws:iam::{self.context.account_id}:root"),
    )
    self.env_kms_key: kms.Key = kms.Key(
        self,
        id="kms-key",
        removal_policy=core.RemovalPolicy.RETAIN,
        enabled=True,
        enable_key_rotation=True,
        policy=iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["kms:*"],
                resources=["*"],
                principals=[admin_principals])
        ]),
    )
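Since CompositePrincipal implements IPrincipal, it can be passed as a single entry of `principals` as above. A minimal follow-up sketch, assuming a hypothetical workload role that is not part of the source, of granting key usage through the standard grant API:

# Hypothetical follow-up: let a Lambda execution role use the environment key.
workload_role = iam.Role(
    self, "WorkloadRole",
    assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
)
self.env_kms_key.grant_encrypt_decrypt(workload_role)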
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    self.PREFIX = id

    ## **************** Create HealthLake Knowledge Analyzer Service Role ****************
    self.service_role = aws_iam.Role(
        self,
        f'{self.PREFIX}-ServiceRole',
        assumed_by=aws_iam.CompositePrincipal(
            aws_iam.ServicePrincipal('sns.amazonaws.com'),
            aws_iam.ServicePrincipal('sqs.amazonaws.com'),
            aws_iam.ServicePrincipal('lambda.amazonaws.com'),
            aws_iam.ServicePrincipal('rds.amazonaws.com'),
            aws_iam.ServicePrincipal('healthlake.amazonaws.com'),
            aws_iam.ServicePrincipal('ec2.amazonaws.com'),
            aws_iam.ServicePrincipal('kendra.amazonaws.com'),
            aws_iam.ServicePrincipal('sagemaker.amazonaws.com'),
        ),
        role_name=f"{self.PREFIX}-ServiceRole",
    )
    self.updateServiceRolePermissions()
def __init__(self, scope: core.Construct, id: str, UserName="******", StateMachine="default", **kwargs):
    super().__init__(scope, id, **kwargs)
    self.taskdefine = {}

    self.lambda_compute_role = _iam.Role(
        self,
        'lambda_compute_role',
        assumed_by=_iam.CompositePrincipal(
            _iam.ServicePrincipal('lambda.amazonaws.com'),
        ),
        managed_policies=[
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchLogsFullAccess")
        ])
    StateMachine.grant_start_execution(self.lambda_compute_role)

    self.Call_Stepfunctions = _lambda.Function(
        self,
        'Call_Stepfunctions',
        runtime=_lambda.Runtime.PYTHON_3_7,
        handler='call_stepfunctions.handler',
        code=_lambda.Code.asset('workshop/lambda/call_stepfunctions'),
        timeout=core.Duration.seconds(15),
        environment={'StatemachineArn': StateMachine.state_machine_arn},
        role=self.lambda_compute_role)

    self.APIGW = _apigateway.LambdaRestApi(
        self,
        "Apigateway-" + UserName,
        endpoint_types=[_apigateway.EndpointType.REGIONAL],
        handler=self.Call_Stepfunctions)
    self.taskdefine["SubmitForm"] = self.APIGW
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    ################################################################################
    # Set up permissions
    ro_buckets = set()
    for bucket in props['ro_buckets']:
        tmp_bucket = s3.Bucket.from_bucket_name(self, bucket, bucket_name=bucket)
        ro_buckets.add(tmp_bucket)

    rw_buckets = set()
    for bucket in props['rw_buckets']:
        tmp_bucket = s3.Bucket.from_bucket_name(self, bucket, bucket_name=bucket)
        rw_buckets.add(tmp_bucket)

    batch_service_role = iam.Role(
        self,
        'BatchServiceRole',
        assumed_by=iam.ServicePrincipal('batch.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSBatchServiceRole')
        ])

    spotfleet_role = iam.Role(
        self,
        'AmazonEC2SpotFleetRole',
        assumed_by=iam.ServicePrincipal('spotfleet.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonEC2SpotFleetTaggingRole')
        ])

    # Create role for Batch instances
    batch_instance_role = iam.Role(
        self,
        'BatchInstanceRole',
        role_name='RnasumBatchInstanceRole',
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal('ec2.amazonaws.com'),
            iam.ServicePrincipal('ecs.amazonaws.com')),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonEC2RoleforSSM'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonEC2ContainerServiceforEC2Role')
        ])
    batch_instance_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                "ec2:Describe*", "ec2:AttachVolume", "ec2:CreateVolume",
                "ec2:CreateTags", "ec2:ModifyInstanceAttribute"
            ],
            resources=["*"]))
    batch_instance_role.add_to_policy(
        iam.PolicyStatement(actions=["ecs:ListClusters"], resources=["*"]))
    for bucket in ro_buckets:
        bucket.grant_read(batch_instance_role)
    for bucket in rw_buckets:
        # TODO: restrict write to paths with */rnasum/*
        bucket.grant_read_write(batch_instance_role)

    # Turn the instance role into an Instance Profile
    batch_instance_profile = iam.CfnInstanceProfile(
        self,
        'BatchInstanceProfile',
        instance_profile_name='RnasumBatchInstanceProfile',
        roles=[batch_instance_role.role_name])

    ################################################################################
    # Minimal networking
    # TODO: import resource created with TF
    vpc = props['vpc']

    ################################################################################
    # Set up Batch compute resources

    # Configure BlockDevice to expand instance disk space (if needed?)
    block_device_mappings = [{
        'deviceName': '/dev/xvdf',
        'ebs': {
            'deleteOnTermination': True,
            'volumeSize': 1024,
            'volumeType': 'gp2'
        }
    }]

    launch_template = ec2.CfnLaunchTemplate(
        self,
        'RnasumBatchComputeLaunchTemplate',
        launch_template_name='RnasumBatchComputeLaunchTemplate',
        launch_template_data={
            # 'userData': core.Fn.base64(user_data_script),  # FIXME: may not need this for RNAsum case? see job_definition below
            'blockDeviceMappings': block_device_mappings
        })

    launch_template_spec = batch.LaunchTemplateSpecification(
        launch_template_name=launch_template.launch_template_name,
        version='$Latest')

    my_compute_res = batch.ComputeResources(
        type=batch.ComputeResourceType.SPOT,
        allocation_strategy=batch.AllocationStrategy.BEST_FIT_PROGRESSIVE,
        desiredv_cpus=0,
        maxv_cpus=80,
        minv_cpus=0,
        image=ec2.MachineImage.generic_linux(
            ami_map={'ap-southeast-2': props['compute_env_ami']}),
        launch_template=launch_template_spec,
        spot_fleet_role=spotfleet_role,
        instance_role=batch_instance_profile.instance_profile_name,
        vpc=vpc,
        # compute_resources_tags=core.Tag('Creator', 'Batch')
    )
    # XXX: How to add more than one tag above??
    # core.Tag.add(my_compute_res, 'Foo', 'Bar')

    my_compute_env = batch.ComputeEnvironment(
        self,
        'RnasumBatchComputeEnv',
        compute_environment_name="RnasumBatchComputeEnv",
        service_role=batch_service_role,
        compute_resources=my_compute_res)

    job_queue = batch.JobQueue(
        self,
        'RnasumJobQueue',
        job_queue_name='rnasum_job_queue',
        compute_environments=[
            batch.JobQueueComputeEnvironment(
                compute_environment=my_compute_env, order=1)
        ],
        priority=10)

    # this is the equivalent of
    # https://github.com/umccr/infrastructure/blob/master/terraform/stacks/wts_report/jobs/wts_report.json
    default_container_props = {
        'image': props['container_image'],
        'vcpus': 2,
        'memory': 2048,
        'command': ['/opt/container/WTS-report-wrapper.sh', 'Ref::vcpus'],
        'volumes': [{
            'host': {'sourcePath': '/mnt'},
            'name': 'work'
        }, {
            'host': {'sourcePath': '/opt/container'},
            'name': 'container'
        }],
        'mountPoints': [{
            'containerPath': '/work',
            'readOnly': False,
            'sourceVolume': 'work'
        }, {
            'containerPath': '/opt/container',
            'readOnly': True,
            'sourceVolume': 'container'
        }],
        'readonlyRootFilesystem': False,
        'privileged': True,
        'ulimits': []
    }

    # and the CDK equivalent of
    # https://github.com/umccr/infrastructure/blob/master/terraform/stacks/wts_report/main.tf#L113
    job_definition = batch.CfnJobDefinition(
        self,
        'RnasumJobDefinition',
        job_definition_name='rnasum_job_dev',
        type='container',
        container_properties=default_container_props,
        parameters={
            'vcpus': 1,
        })

    ################################################################################
    # Set up job submission Lambda
    lambda_role = iam.Role(
        self,
        'RnasumLambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AWSBatchFullAccess')  # TODO: restrict!
        ])
    for bucket in ro_buckets:
        bucket.grant_read(lambda_role)
    for bucket in rw_buckets:
        bucket.grant_read(lambda_role)

    # TODO: support dev/prod split, i.e. image being configurable on dev, but fixed on prod
    #       may need a default JobDefinition to be set up
    # and the CDK equivalent of
    # https://github.com/umccr/infrastructure/blob/master/terraform/stacks/wts_report/main.tf#L159
    lmbda.Function(
        self,
        'RnasumLambda',
        function_name='rnasum_batch_lambda',
        handler='trigger_wts_report.lambda_handler',
        runtime=lmbda.Runtime.PYTHON_3_7,
        code=lmbda.Code.from_asset('lambdas/'),
        environment={
            'JOBNAME_PREFIX': "rnasum_",
            'JOBQUEUE': job_queue.job_queue_name,
            'JOBDEF': job_definition.job_definition_name,
            'REFDATA_BUCKET': props['refdata_bucket'],
            'DATA_BUCKET': props['data_bucket'],
            'JOB_MEM': '32000',
            'JOB_VCPUS': '8',
            'REF_DATASET': 'PANCAN',
            'GENOME_BUILD': '38',
        },
        role=lambda_role)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    eks_vpc = ec2.Vpc(self, "VPC", cidr="10.0.0.0/16")
    self.node.apply_aspect(
        core.Tag("kubernetes.io/cluster/cluster", "shared"))
    eks_vpc.private_subnets[0].node.apply_aspect(
        core.Tag("kubernetes.io/role/internal-elb", "1"))
    eks_vpc.private_subnets[1].node.apply_aspect(
        core.Tag("kubernetes.io/role/internal-elb", "1"))
    eks_vpc.public_subnets[0].node.apply_aspect(
        core.Tag("kubernetes.io/role/elb", "1"))
    eks_vpc.public_subnets[1].node.apply_aspect(
        core.Tag("kubernetes.io/role/elb", "1"))

    # Create IAM Role For CodeBuild and Cloud9
    codebuild_role = iam.Role(
        self,
        "BuildRole",
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal("codebuild.amazonaws.com"),
            iam.ServicePrincipal("ec2.amazonaws.com")),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess")
        ])

    instance_profile = iam.CfnInstanceProfile(
        self, "InstanceProfile", roles=[codebuild_role.role_name])

    # Create CodeBuild PipelineProject
    build_project = codebuild.PipelineProject(
        self,
        "BuildProject",
        role=codebuild_role,
        build_spec=codebuild.BuildSpec.from_source_filename("buildspec.yml"))

    # Create CodePipeline
    pipeline = codepipeline.Pipeline(self, "Pipeline")

    # Create Artifact
    artifact = codepipeline.Artifact()

    # S3 Source Bucket
    source_bucket = s3.Bucket.from_bucket_attributes(
        self,
        "SourceBucket",
        bucket_arn=core.Fn.join(
            "", ["arn:aws:s3:::ee-assets-prod-", core.Fn.ref("AWS::Region")]))

    # Add Source Stage
    pipeline.add_stage(
        stage_name="Source",
        actions=[
            codepipeline_actions.S3SourceAction(
                action_name="S3SourceRepo",
                bucket=source_bucket,
                bucket_key="modules/2cae1f20008d4fc5aaef294602649b98/v9/source.zip",
                output=artifact,
                trigger=codepipeline_actions.S3Trigger.NONE)
        ])

    # Add CodeBuild Stage
    pipeline.add_stage(
        stage_name="Deploy",
        actions=[
            codepipeline_actions.CodeBuildAction(
                action_name="CodeBuildProject",
                project=build_project,
                type=codepipeline_actions.CodeBuildActionType.BUILD,
                input=artifact,
                environment_variables={
                    'PublicSubnet1ID': codebuild.BuildEnvironmentVariable(
                        value=eks_vpc.public_subnets[0].subnet_id),
                    'PublicSubnet2ID': codebuild.BuildEnvironmentVariable(
                        value=eks_vpc.public_subnets[1].subnet_id),
                    'PrivateSubnet1ID': codebuild.BuildEnvironmentVariable(
                        value=eks_vpc.private_subnets[0].subnet_id),
                    'PrivateSubnet2ID': codebuild.BuildEnvironmentVariable(
                        value=eks_vpc.private_subnets[1].subnet_id),
                    'AWS_DEFAULT_REGION': codebuild.BuildEnvironmentVariable(
                        value=self.region),
                    'INSTANCEPROFILEID': codebuild.BuildEnvironmentVariable(
                        value=instance_profile.ref),
                    'AWS_ACCOUNT_ID': codebuild.BuildEnvironmentVariable(
                        value=self.account)
                })
        ])

    cloud9_stack = cloudformation.CfnStack(
        self,
        "Cloud9Stack",
        # template_url="https://aws-quickstart.s3.amazonaws.com/quickstart-cloud9-ide/templates/cloud9-ide-instance.yaml",
        template_url="https://ee-assets-prod-us-east-1.s3.amazonaws.com/modules/2cae1f20008d4fc5aaef294602649b98/v9/cloud9-ide-instance.yaml",
        parameters={
            "C9InstanceType": "m5.large",
            "C9Subnet": eks_vpc.public_subnets[0].subnet_id
        })

    pipeline.node.add_dependency(eks_vpc)
    pipeline.node.add_dependency(cloud9_stack)
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    ################################################################################
    # Set up permissions
    ro_buckets = set()
    for bucket in props['ro_buckets']:
        tmp_bucket = s3.Bucket.from_bucket_name(self, bucket, bucket_name=bucket)
        ro_buckets.add(tmp_bucket)

    rw_buckets = set()
    for bucket in props['rw_buckets']:
        tmp_bucket = s3.Bucket.from_bucket_name(self, bucket, bucket_name=bucket)
        rw_buckets.add(tmp_bucket)

    batch_service_role = iam.Role(
        self,
        'BatchServiceRole',
        assumed_by=iam.ServicePrincipal('batch.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSBatchServiceRole')
        ])

    spotfleet_role = iam.Role(
        self,
        'AmazonEC2SpotFleetRole',
        assumed_by=iam.ServicePrincipal('spotfleet.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonEC2SpotFleetTaggingRole')
        ])

    # Create role for Batch instances
    batch_instance_role = iam.Role(
        self,
        'BatchInstanceRole',
        role_name='UmccriseBatchInstanceRole',
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal('ec2.amazonaws.com'),
            iam.ServicePrincipal('ecs.amazonaws.com')),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonEC2RoleforSSM'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonEC2ContainerServiceforEC2Role')
        ])
    batch_instance_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                "ec2:Describe*", "ec2:AttachVolume", "ec2:CreateVolume",
                "ec2:CreateTags", "ec2:ModifyInstanceAttribute"
            ],
            resources=["*"]))
    batch_instance_role.add_to_policy(
        iam.PolicyStatement(actions=["ecs:ListClusters"], resources=["*"]))
    for bucket in ro_buckets:
        bucket.grant_read(batch_instance_role)
    for bucket in rw_buckets:
        # restrict write to paths with */umccrised/*
        bucket.grant_read_write(batch_instance_role, '*/umccrised/*')

    # Turn the instance role into an Instance Profile
    batch_instance_profile = iam.CfnInstanceProfile(
        self,
        'BatchInstanceProfile',
        instance_profile_name='UmccriseBatchInstanceProfile',
        roles=[batch_instance_role.role_name])

    ################################################################################
    # Minimal networking
    # TODO: import resource created with TF
    vpc = props['vpc']

    ################################################################################
    # Set up Batch compute resources

    # Configure BlockDevice to expand instance disk space (if needed?)
    block_device_mappings = [{
        'deviceName': '/dev/xvdf',
        'ebs': {
            'deleteOnTermination': True,
            'volumeSize': 1024,
            'volumeType': 'gp2'
        }
    }]

    launch_template = ec2.CfnLaunchTemplate(
        self,
        'UmccriseBatchComputeLaunchTemplate',
        launch_template_name='UmccriseBatchComputeLaunchTemplate',
        launch_template_data={
            'userData': core.Fn.base64(user_data_script),
            'blockDeviceMappings': block_device_mappings
        })

    launch_template_spec = batch.LaunchTemplateSpecification(
        launch_template_name=launch_template.launch_template_name,
        version='$Latest')

    my_compute_res = batch.ComputeResources(
        type=batch.ComputeResourceType.SPOT,
        allocation_strategy=batch.AllocationStrategy.BEST_FIT_PROGRESSIVE,
        desiredv_cpus=0,
        maxv_cpus=128,
        minv_cpus=0,
        image=ec2.MachineImage.generic_linux(
            ami_map={'ap-southeast-2': props['compute_env_ami']}),
        launch_template=launch_template_spec,
        spot_fleet_role=spotfleet_role,
        instance_role=batch_instance_profile.instance_profile_name,
        vpc=vpc,
        # compute_resources_tags=core.Tag('Creator', 'Batch')
    )
    # XXX: How to add more than one tag above??
    # core.Tag.add(my_compute_res, 'Foo', 'Bar')

    my_compute_env = batch.ComputeEnvironment(
        self,
        'UmccriseBatchComputeEnv',
        compute_environment_name="cdk-umccrise-batch-compute-env",
        service_role=batch_service_role,
        compute_resources=my_compute_res)

    job_queue = batch.JobQueue(
        self,
        'UmccriseJobQueue',
        job_queue_name='cdk-umccrise_job_queue',
        compute_environments=[
            batch.JobQueueComputeEnvironment(
                compute_environment=my_compute_env, order=1)
        ],
        priority=10)

    job_container = batch.JobDefinitionContainer(
        image=ecs.ContainerImage.from_registry(name=props['container_image']),
        vcpus=2,
        memory_limit_mib=2048,
        command=["/opt/container/umccrise-wrapper.sh", "Ref::vcpus"],
        mount_points=[
            ecs.MountPoint(
                container_path='/work',
                read_only=False,
                source_volume='work'),
            ecs.MountPoint(
                container_path='/opt/container',
                read_only=True,
                source_volume='container')
        ],
        volumes=[
            ecs.Volume(
                name='container',
                host=ecs.Host(source_path='/opt/container')),
            ecs.Volume(
                name='work',
                host=ecs.Host(source_path='/mnt'))
        ],
        privileged=True)

    job_definition = batch.JobDefinition(
        self,
        'UmccriseJobDefinition',
        job_definition_name='cdk-umccrise-job-definition',
        parameters={'vcpus': '1'},
        container=job_container,
        timeout=core.Duration.hours(5))

    ################################################################################
    # Set up job submission Lambda
    lambda_role = iam.Role(
        self,
        'UmccriseLambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSLambdaBasicExecutionRole'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                'AWSBatchFullAccess')  # TODO: restrict!
        ])
    for bucket in ro_buckets:
        bucket.grant_read(lambda_role)
    for bucket in rw_buckets:
        bucket.grant_read(lambda_role)

    # TODO: support dev/prod split, i.e. image being configurable on dev, but fixed on prod
    #       may need a default JobDefinition to be set up
    lmbda.Function(
        self,
        'UmccriseLambda',
        function_name='umccrise_batch_lambda',
        handler='umccrise.lambda_handler',
        runtime=lmbda.Runtime.PYTHON_3_7,
        code=lmbda.Code.from_asset('lambdas/umccrise'),
        environment={
            'JOBNAME_PREFIX': "UMCCRISE_",
            'JOBQUEUE': job_queue.job_queue_name,
            'REFDATA_BUCKET': props['refdata_bucket'],
            'DATA_BUCKET': props['data_bucket'],
            'UMCCRISE_MEM': '50000',
            'UMCCRISE_VCPUS': '16'
        },
        role=lambda_role)
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    dirname = os.path.dirname(__file__)

    ecr_repo = ecr.Repository.from_repository_name(
        self,
        'UmccriseEcrRepo',
        repository_name='umccrise'
    )

    ################################################################################
    # Set up permissions
    ro_buckets = set()
    for bucket in props['ro_buckets']:
        tmp_bucket = s3.Bucket.from_bucket_name(
            self,
            bucket,
            bucket_name=bucket
        )
        ro_buckets.add(tmp_bucket)

    rw_buckets = set()
    for bucket in props['rw_buckets']:
        tmp_bucket = s3.Bucket.from_bucket_name(
            self,
            bucket,
            bucket_name=bucket
        )
        rw_buckets.add(tmp_bucket)

    batch_service_role = iam.Role(
        self,
        'BatchServiceRole',
        assumed_by=iam.ServicePrincipal('batch.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSBatchServiceRole')
        ]
    )

    spotfleet_role = iam.Role(
        self,
        'AmazonEC2SpotFleetRole',
        assumed_by=iam.ServicePrincipal('spotfleet.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonEC2SpotFleetTaggingRole')
        ]
    )

    # Create role for Batch instances
    batch_instance_role = iam.Role(
        self,
        'BatchInstanceRole',
        role_name='UmccriseBatchInstanceRole',
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal('ec2.amazonaws.com'),
            iam.ServicePrincipal('ecs.amazonaws.com')
        ),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonEC2RoleforSSM'),
            iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonEC2ContainerServiceforEC2Role')
        ]
    )
    batch_instance_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                "ec2:Describe*",
                "ec2:AttachVolume",
                "ec2:CreateVolume",
                "ec2:CreateTags",
                "ec2:ModifyInstanceAttribute"
            ],
            resources=["*"]
        )
    )
    batch_instance_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                "ecs:ListClusters"
            ],
            resources=["*"]
        )
    )
    for bucket in ro_buckets:
        bucket.grant_read(batch_instance_role)
    for bucket in rw_buckets:
        # restrict write to paths with */umccrised/*
        bucket.grant_read_write(batch_instance_role, '*/umccrised/*')

    # Turn the instance role into an Instance Profile
    batch_instance_profile = iam.CfnInstanceProfile(
        self,
        'BatchInstanceProfile',
        instance_profile_name='UmccriseBatchInstanceProfile',
        roles=[batch_instance_role.role_name]
    )

    ################################################################################
    # Network
    # Import common infrastructure (maintained via Terraform)

    # VPC
    vpc = ec2.Vpc.from_lookup(
        self,
        'UmccrMainVpc',
        tags={'Name': 'main-vpc', 'Stack': 'networking'}
    )

    batch_security_group = ec2.SecurityGroup(
        self,
        "BatchSecurityGroup",
        vpc=vpc,
        description="Allow all outbound, no inbound traffic"
    )

    ################################################################################
    # Set up Batch compute resources

    # Configure BlockDevice to expand instance disk space (if needed?)
    block_device_mappings = [
        {
            'deviceName': '/dev/xvdf',
            'ebs': {
                'deleteOnTermination': True,
                'encrypted': True,
                'volumeSize': 2048,
                'volumeType': 'gp2'
            }
        }
    ]

    # Set up custom user data to configure the Batch instances
    umccrise_wrapper_asset = assets.Asset(
        self,
        'UmccriseWrapperAsset',
        path=os.path.join(dirname, '..', 'assets', "umccrise-wrapper.sh")
    )
    umccrise_wrapper_asset.grant_read(batch_instance_role)

    user_data_asset = assets.Asset(
        self,
        'UserDataAsset',
        path=os.path.join(dirname, '..', 'assets', "batch-user-data.sh")
    )
    user_data_asset.grant_read(batch_instance_role)

    user_data = ec2.UserData.for_linux()
    local_path = user_data.add_s3_download_command(
        bucket=user_data_asset.bucket,
        bucket_key=user_data_asset.s3_object_key
    )
    user_data.add_execute_file_command(
        file_path=local_path,
        arguments=f"s3://{umccrise_wrapper_asset.bucket.bucket_name}/{umccrise_wrapper_asset.s3_object_key}"
    )

    # Generate user data wrapper to comply with LaunchTemplate required MIME multi-part archive format for user data
    mime_wrapper = ec2.UserData.custom('MIME-Version: 1.0')
    mime_wrapper.add_commands('Content-Type: multipart/mixed; boundary="==MYBOUNDARY=="')
    mime_wrapper.add_commands('')
    mime_wrapper.add_commands('--==MYBOUNDARY==')
    mime_wrapper.add_commands('Content-Type: text/x-shellscript; charset="us-ascii"')
    mime_wrapper.add_commands('')
    # install AWS CLI, as it's unexpectedly missing from the AWS Linux 2 AMI...
    mime_wrapper.add_commands('yum -y install unzip')
    mime_wrapper.add_commands('cd /opt')
    mime_wrapper.add_commands('curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"')
    mime_wrapper.add_commands('unzip awscliv2.zip')
    mime_wrapper.add_commands('sudo ./aws/install --bin-dir /usr/bin')
    # insert our actual user data payload
    mime_wrapper.add_commands(user_data.render())
    mime_wrapper.add_commands('--==MYBOUNDARY==--')

    launch_template = ec2.CfnLaunchTemplate(
        self,
        'UmccriseBatchComputeLaunchTemplate',
        launch_template_name='UmccriseBatchComputeLaunchTemplate',
        launch_template_data={
            'userData': core.Fn.base64(mime_wrapper.render()),
            'blockDeviceMappings': block_device_mappings
        }
    )

    launch_template_spec = batch.LaunchTemplateSpecification(
        launch_template_name=launch_template.launch_template_name,
        version='$Latest'
    )

    my_compute_res = batch.ComputeResources(
        type=(batch.ComputeResourceType.SPOT
              if props['compute_env_type'].lower() == 'spot'
              else batch.ComputeResourceType.ON_DEMAND),
        allocation_strategy=batch.AllocationStrategy.BEST_FIT_PROGRESSIVE,
        desiredv_cpus=0,
        maxv_cpus=320,
        minv_cpus=0,
        image=ec2.MachineImage.generic_linux(ami_map={'ap-southeast-2': props['compute_env_ami']}),
        launch_template=launch_template_spec,
        spot_fleet_role=spotfleet_role,
        instance_role=batch_instance_profile.instance_profile_name,
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(
            subnet_type=ec2.SubnetType.PRIVATE,
            # availability_zones=["ap-southeast-2a"]
        ),
        security_groups=[batch_security_group]
        # compute_resources_tags=core.Tag('Creator', 'Batch')
    )
    # XXX: How to add more than one tag above??
    # https://github.com/aws/aws-cdk/issues/7350
    # core.Tag.add(my_compute_res, 'Foo', 'Bar')

    my_compute_env = batch.ComputeEnvironment(
        self,
        'UmccriseBatchComputeEnv',
        compute_environment_name="cdk-umccr_ise-batch-compute-env",
        service_role=batch_service_role,
        compute_resources=my_compute_res
    )
    # child = my_compute_env.node.default_child
    # child_comp_res = child.compute_resources
    # child_comp_res.tags = "{'Foo': 'Bar'}"

    job_queue = batch.JobQueue(
        self,
        'UmccriseJobQueue',
        job_queue_name='cdk-umccrise_job_queue',
        compute_environments=[
            batch.JobQueueComputeEnvironment(
                compute_environment=my_compute_env,
                order=1
            )
        ],
        priority=10
    )

    job_container = batch.JobDefinitionContainer(
        image=ecs.ContainerImage.from_registry(name=props['container_image']),
        vcpus=32,
        memory_limit_mib=100000,
        command=[
            "/opt/container/umccrise-wrapper.sh",
            "Ref::vcpus"
        ],
        mount_points=[
            ecs.MountPoint(
                container_path='/work',
                read_only=False,
                source_volume='work'
            ),
            ecs.MountPoint(
                container_path='/opt/container',
                read_only=True,
                source_volume='container'
            )
        ],
        volumes=[
            ecs.Volume(
                name='container',
                host=ecs.Host(
                    source_path='/opt/container'
                )
            ),
            ecs.Volume(
                name='work',
                host=ecs.Host(
                    source_path='/mnt'
                )
            )
        ],
        privileged=True
    )

    job_definition = batch.JobDefinition(
        self,
        'UmccriseJobDefinition',
        job_definition_name='cdk-umccrise-job-definition',
        parameters={'vcpus': '1'},
        container=job_container,
        timeout=core.Duration.hours(5)
    )

    ################################################################################
    # Set up job submission Lambda
    lambda_role = iam.Role(
        self,
        'UmccriseLambdaRole',
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole'),
            iam.ManagedPolicy.from_aws_managed_policy_name('AWSBatchFullAccess')  # TODO: restrict!
        ]
    )
    for bucket in ro_buckets:
        bucket.grant_read(lambda_role)
    for bucket in rw_buckets:
        bucket.grant_read(lambda_role)
    ecr_repo.grant(lambda_role, 'ecr:ListImages')

    # TODO: support dev/prod split, i.e. image being configurable on dev, but fixed on prod
    #       may need a default JobDefinition to be set up
    lmbda.Function(
        self,
        'UmccriseLambda',
        function_name='umccrise_batch_lambda',
        handler='umccrise.lambda_handler',
        runtime=lmbda.Runtime.PYTHON_3_7,
        code=lmbda.Code.from_asset('lambdas/umccrise'),
        environment={
            'JOBNAME_PREFIX': "UMCCRISE_",
            'JOBQUEUE': job_queue.job_queue_name,
            'UMCCRISE_MEM': '100000',
            'UMCCRISE_VCPUS': '32',
            'JOBDEF': job_definition.job_definition_name,
            'REFDATA_BUCKET': props['refdata_bucket'],
            'INPUT_BUCKET': props['input_bucket'],
            'RESULT_BUCKET': props['result_bucket'],
            'IMAGE_CONFIGURABLE': props['image_configurable']
        },
        role=lambda_role
    )
stack = core.Stack(app, 'CrossAccountCodeCommitResourcesStack',
                   env=core.Environment(
                       account=DEPLOYMENT_ACCOUNT,
                       region=DEPLOYMENT_REGION
                   ))

repository = codecommit.Repository.from_repository_name(
    stack, 'CodeCommitRepository', CODECOMMIT_REPOSITORY)

pipeline_artifacts_bucket = s3.Bucket.from_bucket_name(
    stack, 'PipelineArtifactsBucket', PIPELINE_ARTIFACTS_BUCKET)

pipeline_artifacts_key = kms.Key.from_key_arn(
    stack, 'PipelineArtifactsKey', PIPELINE_ARTIFACTS_KEY)

cross_account_codecommit_role = iam.Role(
    stack, 'CrossAccountCodeCommitRole',
    assumed_by=iam.CompositePrincipal(
        *[iam.AccountPrincipal(account) for account in TRUSTED_ACCOUNTS]),
    inline_policies={
        'codepipeline-policy': iam.PolicyDocument(statements=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    'codecommit:BatchGet*',
                    'codecommit:BatchDescribe*',
                    'codecommit:Describe*',
                    'codecommit:EvaluatePullRequestApprovalRules',
                    'codecommit:Get*',
                    'codecommit:List*',
                    'codecommit:GitPull',
                    'codecommit:UploadArchive'
                ],
                resources=[repository.repository_arn]
            )
        ])
    })
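The role above only grants CodeCommit permissions on the repository; each trusted account still needs its own identity-based statement permitting `sts:AssumeRole` on this role. A minimal sketch, not from the source (in practice the role ARN would be exported from this stack or passed in as configuration):

# Hypothetical: attach to a CodePipeline/CodeBuild role in one of the
# TRUSTED_ACCOUNTS so it can assume the cross-account CodeCommit role.
assume_cross_account = iam.PolicyStatement(
    effect=iam.Effect.ALLOW,
    actions=['sts:AssumeRole'],
    resources=[cross_account_codecommit_role.role_arn],
)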
def __init__(self, scope: core.Construct, id: str, CurrentVPC="default", TargetS3="default", UserName="******", **kwargs):
    super().__init__(scope, id, **kwargs)
    self.job_queue = {}

    # batch service role
    self.batch_service_role = _iam.Role(
        self,
        'BatchServiceRole',
        assumed_by=_iam.ServicePrincipal('batch.amazonaws.com'),
        managed_policies=[
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AWSBatchServiceRole')
        ])

    # ec2 role with a policy that allows getting objects from the s3 bucket for batch computing
    self.batch_compute_role = _iam.Role(
        self,
        'BatchComputeRole',
        assumed_by=_iam.CompositePrincipal(
            _iam.ServicePrincipal('ec2.amazonaws.com'),
            _iam.ServicePrincipal('ecs.amazonaws.com')),
        managed_policies=[
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                'service-role/AmazonEC2RoleforSSM'),
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2ContainerServiceforEC2Role"),
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchLogsFullAccess")
        ])
    TargetS3.grant_read_write(self.batch_compute_role)

    self.batch_compute_instance_profile = _iam.CfnInstanceProfile(
        self,
        'BatchInstanceProfile' + UserName,
        instance_profile_name='BatchInstanceProfile-' + UserName,
        roles=[self.batch_compute_role.role_name])

    self.ComputeENV = _batch.ComputeEnvironment(
        self,
        "ComputeENV",
        service_role=self.batch_service_role,
        compute_resources={
            "vpc": CurrentVPC,
            "instance_types": [_ec2.InstanceType("c5"), _ec2.InstanceType("m5")],
            "maxv_cpus": 128,
            "minv_cpus": 0,
            "type": _batch.ComputeResourceType.SPOT,
            "allocation_strategy": _batch.AllocationStrategy.BEST_FIT_PROGRESSIVE,
            "instance_role": self.batch_compute_instance_profile.instance_profile_name
        })

    self.ComputeQueue = _batch.JobQueue(
        self,
        "ComputeQueue",
        priority=1,
        compute_environments=[
            _batch.JobQueueComputeEnvironment(
                compute_environment=self.ComputeENV, order=1)
        ])
    self.job_queue["ComputeQueue"] = self.ComputeQueue
def __init__(self, scope: core.Construct, id: str, domain_prefix: str, other_account: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    user_pool = cognito.CfnUserPool(
        scope=self,
        id="user-pool",
        admin_create_user_config=cognito.CfnUserPool.AdminCreateUserConfigProperty(
            allow_admin_create_user_only=True,
        ),
        policies=cognito.CfnUserPool.PoliciesProperty(
            password_policy=cognito.CfnUserPool.PasswordPolicyProperty(
                minimum_length=20,
            )),
        username_attributes=["email"],
        auto_verified_attributes=["email"],
    )

    cognito.CfnUserPoolDomain(
        scope=self,
        id="cognito-user-pool-domain",
        domain=f"{domain_prefix}-{core.Aws.ACCOUNT_ID}",
        user_pool_id=user_pool.ref,
    )

    id_pool = cognito.CfnIdentityPool(
        scope=self,
        id="identity-pool",
        allow_unauthenticated_identities=False,
        cognito_identity_providers=[],
    )

    auth_role = iam.Role(
        scope=self,
        id="auth-role",
        assumed_by=iam.FederatedPrincipal(
            federated="cognito-identity.amazonaws.com",
            conditions={
                "StringEquals": {"cognito-identity.amazonaws.com:aud": id_pool.ref},
                "ForAnyValue:StringLike": {"cognito-identity.amazonaws.com:amr": "authenticated"},
            },
            assume_role_action="sts:AssumeRoleWithWebIdentity"),
    )

    es_role = iam.Role(
        scope=self,
        id="es-role",
        assumed_by=iam.ServicePrincipal('es.amazonaws.com'),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonESCognitoAccess"
            )
        ],
    )

    es_domain = elasticsearch.CfnDomain(
        scope=self,
        id="search-domain",
        elasticsearch_cluster_config=elasticsearch.CfnDomain.ElasticsearchClusterConfigProperty(
            instance_count=2,
            instance_type="m5.large.elasticsearch",
        ),
        node_to_node_encryption_options=elasticsearch.CfnDomain.NodeToNodeEncryptionOptionsProperty(
            enabled=True),
        encryption_at_rest_options=elasticsearch.CfnDomain.EncryptionAtRestOptionsProperty(
            enabled=True),
        ebs_options=elasticsearch.CfnDomain.EBSOptionsProperty(
            ebs_enabled=True,
            volume_size=20),
        elasticsearch_version="7.4",
        domain_name=domain_prefix,
        access_policies={
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Principal": {
                        "AWS": auth_role.role_arn
                    },
                    "Action": [
                        "es:ESHttpGet",
                        "es:ESHttpPut",
                        "es:ESHttpPost",
                        "es:ESHttpDelete"
                    ],
                    "Resource": "arn:aws:es:" + core.Aws.REGION + ":" + core.Aws.ACCOUNT_ID + ":domain/" + domain_prefix + "/*"
                },
            ]
        },
    )
    es_domain.add_property_override('CognitoOptions.Enabled', True)
    es_domain.add_property_override('CognitoOptions.IdentityPoolId', id_pool.ref)
    es_domain.add_property_override('CognitoOptions.RoleArn', es_role.role_arn)
    es_domain.add_property_override('CognitoOptions.UserPoolId', user_pool.ref)

    cognito.CfnIdentityPoolRoleAttachment(
        scope=self,
        id='user-pool-role-attachment',
        identity_pool_id=id_pool.ref,
        roles={
            'authenticated': auth_role.role_arn
        }
    )

    es_external_role = iam.Role(
        scope=self,
        id="logger-role",
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal("lambda.amazonaws.com"),
            iam.AccountPrincipal(other_account),
        ),
        description="role to use elastic search assumed by lambda",
        inline_policies={
            "es_policy": iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    actions=[
                        "es:ESHttpPost",
                    ],
                    resources=[
                        es_domain.attr_arn + "/*",
                    ],
                )]),
        },
    )

    core.CfnOutput(
        scope=self,
        id="es-host",
        value=es_domain.attr_domain_endpoint,
    )
    core.CfnOutput(
        scope=self,
        id="es-region",
        value=core.Aws.REGION,
    )
    core.CfnOutput(
        scope=self,
        id="es-external-role",
        value=es_external_role.role_arn,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Create IAM Role For CodeBuild
    codebuild_role = iam.Role(
        self,
        "BuildRole",
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal("codebuild.amazonaws.com")),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess")
        ])

    instance_profile = iam.CfnInstanceProfile(
        self, "InstanceProfile", roles=[codebuild_role.role_name])

    # Create CodeBuild PipelineProject
    build_project = codebuild.PipelineProject(
        self,
        "BuildProject",
        role=codebuild_role,
        build_spec=codebuild.BuildSpec.from_source_filename("buildspec.yml"),
        environment=codebuild.BuildEnvironment(
            build_image=codebuild.LinuxBuildImage.STANDARD_5_0,
            compute_type=codebuild.ComputeType.LARGE))

    # Create CodePipeline
    pipeline = codepipeline.Pipeline(self, "Pipeline")

    # Create Artifact
    artifact = codepipeline.Artifact()

    # S3 Source Bucket
    source_bucket = s3.Bucket.from_bucket_attributes(
        self,
        "SourceBucket",
        bucket_arn=core.Fn.join(
            "", ["arn:aws:s3:::ee-assets-prod-", core.Fn.ref("AWS::Region")]))

    # Add Source Stage
    pipeline.add_stage(
        stage_name="Source",
        actions=[
            codepipeline_actions.S3SourceAction(
                action_name="S3SourceRepo",
                bucket=source_bucket,
                bucket_key="modules/c52c7d8ba87d4217a2bf045037b58b5d/v1/source.zip",
                output=artifact,
                trigger=codepipeline_actions.S3Trigger.NONE)
        ])

    # Add CodeBuild Stage
    pipeline.add_stage(
        stage_name="Deploy",
        actions=[
            codepipeline_actions.CodeBuildAction(
                action_name="CodeBuildProject",
                project=build_project,
                type=codepipeline_actions.CodeBuildActionType.BUILD,
                input=artifact)
        ])
def __init__(self, scope: core.Construct, id: str, UserName="******", Repo="default", WebService="default", **kwargs):
    super().__init__(scope, id, **kwargs)

    self.My_CodeBuild_Role = _iam.Role(
        self,
        'CodeBuildRole-Web-' + UserName,
        assumed_by=_iam.CompositePrincipal(
            _iam.ServicePrincipal('ec2.amazonaws.com'),
            _iam.ServicePrincipal('codebuild.amazonaws.com')))

    for repo in Repo.getRepositoriesList():
        Repo.getRepositories(repo).grant_pull_push(self.My_CodeBuild_Role)

    self.My_CodeCommit_Web = _codecommit.Repository(
        self,
        "CodeCommit-Web-" + UserName,
        repository_name="Workshop-Web-" + UserName,
        description="CodeCommit for Web Project,Owner:" + UserName)

    self.My_CodeBuild_Web = _codebuild.PipelineProject(
        self,
        "CodeBuild-Web-" + UserName,
        project_name="CodeBuild-Web" + UserName,
        role=self.My_CodeBuild_Role,
        environment=_codebuild.BuildEnvironment(
            build_image=_codebuild.LinuxBuildImage.STANDARD_2_0,
            privileged=True))

    self.CodeCommit_Web_Source = _codepipeline.Artifact(
        "CodeCommit_Web_Source-" + UserName)
    self.EcsImage_Web_Source = _codepipeline.Artifact(
        'EcsImage_Web_Source-' + UserName)
    self.FargateImage_Web_Source = _codepipeline.Artifact(
        'FargateImage_Web_Source-' + UserName)

    self.My_CodePipeline_Web = _codepipeline.Pipeline(
        self,
        "CodePipeline-Web-" + UserName,
        stages=[
            _codepipeline.StageProps(
                stage_name="Source",
                actions=[
                    _codepipeline_actions.CodeCommitSourceAction(
                        action_name="CodeCommit_Web_Source",
                        repository=self.My_CodeCommit_Web,
                        branch="master",
                        output=self.CodeCommit_Web_Source)
                ]),
            _codepipeline.StageProps(
                stage_name="Build",
                actions=[
                    _codepipeline_actions.CodeBuildAction(
                        action_name="CodeCommit_Web_Build",
                        project=self.My_CodeBuild_Web,
                        input=self.CodeCommit_Web_Source,
                        outputs=[self.FargateImage_Web_Source])
                ]),
            _codepipeline.StageProps(
                stage_name="Deploy",
                actions=[
                    _codepipeline_actions.EcsDeployAction(
                        action_name='CodeDeploy_Web_Deploy',
                        service=WebService.getFargateService(
                            "WebApplicationService"),
                        input=self.FargateImage_Web_Source)
                ])
        ])

    core.CfnOutput(
        self,
        "CodeCommit For WebApplication",
        value=self.My_CodeCommit_Web.repository_clone_url_http)
def __init__(self, scope: cdk.Construct, construct_id: str, stack_log_level, vpc, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # Create EKS Cluster Role
    # https://docs.aws.amazon.com/eks/latest/userguide/getting-started-console.html
    self._eks_cluster_svc_role = _iam.Role(
        self,
        "c_SvcRole",
        assumed_by=_iam.ServicePrincipal("eks.amazonaws.com"),
        managed_policies=[
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEKSClusterPolicy"),
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEKS_CNI_Policy"),
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEKSVPCResourceController")
        ])

    self._eks_node_role = _iam.Role(
        self,
        "c_NodeRole",
        assumed_by=_iam.ServicePrincipal("ec2.amazonaws.com"),
        managed_policies=[
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEKSWorkerNodePolicy"),
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEC2ContainerRegistryReadOnly"),
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEKS_CNI_Policy"),
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore"),
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"),
            _iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSQSFullAccess")  # Yes, yes...I know. :)
        ])

    c_admin_role = _iam.Role(
        self,
        "c_AdminRole",
        assumed_by=_iam.CompositePrincipal(
            _iam.AccountRootPrincipal(),
            _iam.ServicePrincipal("ec2.amazonaws.com")))
    c_admin_role.add_to_policy(
        _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW,
            actions=["eks:DescribeCluster"],
            resources=["*"]))

    # Create Security Group for EKS Cluster SG
    self.eks_cluster_sg = _ec2.SecurityGroup(
        self,
        "eksClusterSG",
        vpc=vpc,
        description="EKS Cluster security group",
        allow_all_outbound=True,
    )
    cdk.Tags.of(self.eks_cluster_sg).add("Name", "eks_cluster_sg")

    # https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
    self.eks_cluster_sg.add_ingress_rule(
        peer=self.eks_cluster_sg,
        connection=_ec2.Port.all_traffic(),
        description="Allow incoming within SG")

    clust_name = "c_1_event_processor"

    self.eks_cluster_1 = _eks.Cluster(
        self,
        f"{clust_name}",
        cluster_name=f"{clust_name}",
        version=_eks.KubernetesVersion.V1_18,
        vpc=vpc,
        vpc_subnets=[
            _ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PUBLIC),
            _ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PRIVATE)
        ],
        default_capacity=0,
        masters_role=c_admin_role,
        role=self._eks_cluster_svc_role,
        security_group=self.eks_cluster_sg,
        endpoint_access=_eks.EndpointAccess.PUBLIC
        # endpoint_access=_eks.EndpointAccess.PUBLIC_AND_PRIVATE
    )

    node_grp_1 = self.eks_cluster_1.add_nodegroup_capacity(
        f"n_g_{clust_name}",
        nodegroup_name=f"{clust_name}_n_g",
        instance_types=[
            _ec2.InstanceType("t3.medium"),
            _ec2.InstanceType("t3.large"),
        ],
        disk_size=20,
        min_size=1,
        max_size=6,
        desired_size=2,
        labels={
            "app": "miztiik_ng",
            "lifecycle": "on_demand",
            "compute_provider": "ec2"
        },
        subnets=_ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PUBLIC),
        ami_type=_eks.NodegroupAmiType.AL2_X86_64,
        # remote_access=_eks.NodegroupRemoteAccess(ssh_key_name="eks-ssh-keypair"),
        capacity_type=_eks.CapacityType.ON_DEMAND,
        node_role=self._eks_node_role
        # bootstrap_options={"kubelet_extra_args": "--node-labels=node.kubernetes.io/lifecycle=spot,daemonset=active,app=general --eviction-hard imagefs.available<15% --feature-gates=CSINodeInfo=true,CSIDriverRegistry=true,CSIBlockVolume=true,ExpandCSIVolumes=true"}
    )

    # This code block will provision worker nodes with Fargate Profile configuration
    fargate_n_g_3 = self.eks_cluster_1.add_fargate_profile(
        "FargateEnabled",
        fargate_profile_name="miztiik_n_g_fargate",
        selectors=[
            _eks.Selector(
                namespace="default",
                labels={"fargate": "enabled"})
        ])

    self.add_cluster_admin()
    # We like to use the Kubernetes Dashboard
    self.enable_dashboard()

    ###########################################
    ################# OUTPUTS #################
    ###########################################
    output_0 = cdk.CfnOutput(
        self,
        "AutomationFrom",
        value=f"{GlobalArgs.SOURCE_INFO}",
        description="To know more about this automation stack, check out our github page."
    )
    output_1 = cdk.CfnOutput(
        self,
        "eksClusterAdminRole",
        value=f"{c_admin_role.role_name}",
        description="EKS Cluster Admin Role")
    output_2 = cdk.CfnOutput(
        self,
        "eksClusterSvcRole",
        value=f"{self._eks_cluster_svc_role.role_name}",
        description="EKS Cluster Service Role")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) eks_vpc = ec2.Vpc( self, "VPC", cidr="10.0.0.0/16" ) # Create IAM Role For EC2 bastion instance to be able to manage the cluster bastion_role = iam.Role( self, "BastionRole", assumed_by=iam.CompositePrincipal( iam.ServicePrincipal("ec2.amazonaws.com"), iam.AccountRootPrincipal() ) ) self.bastion_role = bastion_role # Create EC2 Instance Profile for that Role instance_profile = iam.CfnInstanceProfile( self, "InstanceProfile", roles=[bastion_role.role_name] ) # Create SecurityGroup for the Control Plane ENIs eks_security_group = ec2.SecurityGroup( self, "EKSSecurityGroup", vpc=eks_vpc, allow_all_outbound=True ) eks_security_group.add_ingress_rule( ec2.Peer.ipv4('10.0.0.0/16'), ec2.Port.all_traffic() ) # Create an EKS Cluster eks_cluster = eks.Cluster( self, "cluster", vpc=eks_vpc, masters_role=bastion_role, default_capacity_type=eks.DefaultCapacityType.NODEGROUP, default_capacity_instance=ec2.InstanceType("m5.large"), default_capacity=2, security_group=eks_security_group, endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE, version=eks.KubernetesVersion.V1_18 ) # Deploy ALB Ingress Controller # Create the k8s Service account and corresponding IAM Role mapped via IRSA alb_service_account = eks_cluster.add_service_account( "aws-load-balancer-controller", name="aws-load-balancer-controller", namespace="kube-system" ) # Create the PolicyStatements to attach to the role # I couldn't find a way to get this to work with a PolicyDocument and there are 10 of these alb_policy_statement_json_1 = { "Effect": "Allow", "Action": [ "acm:DescribeCertificate", "acm:ListCertificates", "acm:GetCertificate" ], "Resource": "*" } alb_policy_statement_json_2 = { "Effect": "Allow", "Action": [ "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateSecurityGroup", "ec2:CreateTags", "ec2:DeleteTags", "ec2:DeleteSecurityGroup", "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses", "ec2:DescribeInstances", "ec2:DescribeInstanceStatus", "ec2:DescribeInternetGateways", "ec2:DescribeNetworkInterfaces", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeTags", "ec2:DescribeVpcs", "ec2:ModifyInstanceAttribute", "ec2:ModifyNetworkInterfaceAttribute", "ec2:RevokeSecurityGroupIngress" ], "Resource": "*" } alb_policy_statement_json_3 = { "Effect": "Allow", "Action": [ "elasticloadbalancing:AddListenerCertificates", "elasticloadbalancing:AddTags", "elasticloadbalancing:CreateListener", "elasticloadbalancing:CreateLoadBalancer", "elasticloadbalancing:CreateRule", "elasticloadbalancing:CreateTargetGroup", "elasticloadbalancing:DeleteListener", "elasticloadbalancing:DeleteLoadBalancer", "elasticloadbalancing:DeleteRule", "elasticloadbalancing:DeleteTargetGroup", "elasticloadbalancing:DeregisterTargets", "elasticloadbalancing:DescribeListenerCertificates", "elasticloadbalancing:DescribeListeners", "elasticloadbalancing:DescribeLoadBalancers", "elasticloadbalancing:DescribeLoadBalancerAttributes", "elasticloadbalancing:DescribeRules", "elasticloadbalancing:DescribeSSLPolicies", "elasticloadbalancing:DescribeTags", "elasticloadbalancing:DescribeTargetGroups", "elasticloadbalancing:DescribeTargetGroupAttributes", "elasticloadbalancing:DescribeTargetHealth", "elasticloadbalancing:ModifyListener", "elasticloadbalancing:ModifyLoadBalancerAttributes", "elasticloadbalancing:ModifyRule", "elasticloadbalancing:ModifyTargetGroup", "elasticloadbalancing:ModifyTargetGroupAttributes", "elasticloadbalancing:RegisterTargets", 
"elasticloadbalancing:RemoveListenerCertificates", "elasticloadbalancing:RemoveTags", "elasticloadbalancing:SetIpAddressType", "elasticloadbalancing:SetSecurityGroups", "elasticloadbalancing:SetSubnets", "elasticloadbalancing:SetWebAcl" ], "Resource": "*" } alb_policy_statement_json_4 = { "Effect": "Allow", "Action": [ "iam:CreateServiceLinkedRole", "iam:GetServerCertificate", "iam:ListServerCertificates" ], "Resource": "*" } alb_policy_statement_json_5 = { "Effect": "Allow", "Action": [ "cognito-idp:DescribeUserPoolClient" ], "Resource": "*" } alb_policy_statement_json_6 = { "Effect": "Allow", "Action": [ "waf-regional:GetWebACLForResource", "waf-regional:GetWebACL", "waf-regional:AssociateWebACL", "waf-regional:DisassociateWebACL" ], "Resource": "*" } alb_policy_statement_json_7 = { "Effect": "Allow", "Action": [ "tag:GetResources", "tag:TagResources" ], "Resource": "*" } alb_policy_statement_json_8 = { "Effect": "Allow", "Action": [ "waf:GetWebACL" ], "Resource": "*" } alb_policy_statement_json_9 = { "Effect": "Allow", "Action": [ "wafv2:GetWebACL", "wafv2:GetWebACLForResource", "wafv2:AssociateWebACL", "wafv2:DisassociateWebACL" ], "Resource": "*" } alb_policy_statement_json_10 = { "Effect": "Allow", "Action": [ "shield:DescribeProtection", "shield:GetSubscriptionState", "shield:DeleteProtection", "shield:CreateProtection", "shield:DescribeSubscription", "shield:ListProtections" ], "Resource": "*" } # Attach the necessary permissions alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_1)) alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_2)) alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_3)) alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_4)) alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_5)) alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_6)) alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_7)) alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_8)) alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_9)) alb_service_account.add_to_policy(iam.PolicyStatement.from_json(alb_policy_statement_json_10)) # Deploy the ALB Ingress Controller from the Helm chart eks_cluster.add_helm_chart( "aws-load-balancer-controller", chart="aws-load-balancer-controller", repository="https://aws.github.io/eks-charts", namespace="kube-system", values={ "clusterName": eks_cluster.cluster_name, "region": self.region, "vpcId": eks_vpc.vpc_id, "serviceAccount": { "create": False, "name": "aws-load-balancer-controller" } } ) # Deploy External DNS Controller # Create the k8s Service account and corresponding IAM Role mapped via IRSA externaldns_service_account = eks_cluster.add_service_account( "external-dns", name="external-dns", namespace="kube-system" ) # Create the PolicyStatements to attach to the role externaldns_policy_statement_json_1 = { "Effect": "Allow", "Action": [ "route53:ChangeResourceRecordSets" ], "Resource": [ "arn:aws:route53:::hostedzone/*" ] } externaldns_policy_statement_json_2 = { "Effect": "Allow", "Action": [ "route53:ListHostedZones", "route53:ListResourceRecordSets" ], "Resource": [ "*" ] } # Add the policies to the service account 
externaldns_service_account.add_to_policy(iam.PolicyStatement.from_json(externaldns_policy_statement_json_1)) externaldns_service_account.add_to_policy(iam.PolicyStatement.from_json(externaldns_policy_statement_json_2)) # Deploy the Helm Chart eks_cluster.add_helm_chart( "external-dns", chart="external-dns", repository="https://charts.bitnami.com/bitnami", namespace="kube-system", values={ "provider": "aws", "aws": { "region": self.region }, "serviceAccount": { "create": False, "name": "external-dns" }, "podSecurityContext": { "fsGroup": 65534 } } ) # Install external secrets controller # Create the Service Account externalsecrets_service_account = eks_cluster.add_service_account( "kubernetes-external-secrets", name="kubernetes-external-secrets", namespace="kube-system" ) # Define the policy in JSON externalsecrets_policy_statement_json_1 = { "Effect": "Allow", "Action": [ "secretsmanager:GetResourcePolicy", "secretsmanager:GetSecretValue", "secretsmanager:DescribeSecret", "secretsmanager:ListSecretVersionIds" ], "Resource": [ "*" ] } # Add the policies to the service account externalsecrets_service_account.add_to_policy(iam.PolicyStatement.from_json(externalsecrets_policy_statement_json_1)) # Deploy the Helm Chart eks_cluster.add_helm_chart( "external-secrets", chart="kubernetes-external-secrets", repository="https://external-secrets.github.io/kubernetes-external-secrets/", namespace="kube-system", values={ "env": { "AWS_REGION": self.region }, "serviceAccount": { "name": "kubernetes-external-secrets", "create": False }, "securityContext": { "fsGroup": 65534 } } ) # Deploy Flux # Deploy the Helm Chart eks_cluster.add_helm_chart( "flux", chart="flux", repository="https://charts.fluxcd.io", namespace="kube-system", values={ "git": { "url": "git@github.com:jasonumiker/k8s-plus-aws-gitops", "path": "k8s-app-resources", "branch": "master" } } ) # Deploy Prometheus and Grafana # TODO Replace this with the new AWS Managed Prometheus and Grafana when available eks_cluster.add_helm_chart( "metrics", chart="kube-prometheus-stack", repository="https://prometheus-community.github.io/helm-charts", namespace="monitoring", values={ "prometheus": { "prometheusSpec": { "storageSpec": { "volumeClaimTemplate": { "spec": { "accessModes": [ "ReadWriteOnce" ], "resources": { "requests": { "storage": "8Gi" } }, "storageClassName": "gp2" } } } } }, "alertmanager": { "alertmanagerSpec": { "storage": { "volumeClaimTemplate": { "spec": { "accessModes": [ "ReadWriteOnce" ], "resources": { "requests": { "storage": "2Gi" } }, "storageClassName": "gp2" } } } } }, "grafana": { "persistence": { "enabled": "true", "storageClassName": "gp2" } } } ) # Deploy Fluentbit and Elasticsearch # Deploy an ElasticSearch Domain es_domain = es.Domain( self, "ESDomain", version=es.ElasticsearchVersion.V7_9 ) # Create the Service Account fluentbit_service_account = eks_cluster.add_service_account( "fluentbit", name="fluentbit", namespace="monitoring" ) # Define the policy in JSON fluentbit_policy_statement_json_1 = { "Effect": "Allow", "Action": [ "es:ESHttp*" ], "Resource": [ es_domain.domain_arn ] } # Add the policies to the service account fluentbit_service_account.add_to_policy(iam.PolicyStatement.from_json(fluentbit_policy_statement_json_1)) # Grant fluentbit access to our ES Domain es_domain.grant_write(fluentbit_service_account) eks_cluster.add_helm_chart( "fluent-bit", chart="fluent-bit", repository="https://fluent.github.io/helm-charts", namespace="monitoring", values={ "serviceAccount": { "create": False, "name": 
"fluentbit" }, "config": { "outputs": "[OUTPUT]\n Name es\n Match *\n Host "+es_domain.domain_endpoint+"\n Port 443\n TLS On\n AWS_Auth On\n AWS_Region "+self.region+"\n Retry_Limit 6\n", } } )
def __init__( self, stack: core.Stack, prefix: str, secret: aws_secretsmanager.Secret, vpc_parameters: VPCParameters, database: Union[aws_rds.CfnDBInstance, aws_rds.CfnDBCluster], kms_key: Optional[aws_kms.IKey] = None, ) -> None: """ Constructor. :param stack: A stack in which resources should be created. :param prefix: A prefix to give to every resource. :param secret: A secret instance which the lambda function should be able to access. :param vpc_parameters: VPC parameters for resource (e.g. lambda rotation function) configuration. :param database: A database (RDS instance or cluster) whose master password should be rotated. :param kms_key: Custom or managed KMS key for secret encryption which the lambda function should be able to access. """ super().__init__() self.__prefix = prefix + 'SecretRotation' # Read more about the permissions required to successfully rotate a secret: # https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets-required-permissions.html rotation_lambda_role_statements = [ # We enforce lambdas to run in a VPC. # Therefore lambdas need some network interface permissions. aws_iam.PolicyStatement( actions=[ 'ec2:CreateNetworkInterface', 'ec2:ModifyNetworkInterface', 'ec2:DeleteNetworkInterface', 'ec2:AttachNetworkInterface', 'ec2:DetachNetworkInterface', 'ec2:DescribeNetworkInterfaces', "logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents", ], effect=aws_iam.Effect.ALLOW, resources=['*'] ), # Lambda needs to call secrets manager to get the secret value in order to update the database password. aws_iam.PolicyStatement( actions=[ "secretsmanager:DescribeSecret", "secretsmanager:GetSecretValue", "secretsmanager:PutSecretValue", "secretsmanager:UpdateSecretVersionStage" ], effect=aws_iam.Effect.ALLOW, resources=[secret.secret_arn] ), # GetRandomPassword is not tied to a specific resource, so it has to be granted on '*'. # This policy does not impose any security risks. aws_iam.PolicyStatement( actions=[ "secretsmanager:GetRandomPassword" ], effect=aws_iam.Effect.ALLOW, resources=['*'] ) ] if kms_key is not None: rotation_lambda_role_statements.append( # Secrets may be KMS encrypted. # Therefore the lambda function should be able to decrypt this value. aws_iam.PolicyStatement( actions=[ 'kms:GenerateDataKey', 'kms:Decrypt', ], effect=aws_iam.Effect.ALLOW, resources=[kms_key.key_arn], ) ) self.rotation_lambda_role = aws_iam.Role( scope=stack, id=self.__prefix + 'LambdaRole', role_name=self.__prefix + 'LambdaRole', assumed_by=aws_iam.CompositePrincipal( aws_iam.ServicePrincipal("lambda.amazonaws.com"), aws_iam.ServicePrincipal("secretsmanager.amazonaws.com"), ), inline_policies={ self.__prefix + 'LambdaPolicy': aws_iam.PolicyDocument( statements=rotation_lambda_role_statements ) }, ) # Build the rotation lambda function's source code path. dir_path = os.path.dirname(os.path.realpath(__file__)) path = os.path.join(dir_path, self.LAMBDA_BACKEND_DEPLOYMENT_PACKAGE) # Create a lambda function responsible for rds password rotation. self.rotation_lambda_function = LambdaFunction( scope=stack, prefix=self.__prefix, description=( 'A lambda function that is utilized by AWS SecretsManager to rotate a secret after X number of days. ' 'This lambda function connects to a given database and changes its password to whatever password was ' 'provided by AWS SecretsManager.' 
), memory=128, timeout=60, handler='lambda_function.lambda_handler', runtime=Runtime.PYTHON_2_7, role=self.rotation_lambda_role, env={ 'SECRETS_MANAGER_ENDPOINT': f'https://secretsmanager.{stack.region}.amazonaws.com', 'INITIAL_DATABASE_PASSWORD': database.master_user_password }, security_groups=vpc_parameters.rotation_lambda_security_groups, subnets=vpc_parameters.rotation_lambda_subnets, vpc=vpc_parameters.rotation_lambda_vpc, source_code=Code.from_asset(path=path) ).lambda_function
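# Hedged usage sketch: the construct above creates the role and rotation lambda,
# but the secret still has to be wired to the function through a rotation
# schedule, and SecretsManager needs permission to invoke it. The class name
# SecretRotation and the stack/secret/vpc_parameters/database variables are
# assumptions for illustration, not names confirmed by the snippet above.
rotation = SecretRotation(
    stack=stack,
    prefix='MyDatabase',
    secret=secret,
    vpc_parameters=vpc_parameters,
    database=database,
)
# Standard CDK v1 API: schedule the rotation lambda against the secret.
secret.add_rotation_schedule(
    'RotationSchedule',
    rotation_lambda=rotation.rotation_lambda_function,
    automatically_after=core.Duration.days(30),  # example cadence
)
# SecretsManager must also be allowed to invoke the function directly.
rotation.rotation_lambda_function.add_permission(
    'SecretsManagerInvoke',
    principal=aws_iam.ServicePrincipal('secretsmanager.amazonaws.com'),
)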
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) eks_vpc = ec2.Vpc(self, "VPC", cidr="10.0.0.0/16") self.eks_vpc = eks_vpc # Create IAM Role For code-server bastion bastion_role = iam.Role( self, "BastionRole", assumed_by=iam.CompositePrincipal( iam.ServicePrincipal("ec2.amazonaws.com"), iam.AccountRootPrincipal()), managed_policies=[ iam.ManagedPolicy.from_aws_managed_policy_name( "AdministratorAccess") ]) self.bastion_role = bastion_role # Create EC2 Instance Profile for that Role instance_profile = iam.CfnInstanceProfile( self, "InstanceProfile", roles=[bastion_role.role_name]) # Create SecurityGroup for the Control Plane ENIs eks_security_group = ec2.SecurityGroup(self, "EKSSecurityGroup", vpc=eks_vpc, allow_all_outbound=True) eks_security_group.add_ingress_rule(ec2.Peer.ipv4('10.0.0.0/16'), ec2.Port.all_traffic()) # Create an EKS Cluster eks_cluster = eks.Cluster( self, "cluster", cluster_name="cluster", vpc=eks_vpc, masters_role=bastion_role, default_capacity_type=eks.DefaultCapacityType.NODEGROUP, default_capacity_instance=ec2.InstanceType("m5.large"), default_capacity=2, security_group=eks_security_group, endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE, version=eks.KubernetesVersion.V1_17) self.cluster_cert = eks_cluster.cluster_certificate_authority_data # Deploy ALB Ingress Controller # Create the k8s Service account and corresponding IAM Role mapped via IRSA alb_service_account = eks_cluster.add_service_account( "alb-ingress-controller", name="alb-ingress-controller", namespace="kube-system") # Create the PolicyStatements to attach to the role # I couldn't find a way to get this to work with a PolicyDocument and there are 10 of these alb_policy_statement_json_1 = { "Effect": "Allow", "Action": [ "acm:DescribeCertificate", "acm:ListCertificates", "acm:GetCertificate" ], "Resource": "*" } alb_policy_statement_json_2 = { "Effect": "Allow", "Action": [ "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateSecurityGroup", "ec2:CreateTags", "ec2:DeleteTags", "ec2:DeleteSecurityGroup", "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses", "ec2:DescribeInstances", "ec2:DescribeInstanceStatus", "ec2:DescribeInternetGateways", "ec2:DescribeNetworkInterfaces", "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeTags", "ec2:DescribeVpcs", "ec2:ModifyInstanceAttribute", "ec2:ModifyNetworkInterfaceAttribute", "ec2:RevokeSecurityGroupIngress" ], "Resource": "*" } alb_policy_statement_json_3 = { "Effect": "Allow", "Action": [ "elasticloadbalancing:AddListenerCertificates", "elasticloadbalancing:AddTags", "elasticloadbalancing:CreateListener", "elasticloadbalancing:CreateLoadBalancer", "elasticloadbalancing:CreateRule", "elasticloadbalancing:CreateTargetGroup", "elasticloadbalancing:DeleteListener", "elasticloadbalancing:DeleteLoadBalancer", "elasticloadbalancing:DeleteRule", "elasticloadbalancing:DeleteTargetGroup", "elasticloadbalancing:DeregisterTargets", "elasticloadbalancing:DescribeListenerCertificates", "elasticloadbalancing:DescribeListeners", "elasticloadbalancing:DescribeLoadBalancers", "elasticloadbalancing:DescribeLoadBalancerAttributes", "elasticloadbalancing:DescribeRules", "elasticloadbalancing:DescribeSSLPolicies", "elasticloadbalancing:DescribeTags", "elasticloadbalancing:DescribeTargetGroups", "elasticloadbalancing:DescribeTargetGroupAttributes", "elasticloadbalancing:DescribeTargetHealth", "elasticloadbalancing:ModifyListener", "elasticloadbalancing:ModifyLoadBalancerAttributes", 
"elasticloadbalancing:ModifyRule", "elasticloadbalancing:ModifyTargetGroup", "elasticloadbalancing:ModifyTargetGroupAttributes", "elasticloadbalancing:RegisterTargets", "elasticloadbalancing:RemoveListenerCertificates", "elasticloadbalancing:RemoveTags", "elasticloadbalancing:SetIpAddressType", "elasticloadbalancing:SetSecurityGroups", "elasticloadbalancing:SetSubnets", "elasticloadbalancing:SetWebAcl" ], "Resource": "*" } alb_policy_statement_json_4 = { "Effect": "Allow", "Action": [ "iam:CreateServiceLinkedRole", "iam:GetServerCertificate", "iam:ListServerCertificates" ], "Resource": "*" } alb_policy_statement_json_5 = { "Effect": "Allow", "Action": ["cognito-idp:DescribeUserPoolClient"], "Resource": "*" } alb_policy_statement_json_6 = { "Effect": "Allow", "Action": [ "waf-regional:GetWebACLForResource", "waf-regional:GetWebACL", "waf-regional:AssociateWebACL", "waf-regional:DisassociateWebACL" ], "Resource": "*" } alb_policy_statement_json_7 = { "Effect": "Allow", "Action": ["tag:GetResources", "tag:TagResources"], "Resource": "*" } alb_policy_statement_json_8 = { "Effect": "Allow", "Action": ["waf:GetWebACL"], "Resource": "*" } alb_policy_statement_json_9 = { "Effect": "Allow", "Action": [ "wafv2:GetWebACL", "wafv2:GetWebACLForResource", "wafv2:AssociateWebACL", "wafv2:DisassociateWebACL" ], "Resource": "*" } alb_policy_statement_json_10 = { "Effect": "Allow", "Action": [ "shield:DescribeProtection", "shield:GetSubscriptionState", "shield:DeleteProtection", "shield:CreateProtection", "shield:DescribeSubscription", "shield:ListProtections" ], "Resource": "*" } # Attach the necessary permissions alb_service_account.add_to_policy( iam.PolicyStatement.from_json(alb_policy_statement_json_1)) alb_service_account.add_to_policy( iam.PolicyStatement.from_json(alb_policy_statement_json_2)) alb_service_account.add_to_policy( iam.PolicyStatement.from_json(alb_policy_statement_json_3)) alb_service_account.add_to_policy( iam.PolicyStatement.from_json(alb_policy_statement_json_4)) alb_service_account.add_to_policy( iam.PolicyStatement.from_json(alb_policy_statement_json_5)) alb_service_account.add_to_policy( iam.PolicyStatement.from_json(alb_policy_statement_json_6)) alb_service_account.add_to_policy( iam.PolicyStatement.from_json(alb_policy_statement_json_7)) alb_service_account.add_to_policy( iam.PolicyStatement.from_json(alb_policy_statement_json_8)) alb_service_account.add_to_policy( iam.PolicyStatement.from_json(alb_policy_statement_json_9)) alb_service_account.add_to_policy( iam.PolicyStatement.from_json(alb_policy_statement_json_10)) # Deploy the ALB Ingress Controller from the Helm chart eks_cluster.add_helm_chart( "aws-alb-ingress-controller", chart="aws-alb-ingress-controller", repository= "http://storage.googleapis.com/kubernetes-charts-incubator", namespace="kube-system", values={ "clusterName": "cluster", "awsRegion": os.environ["CDK_DEFAULT_REGION"], "awsVpcID": eks_vpc.vpc_id, "rbac": { "create": True, "serviceAccount": { "create": False, "name": "alb-ingress-controller" } } }) # Create code-server bastion # Get Latest Amazon Linux AMI amzn_linux = ec2.MachineImage.latest_amazon_linux( generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2, edition=ec2.AmazonLinuxEdition.STANDARD, virtualization=ec2.AmazonLinuxVirt.HVM, storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE) # Create SecurityGroup for code-server security_group = ec2.SecurityGroup(self, "SecurityGroup", vpc=eks_vpc, allow_all_outbound=True) security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8080)) # Create our EC2 
instance running CodeServer code_server_instance = ec2.Instance( self, "CodeServerInstance", instance_type=ec2.InstanceType("t3.large"), machine_image=amzn_linux, role=bastion_role, vpc=eks_vpc, vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC), security_group=security_group, block_devices=[ ec2.BlockDevice(device_name="/dev/xvda", volume=ec2.BlockDeviceVolume.ebs(20)) ]) # Add UserData code_server_instance.user_data.add_commands( "mkdir -p ~/.local/lib ~/.local/bin ~/.config/code-server") code_server_instance.user_data.add_commands( "curl -fL https://github.com/cdr/code-server/releases/download/v3.5.0/code-server-3.5.0-linux-amd64.tar.gz | tar -C ~/.local/lib -xz" ) code_server_instance.user_data.add_commands( "mv ~/.local/lib/code-server-3.5.0-linux-amd64 ~/.local/lib/code-server-3.5.0" ) code_server_instance.user_data.add_commands( "ln -s ~/.local/lib/code-server-3.5.0/bin/code-server ~/.local/bin/code-server" ) code_server_instance.user_data.add_commands( "echo \"bind-addr: 0.0.0.0:8080\" > ~/.config/code-server/config.yaml" ) code_server_instance.user_data.add_commands( "echo \"auth: password\" >> ~/.config/code-server/config.yaml") code_server_instance.user_data.add_commands( "echo \"password: $(curl -s http://169.254.169.254/latest/meta-data/instance-id)\" >> ~/.config/code-server/config.yaml" ) code_server_instance.user_data.add_commands( "echo \"cert: false\" >> ~/.config/code-server/config.yaml") code_server_instance.user_data.add_commands( "~/.local/bin/code-server &") code_server_instance.user_data.add_commands( "yum -y install jq gettext bash-completion moreutils") code_server_instance.user_data.add_commands( "sudo pip install --upgrade awscli && hash -r") code_server_instance.user_data.add_commands( "echo 'export ALB_INGRESS_VERSION=\"v1.1.8\"' >> ~/.bash_profile") code_server_instance.user_data.add_commands( "curl --silent --location -o /usr/local/bin/kubectl \"https://amazon-eks.s3.us-west-2.amazonaws.com/1.17.9/2020-08-04/bin/linux/amd64/kubectl\"" ) code_server_instance.user_data.add_commands( "chmod +x /usr/local/bin/kubectl") code_server_instance.user_data.add_commands( "curl -L https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash" ) code_server_instance.user_data.add_commands( "export ACCOUNT_ID=$(aws sts get-caller-identity --output text --query Account)" ) code_server_instance.user_data.add_commands( "export AWS_REGION=$(curl -s 169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.region')" ) code_server_instance.user_data.add_commands( "echo \"export ACCOUNT_ID=${ACCOUNT_ID}\" | tee -a ~/.bash_profile" ) code_server_instance.user_data.add_commands( "echo \"export AWS_REGION=${AWS_REGION}\" | tee -a ~/.bash_profile" ) code_server_instance.user_data.add_commands( "aws configure set default.region ${AWS_REGION}") code_server_instance.user_data.add_commands( "curl --silent --location https://rpm.nodesource.com/setup_12.x | bash -" ) code_server_instance.user_data.add_commands("yum -y install nodejs") code_server_instance.user_data.add_commands( "amazon-linux-extras enable python3") code_server_instance.user_data.add_commands( "yum install -y python3 --disablerepo amzn2-core") code_server_instance.user_data.add_commands("yum install -y git") code_server_instance.user_data.add_commands( "rm /usr/bin/python && ln -s /usr/bin/python3 /usr/bin/python && ln -s /usr/bin/pip3 /usr/bin/pip" ) code_server_instance.user_data.add_commands("npm install -g aws-cdk") code_server_instance.user_data.add_commands( "echo 'export 
KUBECONFIG=~/.kube/config' >> ~/.bash_profile") code_server_instance.user_data.add_commands( "git clone https://github.com/jasonumiker/eks-school.git") # Add ALB lb = elbv2.ApplicationLoadBalancer(self, "LB", vpc=eks_vpc, internet_facing=True) listener = lb.add_listener("Listener", port=80) listener.connections.allow_default_port_from_any_ipv4( "Open to the Internet") listener.connections.allow_to_any_ipv4( port_range=ec2.Port(string_representation="TCP 8080", protocol=ec2.Protocol.TCP, from_port=8080, to_port=8080)) listener.add_targets( "Target", port=8080, targets=[ elbv2.InstanceTarget( instance_id=code_server_instance.instance_id, port=8080) ])
def build_team_role( scope: core.Construct, context: "Context", team_name: str, policy_names: List[str], scratch_bucket: s3.IBucket, team_kms_key: kms.Key, session_timeout: core.Duration, ) -> iam.Role: env_name = context.name partition = core.Aws.PARTITION account = core.Aws.ACCOUNT_ID region = core.Aws.REGION lake_role_name: str = f"orbit-{env_name}-{team_name}-{region}-role" role_prefix: str = f"/{context.role_prefix}/" if context.role_prefix else "/" kms_keys = [team_kms_key.key_arn] scratch_bucket_kms_key = IamBuilder.get_kms_key_scratch_bucket( context=context) if scratch_bucket_kms_key: kms_keys.append(scratch_bucket_kms_key) lake_operational_policy = iam.ManagedPolicy( scope=scope, id="lake_operational_policy", managed_policy_name= f"orbit-{env_name}-{team_name}-{region}-user-access", statements=[ iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "s3:*", ], resources=[ f"arn:{partition}:s3:::sagemaker-{region}-{account}", f"arn:{partition}:s3:::sagemaker-{region}-{account}/*", scratch_bucket.bucket_arn, f"{scratch_bucket.bucket_arn}/{team_name}/*", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=["s3:List*", "s3:Get*", "s3:Put*"], resources=[ f"arn:{partition}:s3:::{context.toolkit.s3_bucket}", f"arn:{partition}:s3:::{context.toolkit.s3_bucket}/samples/*", f"arn:{partition}:s3:::{context.toolkit.s3_bucket}/codeseeder/*", f"arn:{partition}:s3:::{context.toolkit.s3_bucket}/teams/{team_name}/*", f"arn:{partition}:s3:::{context.toolkit.s3_bucket}/helm/repositories/env/*", f"arn:{partition}:s3:::{context.toolkit.s3_bucket}/helm/repositories/teams/{team_name}/*", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=["ssm:Get*"], resources=[ f"arn:{partition}:ssm:{region}:{account}:parameter/orbit*", f"arn:{partition}:ssm:{region}:{account}:parameter/emr_launch/", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=["ssm:PutParameter"], resources=[ f"arn:{partition}:ssm:{region}:{account}:parameter/orbit/{env_name}/teams/{team_name}/user*", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "redshift:GetClusterCredentials", "redshift:CreateClusterUser", "redshift:DeleteCluster", ], resources=[ f"arn:{partition}:redshift:{region}:{account}:dbuser:orbit-{env_name}-{team_name}*", f"arn:{partition}:redshift:{region}:{account}:dbuser:orbit-{env_name}-{team_name}*/master", f"arn:{partition}:redshift:{region}:{account}:dbuser:orbit-{env_name}-{team_name}*/defaultdb", f"arn:{partition}:redshift:{region}:{account}:dbname:orbit-{env_name}-{team_name}*/defaultdb", f"arn:{partition}:redshift:{region}:{account}:cluster:orbit-{env_name}-{team_name}*", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "sns:*", ], resources=[ f"arn:{partition}:sns:{region}:{account}:{env_name}-{team_name}*", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=["iam:PassRole"], resources=[ f"arn:{partition}:iam::{account}:role{role_prefix}{lake_role_name}" ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "ssm:Describe*", "codeartifact:Describe*", "codeartifact:Get*", "codeartifact:List*", "codeartifact:Read*", "sts:GetServiceBearerToken", "s3:ListAllMyBuckets", "lambda:List*", "lambda:Get*", "iam:List*", "tag:GetResources", "ecr:Get*", "ecr:List*", "ecr:Describe*", "ecr:BatchGetImage", "ecr:BatchCheckLayerAvailability", "cloudwatch:PutMetricData", "redshift:DescribeClusters", "states:List*", "states:Get*", "states:Describe*", "glue:Get*", "glue:List*", "glue:Search*", "athena:*", "ecs:Describe*", "ecs:ListTasks", "ec2:Describe*", 
"redshift:DescribeClusters", "elasticmapreduce:List*", "elasticmapreduce:Get*", "elasticmapreduce:Describe*", "elasticmapreduce:TerminateJobFlows", "elasticmapreduce:AddJobFlowSteps", "sagemaker:*", "databrew:*", "lakeformation:GetDataAccess", "fsx:Describe*", "fsx:List*", ], resources=["*"], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "ecr:*", ], resources=[ f"arn:{partition}:ecr:{region}:{account}:repository/orbit-{env_name}/users/*" ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "kms:Encrypt", "kms:Decrypt", "kms:ReEncrypt", "kms:GenerateDataKey", "kms:DescribeKey" ], resources=kms_keys, ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "lambda:InvokeFunction", ], resources=[ f"arn:{partition}:lambda:{region}:{account}:function:orbit-{env_name}-{team_name}-*", f"arn:{partition}:lambda:{region}:{account}:function:orbit-{env_name}-token-validation", f"arn:{partition}:lambda:{region}:{account}:function:orbit-{env_name}-eks-service-handler", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "cloudformation:DescribeStacks", ], resources=[ f"arn:{partition}:cloudformation:{region}:{account}:stack/orbit-{env_name}/*", f"arn:{partition}:cloudformation:{region}:{account}:stack/aws-codeseeder-orbit*", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "ssm:GetParameters", "ssm:DescribeParameters", "ssm:GetParameter", "ssm:DescribeParameter", ], resources=[ f"arn:{partition}:ssm:{region}:{account}:parameter/orbit/{env_name}/teams/{team_name}/*", f"arn:{partition}:ssm:{region}:{account}:parameter/Orbit-Slack-Notifications", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "ssm:DeleteParameter", "ssm:DeleteParameters", ], resources=[ f"arn:{partition}:ssm:{region}:{account}:parameter/orbit/{env_name}/changeset", f"arn:{partition}:ssm:{region}:{account}:parameter/orbit/{env_name}/manifest", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "ssm:DescribeParameters", ], resources=[f"arn:{partition}:ssm:{region}:{account}:*"], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=["s3:Put*"], resources=[ f"arn:{partition}:s3:::{context.toolkit.s3_bucket}", f"arn:{partition}:s3:::{context.toolkit.s3_bucket}/cli/remote/*", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "codebuild:StartBuild", "codebuild:BatchGetBuilds" ], resources=[ f"arn:{partition}:codebuild:{region}:{account}:project/orbit-{env_name}", f"arn:{partition}:codebuild:{region}:{account}:project/codeseeder-orbit", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "logs:CreateLogStream", "logs:CreateLogGroup", "logs:DescribeLogStreams", "logs:PutLogEvents", ], resources=[ f"arn:{partition}:logs:{region}:{account}:log-group:/aws/codebuild/orbit-{env_name}:log-stream:*", # noqa f"arn:{partition}:logs:{region}:{account}:log-group:/aws/codebuild/codeseeder-orbit:log-stream:*", # noqa f"arn:{partition}:logs:{region}:{account}:log-group:/aws-glue-databrew/*:log-stream:*", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "logs:List*", "logs:Describe*", "logs:StartQuery", "logs:StopQuery", "logs:Get*", "logs:Filter*", "events:*", ], resources=[ f"arn:{partition}:logs:{region}:{account}:log-group:/aws/codebuild/orbit-{env_name}*:log-stream:*", # noqa f"arn:{partition}:logs:{region}:{account}:log-group:/aws/codebuild/codeseeder-orbit:log-stream:*", # noqa f"arn:{partition}:logs:{region}:{account}:log-group:/aws-glue-databrew/*:log-stream:*", f"arn:{partition}:logs:{region}:{account}:log-group:/aws/sagemaker/*", 
f"arn:{partition}:logs:{region}:{account}:log-group:/aws/sagemaker/*:log-stream:*", f"arn:{partition}:logs:{region}:{account}:log-group:/aws/eks/orbit*", f"arn:{partition}:events:{region}:{account}:rule/orbit-{env_name}-{team_name}-*", f"arn:{partition}:logs:{region}:{account}:log-group:/aws-glue-databrew/*", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "ecr:InitiateLayerUpload", ], resources=[ f"arn:{partition}:ecr:{region}:{account}:repository/*", ], ), iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=[ "eks:DescribeCluster", ], resources=[ f"arn:{partition}:eks:{region}:{account}:cluster/orbit-{env_name}", ], ), ], ) managed_policies = [ lake_operational_policy, # For EKS iam.ManagedPolicy.from_aws_managed_policy_name( managed_policy_name="AmazonEKS_CNI_Policy"), ] # Parse list to IAM policies # First check if the policies are AWS managed or not, and if they have a tag aws_policies, customer_policies = process_policies( policy_names=policy_names, account_id=context.account_id) aws_managed_user_policies = [ iam.ManagedPolicy.from_aws_managed_policy_name( managed_policy_name=policy_name) for policy_name in aws_policies ] orbit_custom_policies = [ iam.ManagedPolicy.from_managed_policy_name( scope=scope, id=policy_name, managed_policy_name=policy_name) for policy_name in customer_policies ] managed_policies = (managed_policies + cast(List[object], aws_managed_user_policies) + cast(List[object], orbit_custom_policies)) role = iam.Role( scope=scope, id=f"lakerole-for-{env_name}-{team_name}", role_name=lake_role_name, assumed_by=cast( iam.IPrincipal, iam.CompositePrincipal( iam.ServicePrincipal("ec2.amazonaws.com"), iam.ServicePrincipal("glue.amazonaws.com"), iam.ServicePrincipal("sagemaker.amazonaws.com"), iam.ServicePrincipal("redshift.amazonaws.com"), iam.ServicePrincipal("codepipeline.amazonaws.com"), iam.ServicePrincipal("codebuild.amazonaws.com"), iam.ServicePrincipal("personalize.amazonaws.com"), iam.ServicePrincipal("databrew.amazonaws.com"), ), ), managed_policies=cast(Optional[Sequence[iam.IManagedPolicy]], managed_policies), max_session_duration=session_timeout, ) if role.assume_role_policy: role.assume_role_policy.add_statements( iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=["sts:AssumeRoleWithWebIdentity"], principals=[ cast( iam.IPrincipal, iam.FederatedPrincipal( federated= f"arn:{partition}:iam::{account}:oidc-provider/{context.eks_oidc_provider}", conditions={ "StringLike": { f"{context.eks_oidc_provider}:sub": f"system:serviceaccount:{team_name}*:*" } }, ), ) ], ), ) return role
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # data store dynamodb_table = dynamodb.Table( self, 'dynamodb_table', table_name=f'{PROJECT}_{STAGE}', partition_key=dynamodb.Attribute( name='date', type=dynamodb.AttributeType.STRING), billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST, point_in_time_recovery=False, removal_policy=core.RemovalPolicy.DESTROY, server_side_encryption=True, ) # public api public_api = appsync.CfnGraphQLApi( self, 'public_api', name=f'{PROJECT}_{STAGE}', authentication_type='API_KEY', ) now = time.localtime() epoch = time.mktime(now) public_api_key = appsync.CfnApiKey( self, 'public_api_key', api_id=public_api.attr_api_id, expires=epoch + core.Duration.days(90).to_seconds(), ) with open('schema.gql', mode='r') as f: graphql_schema = f.read() appsync.CfnGraphQLSchema(self, 'public_api_schema', api_id=public_api.attr_api_id, definition=graphql_schema) public_api_role = iam.Role( self, 'public_api_role', assumed_by=iam.ServicePrincipal('appsync.amazonaws.com'), managed_policies=[ iam.ManagedPolicy.from_aws_managed_policy_name( 'AmazonDynamoDBFullAccess') ], ) public_api_datasource = appsync.CfnDataSource( self, 'public_api_datasource', api_id=public_api.attr_api_id, name=f'{PROJECT}_{STAGE}_dynamodb', type='AMAZON_DYNAMODB', dynamo_db_config={ 'awsRegion': 'us-east-1', 'tableName': dynamodb_table.table_name, }, service_role_arn=public_api_role.role_arn, ) with open('mapping_templates/get_holiday.json', mode='r') as f: get_holiday_json = f.read() appsync.CfnResolver( self, 'public_api_resolver_get_holiday', api_id=public_api.attr_api_id, type_name='Query', field_name='getHoliday', data_source_name=public_api_datasource.attr_name, kind='UNIT', request_mapping_template=get_holiday_json, response_mapping_template='$util.toJson($context.result)', ) with open('mapping_templates/list_holidays.json', mode='r') as f: list_holidays_json = f.read() appsync.CfnResolver( self, 'public_api_resolver_list_holidays', api_id=public_api.attr_api_id, type_name='Query', field_name='listHolidays', data_source_name=public_api_datasource.attr_name, kind='UNIT', request_mapping_template=list_holidays_json, response_mapping_template='$util.toJson($context.result)', ) # upload the lambda source code to s3 lambda_assets = s3_assets.Asset(self, 'lambda_assets', path='./function/.artifact/') # data update function func_api = lambda_.Function( self, f'{PROJECT}-{STAGE}-func', function_name=f'{PROJECT}-{STAGE}-func', code=lambda_.Code.from_bucket(bucket=lambda_assets.bucket, key=lambda_assets.s3_object_key), handler='app.handler', runtime=lambda_.Runtime.PYTHON_3_7, timeout=core.Duration.seconds(120), log_retention=logs.RetentionDays.SIX_MONTHS, memory_size=128, tracing=lambda_.Tracing.ACTIVE, ) func_api.add_environment('TABLE_NAME', dynamodb_table.table_name) func_api.add_environment('CSV_URL', CSV_URL) func_api.add_to_role_policy( iam.PolicyStatement( actions=[ 'dynamodb:Get*', 'dynamodb:Put*', 'dynamodb:Batch*', ], resources=[dynamodb_table.table_arn], )) # scheduled execution events.Rule( self, f'{PROJECT}-{STAGE}-schedule', enabled=True, schedule=events.Schedule.rate(core.Duration.days(10)), targets=[events_targets.LambdaFunction(func_api)], ) # lambda@edge func_lambdaedge = lambda_.Function( self, f'{PROJECT}-{STAGE}-func-lambdaedge', function_name=f'{PROJECT}-{STAGE}-func-lambdaedge', code=lambda_.Code.from_inline( open('./function/src/lambdaedge.py').read().replace( '__X_API_KEY__', public_api_key.attr_api_key)), handler='index.handler', 
runtime=lambda_.Runtime.PYTHON_3_7, timeout=core.Duration.seconds(30), memory_size=128, role=iam.Role( self, f'{PROJECT}-{STAGE}-func-lambdaedge-role', assumed_by=iam.CompositePrincipal( iam.ServicePrincipal('edgelambda.amazonaws.com'), iam.ServicePrincipal('lambda.amazonaws.com'), ), managed_policies=[ iam.ManagedPolicy.from_aws_managed_policy_name( 'service-role/AWSLambdaBasicExecutionRole'), ], ), ) lambdaedge_version = func_lambdaedge.add_version( hashlib.sha256( open('./function/src/lambdaedge.py').read().replace( '__X_API_KEY__', public_api_key.attr_api_key).encode()).hexdigest()) # ACM certificates = acm.Certificate( self, 'certificates', domain_name=DOMAIN, validation_method=acm.ValidationMethod.DNS, ) # CDN cdn = cloudfront.CloudFrontWebDistribution( self, f'{PROJECT}-{STAGE}-cloudfront', origin_configs=[ cloudfront.SourceConfiguration( behaviors=[ # default behavior cloudfront.Behavior( allowed_methods=cloudfront. CloudFrontAllowedMethods.ALL, default_ttl=core.Duration.seconds(0), max_ttl=core.Duration.seconds(0), min_ttl=core.Duration.seconds(0), is_default_behavior=True, lambda_function_associations=[ cloudfront.LambdaFunctionAssociation( event_type=cloudfront.LambdaEdgeEventType. ORIGIN_REQUEST, lambda_function=lambdaedge_version, ), ]) ], custom_origin_source=cloudfront.CustomOriginConfig( domain_name=core.Fn.select( 2, core.Fn.split('/', public_api.attr_graph_ql_url)), ), ) ], alias_configuration=cloudfront.AliasConfiguration( acm_cert_ref=certificates.certificate_arn, names=[DOMAIN], security_policy=cloudfront.SecurityPolicyProtocol. TLS_V1_2_2018, ), price_class=cloudfront.PriceClass.PRICE_CLASS_ALL, ) core.CfnOutput( self, 'cloudfront-domain', value=cdn.domain_name, )
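# Aside (plain-Python illustration of the Fn.split/Fn.select trick above): the
# AppSync GraphQL URL has the form https://<api-id>.appsync-api.<region>.amazonaws.com/graphql,
# so splitting on '/' and taking index 2 yields the bare hostname that
# CloudFront needs as a custom origin. The URL below is a made-up example value.
example_url = "https://abc123.appsync-api.us-east-1.amazonaws.com/graphql"
example_domain = example_url.split("/")[2]  # index 0 is "https:", index 1 is ""
assert example_domain == "abc123.appsync-api.us-east-1.amazonaws.com"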
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # The code that defines your stack goes here EcsStack.readConfig(0) vpc = ec.Vpc( self, "Main", cidr="11.0.0.0/26", max_azs=2, nat_gateways=1, subnet_configuration=[ ec.SubnetConfiguration(name="public", cidr_mask=28, subnet_type=ec.SubnetType.PUBLIC), ec.SubnetConfiguration(name="private", cidr_mask=28, subnet_type=ec.SubnetType.PRIVATE) ]) cluster = ecs.Cluster(self, "TestingCluster", vpc=vpc) # defining the task iam role taskRole = iam.Role( self, id="taskRole", assumed_by=iam.CompositePrincipal( iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'), iam.ServicePrincipal(service='ec2.amazonaws.com')), role_name="webmaintaskRole", managed_policies=[ iam.ManagedPolicy.from_aws_managed_policy_name( "AmazonRDSFullAccess"), iam.ManagedPolicy.from_aws_managed_policy_name( "AmazonSQSFullAccess"), iam.ManagedPolicy.from_aws_managed_policy_name( "AmazonS3FullAccess"), iam.ManagedPolicy.from_aws_managed_policy_name( "CloudWatchFullAccess"), iam.ManagedPolicy.from_aws_managed_policy_name( "AmazonDynamoDBFullAccess"), iam.ManagedPolicy.from_aws_managed_policy_name( "AmazonRedshiftFullAccess"), iam.ManagedPolicy.from_aws_managed_policy_name( "AmazonKinesisFullAccess"), iam.ManagedPolicy.from_aws_managed_policy_name( "service-role/AmazonECSTaskExecutionRolePolicy"), iam.ManagedPolicy.from_aws_managed_policy_name( "AmazonSNSFullAccess"), iam.ManagedPolicy.from_aws_managed_policy_name( "service-role/AWSLambdaRole"), iam.ManagedPolicy(self, id="ManagedPolicy", managed_policy_name="Grant_dev", statements=[ iam.PolicyStatement(actions=[ "kms:Decrypt", "secretsmanager:GetSecretValue" ], resources=["*"]) ]) ]) # taskRole.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSFullAccess")) # WebApp Main task Definition & Service webmain_task_definition = ecs.FargateTaskDefinition( self, "WebAppMain", memory_limit_mib=512, cpu=256, task_role=taskRole, execution_role=taskRole) webmain_container = webmain_task_definition.add_container( "webapp-mainContainer", image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"), environment=EcsStack.commands, docker_labels={ "com.datadoghq.ad.instances": "[{\"host\": \"%%host%%\", \"port\": 80}]", "com.datadoghq.ad.check_names": "[\"ecs_fargate\"]", "com.datadoghq.ad.init_configs": "[{}]" }, logging=ecs.LogDriver.aws_logs(stream_prefix="awslogs")) # Clearing the environment variables from the commands (Map) EcsStack.commands.clear() EcsStack.readConfig(1) webmain_datadog_container = webmain_task_definition.add_container( "webapp-main_datadog_Container", image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"), environment=EcsStack.commands) webmain_port_mapping = ecs.PortMapping(container_port=80, host_port=80, protocol=ecs.Protocol.TCP) datadog_port_mapping1 = ecs.PortMapping(container_port=8126, host_port=8126, protocol=ecs.Protocol.TCP) datadog_port_mapping2 = ecs.PortMapping(container_port=8125, host_port=8125, protocol=ecs.Protocol.TCP) webmain_container.add_port_mappings(webmain_port_mapping) webmain_datadog_container.add_port_mappings(datadog_port_mapping1) webmain_datadog_container.add_port_mappings(datadog_port_mapping2) # Security group for service webmain_sg = ec.SecurityGroup(self, "webmain_sg", vpc=vpc, allow_all_outbound=True, security_group_name="WebAppMain") webmain_sg.add_ingress_rule(peer=Peer.ipv4("202.65.133.194/32"), connection=Port.tcp(5432)) webmain_service = ecs.FargateService( self, "webapp-main", 
cluster=cluster, task_definition=webmain_task_definition, desired_count=1, security_group=webmain_sg) # defining the load balancer webmain_lb = elbv2.ApplicationLoadBalancer( self, "LB", vpc=vpc, internet_facing=True, load_balancer_name="WebAppMain", # security_group= vpc_subnets=ec.SubnetSelection(subnet_type=ec.SubnetType.PUBLIC)) webmain_target_grp = elbv2.ApplicationTargetGroup( self, id="webapp-main-target", port=80, protocol=elbv2.ApplicationProtocol.HTTP, health_check=elbv2.HealthCheck(healthy_http_codes="200-399", healthy_threshold_count=2, unhealthy_threshold_count=2, port="traffic-port", protocol=elbv2.Protocol.HTTP, timeout=core.Duration.seconds(6), interval=core.Duration.seconds(10)), targets=[webmain_service], target_group_name="WebAppMain", target_type=elbv2.TargetType.IP, vpc=vpc) listener = webmain_lb.add_listener( "webMain_Listener", port=443, open=True, default_target_groups=[webmain_target_grp], certificate_arns=[ "arn:aws:acm:us-west-2:384853870836:certificate/182c0fdd-813f-4bd3-aee1-0b4543cfb52b" ]) listener2 = webmain_lb.add_listener( "webMain_Listener2", port=80, # default_target_groups=[webmain_target_grp] ) # elbv2.ApplicationListenerCertificate(self,"WebAppMAin_Certificate",listener=listener,certificate_arns=["arn:aws:acm:us-west-2:384853870836:certificate/182c0fdd-813f-4bd3-aee1-0b4543cfb52b"]) listener2.add_redirect_response(id="HttptoHttps", status_code="HTTP_301", port="443", protocol="HTTPS")
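# Hedged alternative sketch (the queue and bucket here are invented examples):
# instead of the broad *FullAccess managed policies attached to taskRole above,
# CDK grant_* helpers can scope the task role to the specific resources the
# containers actually touch.
from aws_cdk import aws_s3 as s3
from aws_cdk import aws_sqs as sqs

queue = sqs.Queue(self, "WorkQueue")
bucket = s3.Bucket(self, "DataBucket")
queue.grant_send_messages(taskRole)  # sqs:SendMessage on this queue only
bucket.grant_read_write(taskRole)    # s3 read/write on this bucket only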
def __init__( self, scope: Construct, id: str, context: "Context", team_name: str, team_policies: List[str], image: Optional[str], ) -> None: self.scope = scope self.id = id self.context: "Context" = context self.team_name: str = team_name self.team_policies: List[str] = team_policies self.image: Optional[str] = image super().__init__( scope=scope, id=id, stack_name=id, env=Environment(account=self.context.account_id, region=self.context.region), ) Tags.of(scope=cast(IConstruct, self)).add( key="Env", value=f"orbit-{self.context.name}") Tags.of(scope=cast(IConstruct, self)).add(key="TeamSpace", value=self.team_name) if self.context.networking.vpc_id is None: raise ValueError("self.context.networking.vpc_id is None!") self.i_vpc = ec2.Vpc.from_vpc_attributes( scope=self, id="vpc", vpc_id=self.context.networking.vpc_id, availability_zones=cast( List[str], self.context.networking.availability_zones), ) self.i_isolated_subnets = Ec2Builder.build_subnets( scope=self, subnet_manifests=context.networking.isolated_subnets) self.i_private_subnets = Ec2Builder.build_subnets( scope=self, subnet_manifests=context.networking.private_subnets) administrator_arns: List[str] = [ ] # A place to add other admins if needed for KMS admin_principals = iam.CompositePrincipal( *[iam.ArnPrincipal(arn) for arn in administrator_arns], iam.ArnPrincipal(f"arn:aws:iam::{self.context.account_id}:root"), ) self.team_kms_key: kms.Key = kms.Key( self, id="kms-key", removal_policy=core.RemovalPolicy.RETAIN, enabled=True, enable_key_rotation=True, policy=iam.PolicyDocument(statements=[ iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=["kms:*"], resources=["*"], principals=[cast(iam.IPrincipal, admin_principals)], ) ]), ) self.team_security_group: ec2.SecurityGroup = Ec2Builder.build_team_security_group( scope=self, context=context, team_name=self.team_name, vpc=self.i_vpc) self.policies: List[str] = self.team_policies if self.context.scratch_bucket_arn: self.scratch_bucket: s3.Bucket = cast( s3.Bucket, s3.Bucket.from_bucket_attributes( scope=self, id="scratch_bucket", bucket_arn=self.context.scratch_bucket_arn, bucket_name=self.context.scratch_bucket_arn.split(":::") [1], ), ) else: raise Exception( "Scratch bucket was not provided in Manifest ('ScratchBucketArn')" ) self.role_eks_pod = IamBuilder.build_team_role( scope=self, context=self.context, team_name=self.team_name, policy_names=self.policies, scratch_bucket=cast(s3.IBucket, self.scratch_bucket), team_kms_key=self.team_kms_key, ) shared_fs_name: str = f"orbit-{context.name}-{self.team_name}-shared-fs" if context.shared_efs_fs_id is None: raise Exception( "Shared EFS File system ID was not provided in Manifest ('SharedEfsFsId')" ) if context.shared_efs_sg_id is None: raise Exception( "Shared EFS File system security group ID was not provided in Manifest ('SharedEfsSgId')" ) self.shared_fs: efs.FileSystem = cast( efs.FileSystem, efs.FileSystem.from_file_system_attributes( scope=self, id=shared_fs_name, file_system_id=context.shared_efs_fs_id, security_group=ec2.SecurityGroup.from_security_group_id( scope=self, id="team_sec_group", security_group_id=context.shared_efs_sg_id), ), ) self.efs_ap: efs.AccessPoint = EfsBuilder.build_file_system_access_point( scope=self, team_name=team_name, shared_fs=self.shared_fs) team_ssm_parameter_name: str = f"/orbit/{context.name}/teams/{self.team_name}/team" self.context_parameter: ssm.StringParameter = ssm.StringParameter( scope=self, id=team_ssm_parameter_name, string_value=json.dumps({ "EfsId": self.shared_fs.file_system_id, 
"EfsApId": self.efs_ap.access_point_id, "EksPodRoleArn": self.role_eks_pod.role_arn, "ScratchBucket": self.scratch_bucket.bucket_name, "TeamKmsKeyArn": self.team_kms_key.key_arn, "TeamSecurityGroupId": self.team_security_group.security_group_id, }), type=ssm.ParameterType.STRING, description="Orbit Workbench Team Context.", parameter_name=team_ssm_parameter_name, simple_name=False, tier=ssm.ParameterTier.INTELLIGENT_TIERING, ) ssm_profile_name = f"/orbit/{self.context.name}/teams/{self.team_name}/user/profiles" self.user_profiles: ssm.StringParameter = ssm.StringParameter( scope=self, id=ssm_profile_name, string_value="[]", type=ssm.ParameterType.STRING, description="Team additional profiles created by the team users", parameter_name=ssm_profile_name, simple_name=False, tier=ssm.ParameterTier.INTELLIGENT_TIERING, )