def provision_efs(self, name: str, cfg: EFS, vpc: ec2.Vpc, security_group: ec2.SecurityGroup):
    """Create the Domino EFS file system and its root access point.

    Results are stored on ``self.efs`` and ``self.efs_access_point``.
    """
    # Retain the file system by default; destroy only when the config opts in.
    if cfg.removal_policy_destroy:
        removal = cdk.RemovalPolicy.DESTROY
    else:
        removal = cdk.RemovalPolicy.RETAIN
    self.efs = efs.FileSystem(
        self.scope,
        "Efs",
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
        security_group=security_group,
        file_system_name=name,
        performance_mode=efs.PerformanceMode.MAX_IO,
        throughput_mode=efs.ThroughputMode.PROVISIONED,
        # TODO: dev/nondev sizing
        provisioned_throughput_per_second=cdk.Size.mebibytes(100),
        removal_policy=removal,
    )
    # Root-owned access point at /domino with a world-writable root directory.
    self.efs_access_point = self.efs.add_access_point(
        "access_point",
        path="/domino",
        create_acl=efs.Acl(owner_uid="0", owner_gid="0", permissions="777"),
        posix_user=efs.PosixUser(uid="0", gid="0"),
    )
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    """Provision a VPC-backed EFS share and front a Lambda over it with an HTTP API."""
    super().__init__(scope, construct_id, **kwargs)
    vpc = ec2.Vpc(self, 'theVpc', max_azs=2)
    # File system is torn down together with the stack.
    file_system = efs.FileSystem(
        self, 'theFileSystem',
        vpc=vpc,
        removal_policy=RemovalPolicy.DESTROY)
    access_point = file_system.add_access_point(
        'theAccessPoint',
        create_acl=efs.Acl(owner_gid='1001', owner_uid='1001', permissions='750'),
        path="/export/lambda",
        posix_user=efs.PosixUser(gid="1001", uid="1001"))
    # Lambda mounts the share through the access point at /mnt/text.
    handler_fn = _lambda.Function(
        self, 'lambdaEfsHandler',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset('lambda_function'),
        handler='lambda_function.lambda_handler',
        vpc=vpc,
        filesystem=_lambda.FileSystem.from_efs_access_point(access_point, '/mnt/text'))
    # HTTP API with the Lambda as its default integration.
    http_api = api_gateway.HttpApi(
        self, 'EFS LAMBDA APIGATEWAY',
        default_integration=integrations.HttpLambdaIntegration(
            id="LambdaFunction", handler=handler_fn))
    CfnOutput(self, 'API Url', value=http_api.url)
def build_file_system_access_point(
    scope: core.Construct, team_name: str, shared_fs: efs.FileSystem, path: str, ap_name: str
) -> efs.AccessPoint:
    """Return an EFS access point rooted at ``/<team_name>/<path>`` on the shared file system."""
    # Fixed POSIX identity for team access points: uid 1000 / gid 100,
    # with mode 770 applied to the auto-created root directory.
    posix_identity = efs.PosixUser(gid="100", uid="1000")
    root_acl = efs.Acl(owner_gid="100", owner_uid="1000", permissions="770")
    return efs.AccessPoint(
        scope=scope,
        id=ap_name,
        file_system=cast(efs.IFileSystem, shared_fs),
        path=f"/{team_name}/{path}",
        posix_user=posix_identity,
        create_acl=root_acl,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Wire up a VPC, an EFS share, and a Lambda that reads the share via an access point."""
    super().__init__(scope, id, **kwargs)
    # Network plus the backing file system.
    vpc = aws_ec2.Vpc(self, "vpc", max_azs=3, nat_gateways=1)
    shared_fs = aws_efs.FileSystem(self, "efs-backend", vpc=vpc)
    # POSIX identity and root-directory ACL used by the access point.
    root_acl = aws_efs.Acl(owner_gid="1000", owner_uid="1000", permissions="0777")
    posix_identity = aws_efs.PosixUser(gid="1000", uid="1000")
    access_point = aws_efs.AccessPoint(
        self, "efs-accesspoint",
        path="/efs",
        file_system=shared_fs,
        posix_user=posix_identity,
        create_acl=root_acl)
    # Lambda with the share mounted at /mnt/efs and X-Ray tracing enabled.
    reader_fn = aws_lambda.Function(
        self, "read_efs",
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        code=aws_lambda.Code.from_asset("./function"),
        handler="efsread.handler",
        timeout=core.Duration.seconds(20),
        memory_size=128,
        retry_attempts=0,
        filesystem=aws_lambda.FileSystem.from_efs_access_point(access_point, '/mnt/efs'),
        tracing=aws_lambda.Tracing.ACTIVE,
        vpc=vpc,
        environment={"var": "x"})
    # Grant the function role blanket EFS permissions.
    reader_fn.add_to_role_policy(
        aws_iam.PolicyStatement(resources=["*"], actions=["elasticfilesystem:*"]))
def __init__(self, scope: core.Construct, id: str, datalake: DataLakeLayer,
             project_name: str, concurrency: int = 5, **kwargs) -> None:
    """Container-image Lambda crawler for one project, backed by the datalake's EFS.

    Builds the project's Docker image, deploys it as a Lambda in the
    datalake VPC with an EFS access point mounted at /mnt/efs, and
    attaches broad managed policies to the function role.
    """
    super().__init__(scope, id, **kwargs)
    # NOTE(review): only the name-mangled attribute is assigned here, but the
    # reads below use `self.datalake` — presumably a property defined on this
    # class outside this view; confirm it exists.
    self.__datalake = datalake
    # Build & push the project's Docker image to ECR.
    repo = ecr.DockerImageAsset(
        self, 'Repo',
        directory=os.path.join(root_dir, project_name),
        repository_name=project_name)
    self.function = lambda_.DockerImageFunction(
        self, project_name + '-repo',
        # Reference the just-built image by the tag embedded in its URI.
        code=lambda_.DockerImageCode.from_ecr(
            repository=repo.repository,
            tag=repo.image_uri.split(':')[-1]),
        # lambda_.DockerImageCode.from_image_asset(directory=os.path.join(src_root_dir,directory)),
        description='Python container lambda function for ' + repo.repository.repository_name,
        timeout=core.Duration.minutes(15),
        memory_size=4096,
        tracing=lambda_.Tracing.ACTIVE,
        # Note: This throttles the AWS S3 batch job.
        # Downloading too fast will cause f-droid to disconnect the crawler.
        reserved_concurrent_executions=concurrency,
        # Root-owned, world-writable per-project directory on the shared EFS,
        # mounted into the function at /mnt/efs.
        filesystem=lambda_.FileSystem.from_efs_access_point(
            ap=self.datalake.efs.add_access_point(
                project_name,
                path='/' + project_name,
                create_acl=efs.Acl(owner_gid="0", owner_uid="0", permissions="777")),
            mount_path='/mnt/efs'),
        environment={
            'EFS_MOUNT': '/mnt/efs'
        },
        vpc=self.datalake.vpc)
    # Broad managed policies for EFS, X-Ray, S3, CodeCommit and CodeGuru access.
    for name in [
        'AmazonElasticFileSystemClientFullAccess',
        'AWSXrayWriteOnlyAccess',
        'AmazonS3FullAccess',
        'AWSCodeCommitFullAccess',
        'AmazonCodeGuruReviewerFullAccess'
    ]:
        self.function.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(name))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """EFS-backed message-wall stack: VPC + EFS + Lambda behind an HTTP API.

    The Lambda mounts the file system through an access point at /mnt/msg;
    the HTTP API proxies all requests to it and the URL is exported as a
    stack output.
    """
    super().__init__(scope, id, **kwargs)
    # EFS needs to be set up in a VPC.
    vpc = ec2.Vpc(self, 'Vpc', max_azs=2)
    # File system to store information; deleted with the stack.
    fs = efs.FileSystem(self, 'FileSystem',
                        vpc=vpc,
                        removal_policy=core.RemovalPolicy.DESTROY)
    access_point = fs.add_access_point(
        'AccessPoint',
        create_acl=efs.Acl(owner_gid='1001', owner_uid='1001', permissions='750'),
        path="/export/lambda",
        posix_user=efs.PosixUser(gid="1001", uid="1001"))
    efs_lambda = _lambda.Function(
        self, 'rdsProxyHandler',
        runtime=_lambda.Runtime.PYTHON_3_8,
        # Code.asset() is deprecated; from_asset() is the supported equivalent.
        code=_lambda.Code.from_asset('lambda_fns'),
        handler='message_wall.lambda_handler',
        vpc=vpc,
        filesystem=_lambda.FileSystem.from_efs_access_point(access_point, '/mnt/msg'))
    # HTTP API backed by the "efs_lambda" function as default integration.
    api = api_gw.HttpApi(
        self, 'EFS Lambda',
        default_integration=integrations.LambdaProxyIntegration(handler=efs_lambda))
    core.CfnOutput(self, 'HTTP API Url', value=api.url)
def __init__(self, scope: core.Construct, construct_id: str, props, **kwargs) -> None:
    """WordPress Fargate service: EFS access point, ECS cluster, DB secret,
    load-balanced task with HTTPS, plus target-group tuning and autoscaling.

    ``props`` (dict) supplies the shared VPC, file system, RDS cluster,
    sizing knobs, and DNS settings created by sibling stacks.
    """
    super().__init__(scope, construct_id, **kwargs)
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
    # Access points allow multiple WordPress file systems to live on the same
    # EFS volume; the more data on an EFS volume the better it will perform.
    AccessPoint = props['file_system'].add_access_point(
        "local-access-point",
        path=f"/{props['IdentifierName']}",
        create_acl=efs.Acl(
            # uid/gid per https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
            owner_uid="100",
            owner_gid="101",
            permissions="0755"))
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html
    cluster = ecs.Cluster(
        self,
        "Cluster",
        vpc=props['vpc'],
        container_insights=props['ecs_enable_container_insights'])
    # Get needed secrets.
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ssm/StringParameter.html#aws_cdk.aws_ssm.StringParameter.from_secure_string_parameter_attributes
    # ParameterStoreTest = ssm.StringParameter.from_secure_string_parameter_attributes(
    #     self, "ParameterStoreTest",
    #     parameter_name="",  # Remember: KMS permissions for task execution role for parameter store key!
    #     version=1
    # )
    # Template for the generated DB-connection secret; the password field is
    # generated, the host is taken from the RDS cluster endpoint.
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Secret.html
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_secretsmanager/SecretStringGenerator.html
    dbtest = {
        "database_name": '',
        "username": '',
        "host": str(props["rds_instance"].cluster_endpoint.hostname)
    }
    WordpressDbConnectionSecret = secretsmanager.Secret(
        self,
        "WordpressDbConnectionSecret",
        generate_secret_string=secretsmanager.SecretStringGenerator(
            secret_string_template=json.dumps(dbtest),
            generate_string_key="password",
            exclude_characters='/"'))
    # ToDo: Lambda call to populate secrets but only
    # Task volume backed by the shared EFS via the access point, TLS in transit.
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Volume.html
    WordpressEfsVolume = ecs.Volume(
        name="efs",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=props['file_system'].file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                access_point_id=AccessPoint.access_point_id)))
    # Create task definition.
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
    WordpressTask = ecs.FargateTaskDefinition(
        self,
        "TaskDefinition",
        cpu=props['ecs_cpu_size'],
        memory_limit_mib=props['ecs_memory_size'],
        volumes=[WordpressEfsVolume])
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
    WordpressContainer = WordpressTask.add_container(
        "Wordpress",
        image=ecs.ContainerImage.from_ecr_repository(
            repository=ecr.Repository.from_repository_name(
                self,
                "wpimage",
                repository_name=props['ecs_container_repo_name']),
            tag=props['ecs_container_tag']),
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="container",
            # log_group = "{props['environment']}/{props['unit']}/{props['application']}",  # ToDo make sure I like log group name
            log_retention=logs.RetentionDays(
                props['ecs_log_retention_period'])),
        environment={
            "TROUBLESHOOTING_MODE_ENABLED":
            props['TROUBLESHOOTING_MODE_ENABLED']
        },
        secrets={
            # "PARAMETERSTORETEST": ecs.Secret.from_ssm_parameter( ParameterStoreTest ),
            "DBHOST":
            ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret, "host"),
            "DBUSER":
            ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret, "username"),
            "DBUSERPASS":
            ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret, "password"),
            "DBNAME":
            ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret, "database_name")
        },
    )
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/ContainerDefinition.html#aws_cdk.aws_ecs.ContainerDefinition.add_port_mappings
    WordpressContainer.add_port_mappings(
        ecs.PortMapping(container_port=80, protocol=ecs.Protocol.TCP))
    # https://gist.github.com/phillippbertram/ee312b09c3982d76b9799653ed6d6201
    WordpressContainer.add_mount_points(
        ecs.MountPoint(container_path=props['ecs_container_efs_path'],
                       read_only=False,
                       source_volume=WordpressEfsVolume.name))
    # Public ALB-fronted Fargate service with HTTPS listener and HTTP redirect.
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs_patterns/ApplicationLoadBalancedFargateService.html
    EcsService = ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        "EcsService",
        cluster=cluster,
        desired_count=props['ecs_container_desired_count'],
        task_definition=WordpressTask,
        enable_ecs_managed_tags=True,
        public_load_balancer=True,
        domain_name=props['domain_name'],
        domain_zone=route53.HostedZone.from_hosted_zone_attributes(
            self,
            "hostedZone",
            hosted_zone_id=props['domain_zone'],
            zone_name=props['zone_name']),
        listener_port=443,
        redirect_http=True,
        protocol=elasticloadbalancingv2.ApplicationProtocol("HTTPS"),
        target_protocol=elasticloadbalancingv2.ApplicationProtocol("HTTP"),
        platform_version=ecs.FargatePlatformVersion("VERSION1_4"),  # Required for EFS
        security_groups=[
            ec2.SecurityGroup.from_security_group_id(
                self,
                "EcsToRdsSeurityGroup",
                security_group_id=props["EcsToRdsSeurityGroup"].security_group_id)
        ],
    )
    # Open hole to ECS in EFS SG (NFS, 2049/tcp).
    # https://gist.github.com/phillippbertram/ee312b09c3982d76b9799653ed6d6201
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Connections.html#aws_cdk.aws_ec2.Connections
    EcsService.service.connections.allow_to(
        props['file_system'], ec2.Port.tcp(2049))
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_elasticloadbalancingv2/ApplicationTargetGroup.html#aws_cdk.aws_elasticloadbalancingv2.ApplicationTargetGroup.set_attribute
    EcsService.target_group.set_attribute(
        key="load_balancing.algorithm.type",
        value="least_outstanding_requests")
    EcsService.target_group.set_attribute(
        key="deregistration_delay.timeout_seconds", value="30")
    EcsService.target_group.configure_health_check(
        healthy_threshold_count=5,  # 2-10
        timeout=core.Duration.seconds(29),
    )
    # Autoscaling between the configured min/max on CPU and memory targets.
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html#aws_cdk.aws_ecs.FargateService.auto_scale_task_count
    ECSAutoScaler = EcsService.service.auto_scale_task_count(
        max_capacity=props['ecs_container_max_count'],
        min_capacity=props['ecs_container_min_count'])
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/ScalableTaskCount.html#aws_cdk.aws_ecs.ScalableTaskCount
    ECSAutoScaler.scale_on_cpu_utilization(
        "cpuScale",
        target_utilization_percent=80,
        scale_out_cooldown=core.Duration.seconds(30),
        scale_in_cooldown=core.Duration.seconds(60))
    ECSAutoScaler.scale_on_memory_utilization(
        "memScale",
        target_utilization_percent=80,
        scale_out_cooldown=core.Duration.seconds(30),
        scale_in_cooldown=core.Duration.seconds(60))
def __init__(self, scope: core.Construct, id: str, datalake: DataLakeLayer, **kwargs) -> None:
    """SonarQube on Fargate: ECR images, Aurora Postgres backend, ALB-fronted
    service, and EFS-backed data/logs volumes from the datalake layer.
    """
    super().__init__(scope, id, **kwargs)
    # NOTE(review): only the name-mangled attribute is assigned, but reads below
    # use `self.datalake` — presumably a property defined elsewhere on this
    # class; confirm it exists.
    self.__datalake = datalake
    # NOTE(review): ingress is open to any IPv4 on all ports — deliberate?
    self.security_group = ec2.SecurityGroup(
        self,
        'SecurityGroup',
        vpc=self.datalake.vpc,
        allow_all_outbound=True,
        description='SonarQube Security Group')
    self.security_group.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port.all_traffic(),
        description='Allow any traffic')
    # Build & push the server and scanner images.
    self.sonarqube_svr_ecr = ecr.DockerImageAsset(
        self,
        'Repo',
        directory=os.path.join(root_dir, 'images/sonarqube-server'),
        repository_name='sonarqube')
    self.sonarqube_cli_ecr = ecr.DockerImageAsset(
        self,
        'Cli',
        directory=os.path.join(root_dir, 'images/sonarqube-scanner'),
        repository_name='sonarqube-cli')
    # Aurora Postgres backend for SonarQube.
    # NOTE(review): password is a hard-coded SecretValue — consider a generated
    # Secrets Manager secret instead.
    self.database = rds.DatabaseCluster(
        self,
        'Database',
        engine=rds.DatabaseClusterEngine.aurora_postgres(
            version=rds.AuroraPostgresEngineVersion.VER_11_9),
        default_database_name='sonarqube',
        removal_policy=core.RemovalPolicy.DESTROY,
        credentials=rds.Credentials.from_username(
            username='******',
            password=core.SecretValue(value='postgres')),
        instance_props=rds.InstanceProps(
            vpc=self.datalake.vpc,
            security_groups=[self.security_group],
            instance_type=ec2.InstanceType('r6g.xlarge')))
    # (An earlier EC2-backed cluster/service variant was commented out here;
    # the Fargate service below replaced it.)
    self.service = ecsp.ApplicationLoadBalancedFargateService(
        self,
        'Server',
        assign_public_ip=True,
        vpc=self.datalake.vpc,
        desired_count=1,
        cpu=4096,
        memory_limit_mib=8 * 1024,
        listener_port=80,
        # VERSION1_4 is the platform level that supports EFS volumes.
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        security_groups=[self.security_group, self.datalake.efs_sg],
        task_image_options=ecsp.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_docker_image_asset(
                asset=self.sonarqube_svr_ecr),
            container_name='sonarqube-svr',
            container_port=9000,
            enable_logging=True,
            environment={
                '_SONAR_JDBC_URL': 'jdbc:postgresql://{}/sonarqube'.format(
                    self.database.cluster_endpoint.hostname),
                '_SONAR_JDBC_USERNAME': '******',
                '_SONAR_JDBC_PASSWORD': '******'
            }))
    # Allow the task role to use the EFS client.
    for name in ['AmazonElasticFileSystemClientFullAccess']:
        self.service.task_definition.task_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(name))
    # Override container-specific settings.
    container = self.service.task_definition.default_container
    # Raised file-descriptor limit; required to start remote sql.
    container.add_ulimits(
        ecs.Ulimit(name=ecs.UlimitName.NOFILE,
                   soft_limit=262145,
                   hard_limit=262145))
    # One EFS-backed volume per persistent directory (data, logs), each with
    # its own root-owned 777 access point, mounted under /opt/sonarqube/.
    for folder in ['data', 'logs']:
        efs_ap = self.datalake.efs.add_access_point(
            'sonarqube-' + folder,
            create_acl=efs.Acl(owner_gid="0", owner_uid="0", permissions="777"),
            path='/sonarqube/' + folder)
        self.service.task_definition.add_volume(
            name=folder,
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=self.datalake.efs.file_system_id,
                transit_encryption='ENABLED',
                authorization_config=ecs.AuthorizationConfig(
                    access_point_id=efs_ap.access_point_id,
                    iam='DISABLED')))
        container.add_mount_points(
            ecs.MountPoint(container_path='/opt/sonarqube/' + folder,
                           source_volume=folder,
                           read_only=False))
def __init__(self, scope: core.Construct, id: str, vpc, efs_mnt_path: str = "/efs", **kwargs) -> None:
    """Build an EFS share reachable from the given VPC and an access point at ``efs_mnt_path``.

    Exposes ``self.efs_sg``, ``self.efs_share`` and ``self.efs_ap``; emits two
    stack outputs (project info and a ready-made mount command).
    """
    super().__init__(scope, id, **kwargs)
    # Security group admitting NFS (2049/tcp) from anywhere inside the VPC.
    self.efs_sg = _ec2.SecurityGroup(
        self,
        id="efsSecurityGroup",
        vpc=vpc,
        security_group_name=f"efs_sg_{id}",
        description="Security Group to connect to EFS from the VPC")
    self.efs_sg.add_ingress_rule(
        peer=_ec2.Peer.ipv4(vpc.vpc_cidr_block),
        connection=_ec2.Port.tcp(2049),
        description="Allow EC2 instances within the same VPC to connect to EFS")
    # The share itself: general-purpose, bursting, unencrypted, destroyed with the stack.
    self.efs_share = _efs.FileSystem(
        self,
        "elasticFileSystem",
        file_system_name="high-performance-storage",
        vpc=vpc,
        security_group=self.efs_sg,
        encrypted=False,
        lifecycle_policy=_efs.LifecyclePolicy.AFTER_7_DAYS,
        performance_mode=_efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=_efs.ThroughputMode.BURSTING,
        removal_policy=core.RemovalPolicy.DESTROY)
    # Access point owned by uid/gid 1000, mode 0777 on its root directory.
    self.efs_ap = _efs.AccessPoint(
        self,
        "efsAccessPoint",
        path=f"{efs_mnt_path}",
        file_system=self.efs_share,
        posix_user=_efs.PosixUser(gid="1000", uid="1000"),
        create_acl=_efs.Acl(owner_gid="1000", owner_uid="1000", permissions="0777"))
    ###########################################
    ################# OUTPUTS #################
    ###########################################
    core.CfnOutput(
        self,
        "AutomationFrom",
        value=f"{GlobalArgs.SOURCE_INFO}",
        description="To know more about this automation stack, check out our github page.")
    core.CfnOutput(
        self,
        "MountEfs",
        value=f"sudo mount -t efs -o tls {self.efs_share.file_system_id}:/ /mnt/efs ",
        description="Use this command to mount efs using efs helper utility at location /mnt/efs")
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """Network/data layer for the WordPress deployment.

    Creates the VPC (public/private/isolated subnets), an Aurora MySQL
    cluster with a generated secret, an encrypted EFS file system, the ECS
    cluster, an optional bastion host, and developer SFTP tooling over the
    EFS share. Downstream stacks consume ``self.output_props`` (vpc,
    rds_instance, EcsToRdsSeurityGroup, file_system, cluster).
    """
    super().__init__(scope, id, **kwargs)
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
    vpc = ec2.Vpc(self,
                  "vpc",
                  cidr=props['vpc_CIDR'],
                  max_azs=3,
                  subnet_configuration=[{
                      'cidrMask': 28,
                      'name': 'public',
                      'subnetType': ec2.SubnetType.PUBLIC
                  }, {
                      'cidrMask': 28,
                      'name': 'private',
                      'subnetType': ec2.SubnetType.PRIVATE
                  }, {
                      'cidrMask': 28,
                      'name': 'db',
                      'subnetType': ec2.SubnetType.ISOLATED
                  }])
    # DB subnets live in the isolated tier only.
    rds_subnetGroup = rds.SubnetGroup(
        self,
        "rds_subnetGroup",
        description=
        f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED))
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
    ##TODO:ADD Aurora Serverless Option
    rds_instance = rds.DatabaseCluster(
        self,
        'wordpress-db',
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_07_2),
        instances=1,
        instance_props=rds.InstanceProps(
            vpc=vpc,
            enable_performance_insights=props['rds_enable_performance_insights'],
            instance_type=ec2.InstanceType(
                instance_type_identifier=props['rds_instance_type'])),
        subnet_group=rds_subnetGroup,
        storage_encrypted=props['rds_storage_encrypted'],
        backup=rds.BackupProps(retention=core.Duration.days(
            props['rds_automated_backup_retention_days'])))
    # SG that the WordPress tasks attach to in order to reach RDS.
    EcsToRdsSeurityGroup = ec2.SecurityGroup(
        self,
        "EcsToRdsSeurityGroup",
        vpc=vpc,
        description="Allow WordPress containers to talk to RDS")
    # Helper Lambda in the isolated tier that can read the DB secret.
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
    db_cred_generator = _lambda.Function(
        self,
        'db_creds_generator',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='db_creds_generator.handler',
        # Code.asset() is deprecated; from_asset() is the supported equivalent.
        code=_lambda.Code.from_asset('lambda/db_creds_generator'),
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
        environment={
            'SECRET_NAME': rds_instance.secret.secret_name,
        })
    # Open hole to RDS in RDS SG (MySQL, 3306/tcp).
    rds_instance.connections.allow_from(EcsToRdsSeurityGroup, ec2.Port.tcp(3306))
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
    file_system = efs.FileSystem(
        self,
        "MyEfsFileSystem",
        vpc=vpc,
        encrypted=True,  # file system is not encrypted by default
        lifecycle_policy=props['efs_lifecycle_policy'],
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
        removal_policy=core.RemovalPolicy(props['efs_removal_policy']),
        enable_automatic_backups=props['efs_automatic_backups'])
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html
    cluster = ecs.Cluster(
        self,
        "Cluster",
        vpc=vpc,
        container_insights=props['ecs_enable_container_insights'])
    if props['deploy_bastion_host']:
        # ToDo: Deploy bastion host with a key file
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
        bastion_host = ec2.BastionHostLinux(self, 'bastion_host', vpc=vpc)
        rds_instance.connections.allow_from(bastion_host, ec2.Port.tcp(3306))
    #######################
    ### Developer Tools ###
    # SFTP into the EFS Shared File System.
    NetToolsSecret = secretsmanager.Secret(
        self,
        "NetToolsSecret",
        generate_secret_string=secretsmanager.SecretStringGenerator(
            secret_string_template=json.dumps({
                "username": '******',
                "ip": ''
            }),
            generate_string_key="password",
            exclude_characters='/"'))
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
    AccessPoint = file_system.add_access_point(
        "access-point",
        path="/",
        create_acl=efs.Acl(
            # uid/gid per https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
            owner_uid="100",
            owner_gid="101",
            permissions="0755"))
    EfsVolume = ecs.Volume(
        name="efs",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=file_system.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                access_point_id=AccessPoint.access_point_id)))
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
    NetToolsTask = ecs.FargateTaskDefinition(self,
                                             "TaskDefinition",
                                             cpu=256,
                                             memory_limit_mib=512,
                                             volumes=[EfsVolume])
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
    NetToolsContainer = NetToolsTask.add_container(
        "NetTools",
        image=ecs.ContainerImage.from_registry('netresearch/sftp'),
        command=['test:test:100:101:efs'])
    NetToolsContainer.add_port_mappings(
        ecs.PortMapping(container_port=22, protocol=ecs.Protocol.TCP))
    NetToolsContainer.add_mount_points(
        ecs.MountPoint(
            container_path="/home/test/efs",  # ToDo build path out with username from secret
            read_only=False,
            source_volume=EfsVolume.name,
        ))
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html
    service = ecs.FargateService(
        self,
        "Service",
        cluster=cluster,
        task_definition=NetToolsTask,
        platform_version=ecs.FargatePlatformVersion("VERSION1_4"),  # Required for EFS
    )
    # ToDo somehow store container's IP on deploy
    # Allow traffic to EFS Volume from Net Tools container.
    service.connections.allow_to(file_system, ec2.Port.tcp(2049))
    # ToDo allow bastion host into container on port 22
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
    bastion_ip_locator = _lambda.Function(
        self,
        'bastion_ip_locator',
        function_name=
        f"{props['environment']}-{props['application']}-{props['unit']}-SFTP-IP",
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='bastion_ip_locator.handler',
        # Code.asset() is deprecated; from_asset() is the supported equivalent.
        code=_lambda.Code.from_asset('lambda/bastion_ip_locator'),
        environment={
            'CLUSTER_NAME': cluster.cluster_arn,
            'SERVICE_NAME': service.service_name
        })
    # Give needed perms to bastion_ip_locator for reading info from ECS.
    # Use deploy-time pseudo parameters instead of a hard-coded account/region
    # so the stack is portable across accounts.
    bastion_ip_locator.add_to_role_policy(
        iam.PolicyStatement(
            actions=["ecs:DescribeTasks"],
            resources=[
                f"arn:{core.Aws.PARTITION}:ecs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:task/{cluster.cluster_name}/*"
            ]))
    bastion_ip_locator.add_to_role_policy(
        iam.PolicyStatement(actions=[
            "ecs:ListTasks",
        ],
                            resources=["*"],
                            conditions={
                                'ArnEquals': {
                                    'ecs:cluster': cluster.cluster_arn
                                }
                            }))
    # Hand the shared resources to downstream stacks.
    self.output_props = props.copy()
    self.output_props["vpc"] = vpc
    self.output_props["rds_instance"] = rds_instance
    self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
    self.output_props["file_system"] = file_system
    self.output_props["cluster"] = cluster
def __init__(self, scope: core.Construct, construct_id: str, name: str, **kwargs) -> None:
    """ML-inference stack: container Lambda with an EFS mount, fronted by an
    HTTP API. A Streamlit ECS front-end is present but disabled (kept in the
    triple-quoted block below).
    """
    super().__init__(scope, construct_id, **kwargs)
    """VPC - used in project"""
    vpc = ec2.Vpc(self, f'{name}-VPC', max_azs=2)
    """Filesystem - shared between Lambda and Streamlit - Deletes when stack gets shut down"""
    fs = efs.FileSystem(self,
                        f'{name}-FileSystem',
                        vpc=vpc,
                        removal_policy=core.RemovalPolicy.DESTROY)
    access_point = fs.add_access_point(
        'AccessPoint',
        create_acl=efs.Acl(owner_gid='1001', owner_uid='1001', permissions='750'),
        path="/export/lambda",
        posix_user=efs.PosixUser(gid="1001", uid="1001"))
    """Model folder that contains Lambda code"""
    model_folder = os.path.dirname(os.path.realpath(__file__)) + "/../model"
    # Container-image Lambda; MOUNT_POINT is a module-level constant.
    lambda_handler = _lambda.DockerImageFunction(
        self,
        f'{name}-Lambda',
        code=_lambda.DockerImageCode.from_image_asset(
            model_folder),  # Uses local code to build the container
        memory_size=1024,  # Adjust to your need - 128MB to 10GB
        timeout=core.Duration.minutes(5),  # Adjust to your need - up to 15 mins
        vpc=vpc,
        filesystem=_lambda.FileSystem.from_efs_access_point(
            access_point, MOUNT_POINT))
    """Custom Log groups for Lambda"""
    lambda_lgs = logs.LogGroup(
        self,
        f'{name}-Lambda-LogGroup',
        log_group_name=f"/aws/lambda/{lambda_handler.function_name}",
        retention=logs.RetentionDays.ONE_WEEK,
        removal_policy=core.RemovalPolicy.DESTROY)
    """API Gateway - integrates all methods and ressources - used for Lambda invocation"""
    api = api_gw.HttpApi(
        self,
        f'{name}-ApiGw',
        default_integration=integrations.LambdaProxyIntegration(
            handler=lambda_handler))
    """""" """""" """""" """""" """""" """""" """""" """""" """"""
    #STREAMLIT RELATED START
    """""" """""" """""" """""" """""" """""" """""" """""" """"""
    # Disabled Streamlit front-end kept for reference in this string literal.
    '''
    cluster = ecs.Cluster(self, f"{name}-Streamlit-Cluster", vpc=vpc)

    ecs_task = ecs.FargateTaskDefinition(
        self,
        f'{name}-Streamlit-Task-Def',
    )

    streamlit_container = ecs_task.add_container(
        f'{name}-Streamlit-Container',
        image=ecs.ContainerImage.from_asset('streamlit-docker'),
        essential=True,
        environment={
            'API_URL': api.url,
        },
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix=f'{name}-Streamlit-Log'
        )
    )

    streamlit_container.add_port_mappings(
        ecs.PortMapping(
            container_port=8501,
            host_port=8501,
            protocol=ecs.Protocol.TCP
        )
    )

    """Efs Volume - shared between Lambda / Streamlit"""
    ecs_task.add_volume(name=f'{name}-Efs-Volume',
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=fs.file_system_id,
        ))

    """Efs Mountpoint"""
    streamlit_container.add_mount_points(
        ecs.MountPoint(
            container_path="/mnt/data",
            read_only=False,
            source_volume=f'{name}-Efs-Volume'
        ))

    ecs_task.add_to_task_role_policy(
        statement=iam.PolicyStatement(
            actions=["efs:*"],
            resources=['*'],
            effect=iam.Effect.ALLOW
        )
    )

    """Fargate Service that hosts the Streamlit Application"""
    ecs_service = ecs_patterns.ApplicationLoadBalancedFargateService(self, f'{name}-Fargate-Service',
        cluster=cluster,
        cpu=256,
        desired_count=1,
        task_definition = ecs_task,
        memory_limit_mib=512,
        public_load_balancer=True,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4, #https://forums.aws.amazon.com/thread.jspa?messageID=960420
    )

    fs.connections.allow_default_port_from(
        ecs_service.service.connections)
    '''
    """""" """""" """""" """""" """""" """""" """""" """""" """"""
    #STREAMLIT RELATED END
    """""" """""" """""" """""" """""" """""" """""" """""" """"""
    core.CfnOutput(self, 'URL', value=api.url)