def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # EFS needs to live inside a VPC
    _vpc = ec2.Vpc(self, 'theVpc', max_azs=2)

    # File system shared with the Lambda function; destroyed with the stack
    _fs = efs.FileSystem(self, 'theFileSystem',
                         vpc=_vpc,
                         removal_policy=RemovalPolicy.DESTROY)

    _access_point = _fs.add_access_point(
        'theAccessPoint',
        create_acl=efs.Acl(owner_gid='1001', owner_uid='1001', permissions='750'),
        path="/export/lambda",
        posix_user=efs.PosixUser(gid="1001", uid="1001"))

    # Lambda function with the EFS access point mounted at /mnt/text
    _efs_lambda = _lambda.Function(
        self, 'lambdaEfsHandler',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset('lambda_function'),
        handler='lambda_function.lambda_handler',
        vpc=_vpc,
        filesystem=_lambda.FileSystem.from_efs_access_point(
            _access_point, '/mnt/text'))

    # HTTP API fronting the Lambda function
    _api = api_gateway.HttpApi(
        self, 'EFS LAMBDA APIGATEWAY',
        default_integration=integrations.HttpLambdaIntegration(
            id="LambdaFunction", handler=_efs_lambda))

    CfnOutput(self, 'API Url', value=_api.url)
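# Hedged sketch (not the stack's actual 'lambda_function' asset, which is not shown here)
# of a handler that exercises the /mnt/text mount: it appends one line per invocation and
# reports how many invocations have been recorded. The file name 'hits.txt' is hypothetical.
import datetime
import os

HITS_FILE = '/mnt/text/hits.txt'  # assumed file on the EFS mount


def lambda_handler(event, context):
    # Record this invocation, then count all recorded invocations.
    with open(HITS_FILE, 'a') as f:
        f.write(datetime.datetime.utcnow().isoformat() + os.linesep)
    with open(HITS_FILE) as f:
        count = sum(1 for _ in f)
    return {'statusCode': 200, 'body': f'{count} invocations recorded on EFS'}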
def provision_efs(self, name: str, cfg: EFS, vpc: ec2.Vpc, security_group: ec2.SecurityGroup):
    self.efs = efs.FileSystem(
        self.scope,
        "Efs",
        vpc=vpc,
        # encrypted=True,
        file_system_name=name,
        # kms_key,
        # lifecycle_policy,
        performance_mode=efs.PerformanceMode.MAX_IO,
        provisioned_throughput_per_second=cdk.Size.mebibytes(100),  # TODO: dev/nondev sizing
        removal_policy=cdk.RemovalPolicy.DESTROY
        if cfg.removal_policy_destroy
        else cdk.RemovalPolicy.RETAIN,
        security_group=security_group,
        throughput_mode=efs.ThroughputMode.PROVISIONED,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
    )

    self.efs_access_point = self.efs.add_access_point(
        "access_point",
        create_acl=efs.Acl(
            owner_uid="0",
            owner_gid="0",
            permissions="777",
        ),
        path="/domino",
        posix_user=efs.PosixUser(
            uid="0",
            gid="0",
            # secondary_gids
        ),
    )
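# Hedged usage sketch for provision_efs. The EFS config type and the provisioner object
# that exposes self.scope are not shown above, so both are assumptions here; only the
# removal_policy_destroy attribute is implied by the code.
from dataclasses import dataclass


@dataclass
class EFS:
    removal_policy_destroy: bool = True  # assumed minimal shape of the cfg argument


# Inside a construct/provisioner that exposes `scope`, something like:
#     provisioner.provision_efs(
#         name="domino-shared-efs",                # hypothetical name
#         cfg=EFS(removal_policy_destroy=True),
#         vpc=vpc,
#         security_group=efs_security_group,
#     )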
def build_file_system_access_point(
    scope: core.Construct, team_name: str, shared_fs: efs.FileSystem, path: str, ap_name: str
) -> efs.AccessPoint:
    return efs.AccessPoint(
        scope=scope,
        id=ap_name,
        file_system=cast(efs.IFileSystem, shared_fs),
        path=f"/{team_name}/{path}",
        posix_user=efs.PosixUser(gid="100", uid="1000"),
        create_acl=efs.Acl(owner_gid="100", owner_uid="1000", permissions="770"),
    )
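# Hedged usage sketch for build_file_system_access_point, assuming CDK v1 imports matching
# the snippet above and that the helper is importable in this module. The stack, VPC, and
# the team/path values are illustrative only.
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_efs as efs
from aws_cdk import core

app = core.App()
stack = core.Stack(app, "TeamFsStack")
vpc = ec2.Vpc(stack, "Vpc", max_azs=2)
shared_fs = efs.FileSystem(stack, "SharedFs", vpc=vpc)

# One access point per team, rooted at /<team>/<path> with the fixed POSIX identity.
team_ap = build_file_system_access_point(
    scope=stack,
    team_name="data-science",      # hypothetical team
    shared_fs=shared_fs,
    path="scratch",                # hypothetical sub-path
    ap_name="data-science-scratch-ap",
)
app.synth()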
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # create vpc
    vpc = aws_ec2.Vpc(self, "vpc", max_azs=3, nat_gateways=1)

    # create efs share
    efs_share = aws_efs.FileSystem(self, "efs-backend", vpc=vpc)

    # create efs acl
    efs_acl = aws_efs.Acl(owner_gid="1000", owner_uid="1000", permissions="0777")

    # create efs posix user
    efs_user = aws_efs.PosixUser(gid="1000", uid="1000")

    # create efs access point
    efs_ap = aws_efs.AccessPoint(self,
                                 "efs-accesspoint",
                                 path="/efs",
                                 file_system=efs_share,
                                 posix_user=efs_user,
                                 create_acl=efs_acl)

    # create lambda with efs access
    efs_lambda = aws_lambda.Function(
        self,
        "read_efs",
        runtime=aws_lambda.Runtime.PYTHON_3_8,
        code=aws_lambda.Code.from_asset("./function"),
        handler="efsread.handler",
        timeout=core.Duration.seconds(20),
        memory_size=128,
        retry_attempts=0,
        filesystem=aws_lambda.FileSystem.from_efs_access_point(efs_ap, '/mnt/efs'),
        tracing=aws_lambda.Tracing.ACTIVE,
        vpc=vpc,
        environment={"var": "x"})

    # create custom iam policy with efs permissions
    efs_policy = aws_iam.PolicyStatement(resources=["*"],
                                         actions=["elasticfilesystem:*"])

    # add efs iam policy to lambda
    efs_lambda.add_to_role_policy(efs_policy)
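# A minimal, hypothetical stand-in for ./function/efsread.py (the real asset is not shown
# above): it lists whatever is currently stored under the /mnt/efs mount point configured
# on the Function.
import json
import os

MOUNT_PATH = '/mnt/efs'  # matches the mount point configured on the Function above


def handler(event, context):
    # Return the directory listing of the shared file system.
    entries = sorted(os.listdir(MOUNT_PATH))
    return {'statusCode': 200, 'body': json.dumps({'files': entries})}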
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # EFS needs to be set up in a VPC
    vpc = ec2.Vpc(self, 'Vpc', max_azs=2)

    # Create a file system in EFS to store information
    fs = efs.FileSystem(self, 'FileSystem',
                        vpc=vpc,
                        removal_policy=core.RemovalPolicy.DESTROY)

    access_point = fs.add_access_point(
        'AccessPoint',
        create_acl=efs.Acl(owner_gid='1001', owner_uid='1001', permissions='750'),
        path="/export/lambda",
        posix_user=efs.PosixUser(gid="1001", uid="1001"))

    efs_lambda = _lambda.Function(
        self, 'rdsProxyHandler',
        runtime=_lambda.Runtime.PYTHON_3_8,
        code=_lambda.Code.from_asset('lambda_fns'),
        handler='message_wall.lambda_handler',
        vpc=vpc,
        filesystem=_lambda.FileSystem.from_efs_access_point(
            access_point, '/mnt/msg'))

    # defines an API Gateway HTTP API resource backed by our "efs_lambda" function.
    api = api_gw.HttpApi(
        self, 'EFS Lambda',
        default_integration=integrations.LambdaProxyIntegration(
            handler=efs_lambda))

    core.CfnOutput(self, 'HTTP API Url', value=api.url)
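# Hedged sketch of a possible 'lambda_fns/message_wall.py' (the real asset is not shown
# above): it treats /mnt/msg as a small message wall, appending POST bodies and returning
# the wall contents for any request. The file name 'content' is hypothetical.
import os

WALL_FILE = '/mnt/msg/content'  # assumed file on the EFS mount


def lambda_handler(event, context):
    # HTTP API v2 payload: the method lives under requestContext.http.method.
    method = event.get('requestContext', {}).get('http', {}).get('method', 'GET')
    if method == 'POST' and event.get('body'):
        with open(WALL_FILE, 'a') as f:
            f.write(event['body'] + os.linesep)
    messages = ''
    if os.path.exists(WALL_FILE):
        with open(WALL_FILE) as f:
            messages = f.read()
    return {'statusCode': 200, 'body': messages}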
def __init__(self, scope: core.Construct, id: str, vpc, efs_mnt_path: str = "/efs", **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Create Security Group to connect to EFS
    self.efs_sg = _ec2.SecurityGroup(
        self,
        id="efsSecurityGroup",
        vpc=vpc,
        security_group_name=f"efs_sg_{id}",
        description="Security Group to connect to EFS from the VPC")

    self.efs_sg.add_ingress_rule(
        peer=_ec2.Peer.ipv4(vpc.vpc_cidr_block),
        connection=_ec2.Port.tcp(2049),
        description="Allow EC2 instances within the same VPC to connect to EFS")

    # Let us create the EFS Filesystem
    self.efs_share = _efs.FileSystem(
        self,
        "elasticFileSystem",
        file_system_name="high-performance-storage",
        vpc=vpc,
        security_group=self.efs_sg,
        encrypted=False,
        lifecycle_policy=_efs.LifecyclePolicy.AFTER_7_DAYS,
        performance_mode=_efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=_efs.ThroughputMode.BURSTING,
        removal_policy=core.RemovalPolicy.DESTROY)

    # create efs acl
    efs_acl = _efs.Acl(owner_gid="1000", owner_uid="1000", permissions="0777")

    # create efs posix user
    efs_user = _efs.PosixUser(gid="1000", uid="1000")

    # create efs access point
    self.efs_ap = _efs.AccessPoint(self,
                                   "efsAccessPoint",
                                   path=f"{efs_mnt_path}",
                                   file_system=self.efs_share,
                                   posix_user=efs_user,
                                   create_acl=efs_acl)

    ###########################################
    ################# OUTPUTS #################
    ###########################################
    output_0 = core.CfnOutput(
        self,
        "AutomationFrom",
        value=f"{GlobalArgs.SOURCE_INFO}",
        description="To know more about this automation stack, check out our github page.")

    output_1 = core.CfnOutput(
        self,
        "MountEfs",
        value=f"sudo mount -t efs -o tls {self.efs_share.file_system_id}:/ /mnt/efs",
        description="Use this command to mount efs using efs helper utility at location /mnt/efs")
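# Hedged consumption sketch: a Lambda in the same VPC could mount the access point exposed
# above as self.efs_ap. The aws_lambda import alias, the asset path, and the '/mnt/efs'
# container path are assumptions for illustration; they are not part of this stack.
# from aws_cdk import aws_lambda as _lambda
#
# fn = _lambda.Function(
#     self, "efsConsumerFn",
#     runtime=_lambda.Runtime.PYTHON_3_8,
#     handler="index.handler",
#     code=_lambda.Code.from_asset("lambda_src"),
#     vpc=vpc,
#     filesystem=_lambda.FileSystem.from_efs_access_point(self.efs_ap, "/mnt/efs"),
# )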
def __init__(
    self,
    scope: cdk.Construct,
    construct_id: str,
    vpc: ec2.Vpc,
    domain: sagemaker.CfnDomain,
    **kwargs,
) -> None:
    super().__init__(scope, construct_id, **kwargs)

    studio_domain_id = domain.attr_domain_id  # cdk.Fn.import_value("StudioDomainId")

    # Get the security group associated with the EFS volume managed by SageMaker Studio
    get_parameter = cr.AwsCustomResource(
        self,
        "GetEfsSgId",
        on_update={  # will also be called for a CREATE event
            "service": "EC2",
            "action": "describeSecurityGroups",
            "parameters": {
                "Filters": [
                    {"Name": "vpc-id", "Values": [vpc.vpc_id]},
                    {
                        "Name": "group-name",
                        "Values": [
                            f"security-group-for-inbound-nfs-{studio_domain_id}"
                        ],
                    },
                ]
            },
            "physical_resource_id": cr.PhysicalResourceId.of("GetEfsSgId"),
        },
        policy=cr.AwsCustomResourcePolicy.from_sdk_calls(
            resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE
        ),
    )
    sg_name = get_parameter.get_response_field("SecurityGroups.0.GroupId")
    sg_efs = ec2.SecurityGroup.from_security_group_id(
        self, "SG", security_group_id=sg_name)

    # We can now retrieve a handle on the EFS volume
    StudioDomainEfsId = cdk.Fn.import_value("StudioDomainEfsId")
    studio_efs = efs.FileSystem.from_file_system_attributes(
        self,
        "StudioEFS",
        file_system_id=StudioDomainEfsId,
        security_group=sg_efs)

    # Create EFS access point to enable the lambda fn to mount the EFS volume
    efs_ap = efs.AccessPoint(
        self,
        "EfsAccessPoint",
        file_system=studio_efs,
        posix_user=efs.PosixUser(gid="0", uid="0"),
    )

    # Function that takes care of setting up the user environment
    self.lambda_fn = lambda_python.PythonFunction(
        self,
        "UserSetupLambdaFn",
        entry="populate_git_fn",
        index="populate_from_git.py",
        handler="on_event",
        vpc=vpc,
        layers=[
            lambda_.LayerVersion.from_layer_version_arn(
                self,
                "GitLayer",
                layer_version_arn=f"arn:aws:lambda:{self.region}:553035198032:layer:git-lambda2:8",
            ),
        ],
        filesystem=lambda_.FileSystem.from_efs_access_point(efs_ap, "/mnt/efs"),
        timeout=cdk.Duration.seconds(300),
        initial_policy=[
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "sagemaker:DescribeUserProfile",
                ],
                resources=["*"],
            )
        ],
    )

    provider = cr.Provider(
        self,
        "Provider",
        on_event_handler=self.lambda_fn,
    )

    cdk.CfnOutput(
        self,
        "StudioUserProviderToken",
        value=provider.service_token,
        description="StudioUserProviderToken",
        export_name="StudioUserProviderToken",
    )
    self.provider = provider
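# Hedged sketch of consuming the exported provider token from another stack: a custom
# resource pointed at the token would invoke the user-setup Lambda above on create/update.
# The property names (e.g. 'UserProfileName', 'GitRepository') are assumptions about what
# populate_from_git.py expects; they are not confirmed by the code above.
# token = cdk.Fn.import_value("StudioUserProviderToken")
# cdk.CustomResource(
#     self, "PopulateUserHome",
#     service_token=token,
#     properties={
#         "UserProfileName": "studio-user-1",                      # hypothetical
#         "GitRepository": "https://github.com/org/examples.git",  # hypothetical
#     },
# )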
def __init__(self, scope: core.Construct, construct_id: str, name: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    """VPC - used in project"""
    vpc = ec2.Vpc(self, f'{name}-VPC', max_azs=2)

    """Filesystem - shared between Lambda and Streamlit - Deletes when stack gets shut down"""
    fs = efs.FileSystem(self, f'{name}-FileSystem',
                        vpc=vpc,
                        removal_policy=core.RemovalPolicy.DESTROY)

    access_point = fs.add_access_point(
        'AccessPoint',
        create_acl=efs.Acl(owner_gid='1001', owner_uid='1001', permissions='750'),
        path="/export/lambda",
        posix_user=efs.PosixUser(gid="1001", uid="1001"))

    """Model folder that contains Lambda code"""
    model_folder = os.path.dirname(os.path.realpath(__file__)) + "/../model"

    lambda_handler = _lambda.DockerImageFunction(
        self, f'{name}-Lambda',
        code=_lambda.DockerImageCode.from_image_asset(model_folder),  # Uses local code to build the container
        memory_size=1024,  # Adjust to your need - 128MB to 10GB
        timeout=core.Duration.minutes(5),  # Adjust to your need - up to 15 mins
        vpc=vpc,
        filesystem=_lambda.FileSystem.from_efs_access_point(access_point, MOUNT_POINT))

    """Custom Log groups for Lambda"""
    lambda_lgs = logs.LogGroup(
        self, f'{name}-Lambda-LogGroup',
        log_group_name=f"/aws/lambda/{lambda_handler.function_name}",
        retention=logs.RetentionDays.ONE_WEEK,
        removal_policy=core.RemovalPolicy.DESTROY)

    """API Gateway - integrates all methods and resources - used for Lambda invocation"""
    api = api_gw.HttpApi(
        self, f'{name}-ApiGw',
        default_integration=integrations.LambdaProxyIntegration(
            handler=lambda_handler))

    # STREAMLIT RELATED START
    '''
    cluster = ecs.Cluster(self, f"{name}-Streamlit-Cluster", vpc=vpc)

    ecs_task = ecs.FargateTaskDefinition(
        self,
        f'{name}-Streamlit-Task-Def',
    )

    streamlit_container = ecs_task.add_container(
        f'{name}-Streamlit-Container',
        image=ecs.ContainerImage.from_asset('streamlit-docker'),
        essential=True,
        environment={
            'API_URL': api.url,
        },
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix=f'{name}-Streamlit-Log'
        )
    )

    streamlit_container.add_port_mappings(
        ecs.PortMapping(
            container_port=8501,
            host_port=8501,
            protocol=ecs.Protocol.TCP
        )
    )

    """Efs Volume - shared between Lambda / Streamlit"""
    ecs_task.add_volume(
        name=f'{name}-Efs-Volume',
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=fs.file_system_id,
        ))

    """Efs Mountpoint"""
    streamlit_container.add_mount_points(
        ecs.MountPoint(
            container_path="/mnt/data",
            read_only=False,
            source_volume=f'{name}-Efs-Volume'
        ))

    ecs_task.add_to_task_role_policy(
        statement=iam.PolicyStatement(
            actions=["efs:*"],
            resources=['*'],
            effect=iam.Effect.ALLOW
        )
    )

    """Fargate Service that hosts the Streamlit Application"""
    ecs_service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self, f'{name}-Fargate-Service',
        cluster=cluster,
        cpu=256,
        desired_count=1,
        task_definition=ecs_task,
        memory_limit_mib=512,
        public_load_balancer=True,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,  # https://forums.aws.amazon.com/thread.jspa?messageID=960420
    )

    fs.connections.allow_default_port_from(ecs_service.service.connections)
    '''
    # STREAMLIT RELATED END

    core.CfnOutput(self, 'URL', value=api.url)
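# Hedged sketch (not the repository's actual model/ handler) of how the container Lambda
# might use the EFS mount referenced by MOUNT_POINT, e.g. caching downloaded model
# artifacts between cold starts. CACHE_DIR and the handler name are hypothetical and should
# line up with whatever MOUNT_POINT is set to in the stack module.
import json
import os

CACHE_DIR = os.environ.get('CACHE_DIR', '/mnt/ml')  # hypothetical; align with MOUNT_POINT


def handler(event, context):
    # Ensure the cache directory exists on EFS, then report what is already cached.
    os.makedirs(CACHE_DIR, exist_ok=True)
    cached = sorted(os.listdir(CACHE_DIR))
    return {'statusCode': 200, 'body': json.dumps({'cached_artifacts': cached})}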