def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.vpc = ec2.Vpc(self, 'Network')
        VpcEndpointsForAWSServices(self, 'Endpoints', vpc=self.vpc)

        self.product_descr_bucket = s3.Bucket(
            self, 'AndroidProducts', removal_policy=core.RemovalPolicy.DESTROY)

        self.efs_sg = ec2.SecurityGroup(
            self,
            'EfsGroup',
            vpc=self.vpc,
            allow_all_outbound=True,
            description='Security Group for ApkStore EFS')

        self.efs_sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                                     connection=ec2.Port.all_traffic(),
                                     description='Allow any traffic')

        self.efs = efs.FileSystem(
            self,
            'ApkStore',
            vpc=self.vpc,
            security_group=self.efs_sg,
            lifecycle_policy=efs.LifecyclePolicy.AFTER_14_DAYS,
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE)
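
The ingress rule above admits all traffic from any IPv4 address. A tighter sketch (an addition, not part of the original example) that only admits NFS from inside the VPC, mirroring what Example #17 below does:

        # Restrict ingress to NFS (TCP 2049) from the VPC CIDR only.
        self.efs_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4(self.vpc.vpc_cidr_block),
            connection=ec2.Port.tcp(2049),
            description='Allow NFS from within the VPC only')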
Example #2
    def provision_efs(self, name: str, cfg: EFS, vpc: ec2.Vpc, security_group: ec2.SecurityGroup):
        self.efs = efs.FileSystem(
            self.scope,
            "Efs",
            vpc=vpc,
            # encrypted=True,
            file_system_name=name,
            # kms_key,
            # lifecycle_policy,
            performance_mode=efs.PerformanceMode.MAX_IO,
            provisioned_throughput_per_second=cdk.Size.mebibytes(100),  # TODO: dev/nondev sizing
            removal_policy=cdk.RemovalPolicy.DESTROY if cfg.removal_policy_destroy else cdk.RemovalPolicy.RETAIN,
            security_group=security_group,
            throughput_mode=efs.ThroughputMode.PROVISIONED,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
        )

        self.efs_access_point = self.efs.add_access_point(
            "access_point",
            create_acl=efs.Acl(
                owner_uid="0",
                owner_gid="0",
                permissions="777",
            ),
            path="/domino",
            posix_user=efs.PosixUser(
                uid="0",
                gid="0",
                # secondary_gids
            ),
        )
Example #3
def build_file_system(
    scope: core.Construct,
    name: str,
    efs_life_cycle: str,
    vpc: ec2.IVpc,
    efs_security_group: ec2.ISecurityGroup,
    subnets: List[ec2.ISubnet],
    team_kms_key: kms.Key,
) -> efs.FileSystem:
    fs_name: str = f"orbit-{name}-fs"
    efs_fs: efs.FileSystem = efs.FileSystem(
        scope=scope,
        id=fs_name,
        file_system_name=fs_name,
        vpc=vpc,
        encrypted=True,
        lifecycle_policy=efs.LifecyclePolicy[efs_life_cycle] if efs_life_cycle else None,
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
        security_group=efs_security_group,
        vpc_subnets=ec2.SubnetSelection(subnets=subnets),
        kms_key=cast(Optional[kms.IKey], team_kms_key),
    )
    return efs_fs
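
A hypothetical call site for build_file_system; the vpc, sg, and key variables below are assumptions for illustration, not part of the original:

vpc = ec2.Vpc(scope, "TeamVpc")
sg = ec2.SecurityGroup(scope, "TeamEfsSg", vpc=vpc)
key = kms.Key(scope, "TeamKey")
team_fs = build_file_system(
    scope=scope,
    name="analytics",
    efs_life_cycle="AFTER_14_DAYS",  # any efs.LifecyclePolicy member name, or "" for none
    vpc=vpc,
    efs_security_group=sg,
    subnets=vpc.private_subnets,
    team_kms_key=key,
)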
Example #4
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        _vpc = ec2.Vpc(self, 'theVpc', max_azs=2)

        _fs = efs.FileSystem(self,
                             'theFileSystem',
                             vpc=_vpc,
                             removal_policy=RemovalPolicy.DESTROY)

        _access_point = _fs.add_access_point(
            'theAccessPoint',
            create_acl=efs.Acl(owner_gid='1001',
                               owner_uid='1001',
                               permissions='750'),
            path="/export/lambda",
            posix_user=efs.PosixUser(gid="1001", uid="1001"))

        _efs_lambda = _lambda.Function(
            self,
            'lambdaEfsHandler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.from_asset('lambda_function'),
            handler='lambda_function.lambda_handler',
            vpc=_vpc,
            filesystem=_lambda.FileSystem.from_efs_access_point(
                _access_point, '/mnt/text'))

        _api = api_gateway.HttpApi(
            self,
            'EFS LAMBDA APIGATEWAY',
            default_integration=integrations.HttpLambdaIntegration(
                id="LambdaFunction", handler=_efs_lambda))

        CfnOutput(self, 'API Url', value=_api.url)
Example #5
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 landing_zone: IVpcLandingZone,
                 ds: DirectoryServicesConstruct,
                 subnet_group_name: str = 'Default',
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        core.Tags.of(self).add(key='Source',
                               value=NetworkFileSystemsConstruct.__name__)

        subnet_ids = landing_zone.vpc.select_subnets(
            subnet_group_name=subnet_group_name).subnet_ids
        single_subnet = subnet_ids[0:1]
        preferred_subnet_id = single_subnet[0]
        self.windows_storage = fsx.CfnFileSystem(
            self,
            'WinFs',
            subnet_ids=single_subnet,
            file_system_type='WINDOWS',
            security_group_ids=[landing_zone.security_group.security_group_id],
            # Minimum storage capacity: 2 TB for HDD, 32 GB for SSD
            storage_type='SSD',
            storage_capacity=500,
            tags=[
                core.CfnTag(key='Name', value='winfs.virtual.world'),
            ],
            windows_configuration=fsx.CfnFileSystem.WindowsConfigurationProperty(
                weekly_maintenance_start_time='1:11:00',  # Mon 11:00 UTC (6AM UTC-5)
                # Throughput in MB/s: a power of 2 between 8 and 2048
                throughput_capacity=16,
                active_directory_id=ds.mad.ref,
                automatic_backup_retention_days=30,
                copy_tags_to_backups=True,
                deployment_type='SINGLE_AZ_2',  # or MULTI_AZ_1
                preferred_subnet_id=preferred_subnet_id))

        self.app_data = efs.FileSystem(
            self,
            'AppData',
            vpc=landing_zone.vpc,
            enable_automatic_backups=True,
            file_system_name='app-data.efs.virtual.world',
            security_group=landing_zone.security_group,
            vpc_subnets=ec2.SubnetSelection(
                subnet_group_name=subnet_group_name),
            lifecycle_policy=efs.LifecyclePolicy.AFTER_14_DAYS,
            removal_policy=core.RemovalPolicy.SNAPSHOT)
Example #6
    def create_efs_volume(self, name):
        # create an EFS filesystem and access point
        fs = efs.FileSystem(
            self,
            name + '-fs',
            vpc=self.vpc,
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.BURSTING,
            security_group=self.efs_sg)
        fs.add_access_point(name, path="/")
        # define an ECS volume for this filesystem
        volume = ecs.Volume(
            name=name + '-volume',
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=fs.file_system_id))
        return volume
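
A sketch of consuming the returned volume from a Fargate task definition; the task and container names here are illustrative, not from the original:

        volume = self.create_efs_volume('data')
        task_def = ecs.FargateTaskDefinition(self, 'AppTask', volumes=[volume])
        container = task_def.add_container(
            'app', image=ecs.ContainerImage.from_registry('amazonlinux:2'))
        container.add_mount_points(
            ecs.MountPoint(container_path='/mnt/data',
                           read_only=False,
                           source_volume=volume.name))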
Example #7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create vpc
        vpc = aws_ec2.Vpc(self, "vpc", max_azs=3, nat_gateways=1)

        # create efs share
        efs_share = aws_efs.FileSystem(self, "efs-backend", vpc=vpc)

        # create efs acl
        efs_acl = aws_efs.Acl(owner_gid="1000",
                              owner_uid="1000",
                              permissions="0777")

        # create efs posix user
        efs_user = aws_efs.PosixUser(gid="1000", uid="1000")

        # create efs access point
        efs_ap = aws_efs.AccessPoint(self,
                                     "efs-accesspoint",
                                     path="/efs",
                                     file_system=efs_share,
                                     posix_user=efs_user,
                                     create_acl=efs_acl)

        # create lambda with efs access
        efs_lambda = aws_lambda.Function(
            self,
            "read_efs",
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.from_asset("./function"),
            handler="efsread.handler",
            timeout=core.Duration.seconds(20),
            memory_size=128,
            retry_attempts=0,
            filesystem=aws_lambda.FileSystem.from_efs_access_point(
                efs_ap, '/mnt/efs'),
            tracing=aws_lambda.Tracing.ACTIVE,
            vpc=vpc,
            environment={"var": "x"})

        # create custom iam policy with efs permissions
        efs_policy = aws_iam.PolicyStatement(resources=["*"],
                                             actions=["elasticfilesystem:*"])

        # add efs iam policy to lambda
        efs_lambda.add_to_role_policy(efs_policy)
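
The statement above grants every EFS action on every resource. A least-privilege sketch; the exact actions a mounted Lambda needs are an assumption here:

        # Scope the policy to this file system and to client mount/write only.
        efs_policy = aws_iam.PolicyStatement(
            resources=[efs_share.file_system_arn],
            actions=[
                "elasticfilesystem:ClientMount",
                "elasticfilesystem:ClientWrite"
            ])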
Example #8
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
                 vpc_subnets: list, config: dict, **kwargs):
        super().__init__(scope, id, **kwargs)

        if config["efs"]["enabled"]:
            self._fs = efs.FileSystem(
                self,
                "EFS",
                vpc=vpc,
                vpc_subnets=vpc_subnets,
                encrypted=True,
                enable_automatic_backups=True,
                performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
                throughput_mode=efs.ThroughputMode.BURSTING,
                lifecycle_policy=getattr(efs.LifecyclePolicy,
                                         config["efs"]["lifecycle_policy"]))
            core.CfnOutput(self, "EFS_FSID", value=self._fs.file_system_id)
Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        volume_name = 'factorio'
        self.vpc = ec2.Vpc(self, "vpc", max_azs=1, nat_gateways=0)

        self.efs_fs = efs.FileSystem(self,
                                     'Filesystem',
                                     vpc=self.vpc,
                                     enable_automatic_backups=True)
        self.ecs = ecs.Cluster(self, "Fargate", vpc=self.vpc)
        self.task_definition = ecs.FargateTaskDefinition(
            self,
            "Factorio",
            cpu=2048,
            memory_limit_mib=4096,
            volumes=[
                ecs.Volume(
                    name=volume_name,
                    efs_volume_configuration=ecs.EfsVolumeConfiguration(
                        file_system_id=self.efs_fs.file_system_id))
            ])
        self.container = self.task_definition.add_container(
            "hello-world",
            image=ecs.ContainerImage.from_registry(
                name="factoriotools/factorio:stable"))
        self.container.add_mount_points(
            ecs.MountPoint(container_path="/factorio",
                           read_only=False,
                           source_volume=volume_name))
        udp_34197_mapping = ecs.PortMapping(container_port=34197,
                                            host_port=34197,
                                            protocol=ecs.Protocol.UDP)
        tcp_27015_mapping = ecs.PortMapping(container_port=27015,
                                            host_port=27015,
                                            protocol=ecs.Protocol.TCP)
        self.container.add_port_mappings(udp_34197_mapping, tcp_27015_mapping)

        core.CfnOutput(self, "VPC", value=self.vpc.vpc_id)
        core.CfnOutput(self, "EFS", value=self.efs_fs.file_system_id)
        core.CfnOutput(self, "TaskDef", value=self.task_definition.task_definition_arn)
        core.CfnOutput(self, "Container", value=self.container.container_name)
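
The snippet defines the task but never runs it, and the file system is not opened to the tasks. A minimal sketch of both steps; the service name and public-IP choice are assumptions:

        service = ecs.FargateService(
            self,
            "FactorioService",
            cluster=self.ecs,
            task_definition=self.task_definition,
            assign_public_ip=True,
            platform_version=ecs.FargatePlatformVersion.VERSION1_4)  # EFS needs 1.4+
        # EFS speaks NFS on TCP 2049; open that port from the service's tasks.
        self.efs_fs.connections.allow_default_port_from(service)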
Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # EFS needs to be setup in a VPC
        vpc = ec2.Vpc(self, 'Vpc', max_azs=2)

        # Create a file system in EFS to store information
        fs = efs.FileSystem(self,
                            'FileSystem',
                            vpc=vpc,
                            removal_policy=core.RemovalPolicy.DESTROY)

        access_point = fs.add_access_point(
            'AccessPoint',
            create_acl=efs.Acl(owner_gid='1001',
                               owner_uid='1001',
                               permissions='750'),
            path="/export/lambda",
            posix_user=efs.PosixUser(gid="1001", uid="1001"))

        efs_lambda = _lambda.Function(
            self,
            'rdsProxyHandler',
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset('lambda_fns'),
            handler='message_wall.lambda_handler',
            vpc=vpc,
            filesystem=_lambda.FileSystem.from_efs_access_point(
                access_point, '/mnt/msg'))

        # defines an API Gateway Http API resource backed by our "efs_lambda" function.
        api = api_gw.HttpApi(
            self,
            'EFS Lambda',
            default_integration=integrations.LambdaProxyIntegration(
                handler=efs_lambda))

        core.CfnOutput(self, 'HTTP API Url', value=api.url)
Example #11
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
        vpc = ec2.Vpc(self,
                      "vpc",
                      cidr=props['vpc_CIDR'],
                      max_azs=3,
                      subnet_configuration=[{
                          'cidrMask': 28,
                          'name': 'public',
                          'subnetType': ec2.SubnetType.PUBLIC
                      }, {
                          'cidrMask': 28,
                          'name': 'private',
                          'subnetType': ec2.SubnetType.PRIVATE
                      }, {
                          'cidrMask': 28,
                          'name': 'db',
                          'subnetType': ec2.SubnetType.ISOLATED
                      }])

        rds_subnetGroup = rds.SubnetGroup(
            self,
            "rds_subnetGroup",
            description=f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED))

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
        ##TODO:ADD Aurora Serverless Option
        rds_instance = rds.DatabaseCluster(
            self,
            'wordpress-db',
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_2),
            instances=1,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                enable_performance_insights=props[
                    'rds_enable_performance_insights'],
                instance_type=ec2.InstanceType(
                    instance_type_identifier=props['rds_instance_type'])),
            subnet_group=rds_subnetGroup,
            storage_encrypted=props['rds_storage_encrypted'],
            backup=rds.BackupProps(retention=core.Duration.days(
                props['rds_automated_backup_retention_days'])))

        EcsToRdsSecurityGroup = ec2.SecurityGroup(
            self,
            "EcsToRdsSecurityGroup",
            vpc=vpc,
            description="Allow WordPress containers to talk to RDS")

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
        db_cred_generator = _lambda.Function(
            self,
            'db_creds_generator',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='db_creds_generator.handler',
            code=_lambda.Code.asset('lambda/db_creds_generator'),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED
            ),  #vpc.select_subnets(subnet_type = ec2.SubnetType("ISOLATED")).subnets ,
            environment={
                'SECRET_NAME': rds_instance.secret.secret_name,
            })

        #Set Permissions and Sec Groups
        rds_instance.connections.allow_from(
            EcsToRdsSecurityGroup,
            ec2.Port.tcp(3306))  #Open hole to RDS in RDS SG

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
        file_system = efs.FileSystem(
            self,
            "MyEfsFileSystem",
            vpc=vpc,
            encrypted=True,  # file system is not encrypted by default
            lifecycle_policy=props['efs_lifecycle_policy'],
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.BURSTING,
            removal_policy=core.RemovalPolicy(props['efs_removal_policy']),
            enable_automatic_backups=props['efs_automatic_backups'])

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html?highlight=ecs%20cluster#aws_cdk.aws_ecs.Cluster
        cluster = ecs.Cluster(
            self,
            "Cluster",
            vpc=vpc,
            container_insights=props['ecs_enable_container_insights'])

        if props['deploy_bastion_host']:
            #ToDo: Deploy bastion host with a key file
            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
            bastion_host = ec2.BastionHostLinux(self, 'bastion_host', vpc=vpc)
            rds_instance.connections.allow_from(bastion_host,
                                                ec2.Port.tcp(3306))

            #######################
            ### Developer Tools ###
            # SFTP into the EFS Shared File System

            NetToolsSecret = secretsmanager.Secret(
                self,
                "NetToolsSecret",
                generate_secret_string=secretsmanager.SecretStringGenerator(
                    secret_string_template=json.dumps({
                        "username": '******',
                        "ip": ''
                    }),
                    generate_string_key="password",
                    exclude_characters='/"'))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
            AccessPoint = file_system.add_access_point(
                "access-point",
                path="/",
                create_acl=efs.Acl(
                    # UID/GID per https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
                    owner_uid="100",
                    owner_gid="101",
                    permissions="0755"))

            EfsVolume = ecs.Volume(
                name="efs",
                efs_volume_configuration=ecs.EfsVolumeConfiguration(
                    file_system_id=file_system.file_system_id,
                    transit_encryption="ENABLED",
                    authorization_config=ecs.AuthorizationConfig(
                        access_point_id=AccessPoint.access_point_id)))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
            NetToolsTask = ecs.FargateTaskDefinition(self,
                                                     "TaskDefinition",
                                                     cpu=256,
                                                     memory_limit_mib=512,
                                                     volumes=[EfsVolume])

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
            NetToolsContainer = NetToolsTask.add_container(
                "NetTools",
                image=ecs.ContainerImage.from_registry('netresearch/sftp'),
                command=['test:test:100:101:efs'])
            NetToolsContainer.add_port_mappings(
                ecs.PortMapping(container_port=22, protocol=ecs.Protocol.TCP))

            NetToolsContainer.add_mount_points(
                ecs.MountPoint(
                    container_path="/home/test/efs",  #ToDo build path out with username from secret
                    read_only=False,
                    source_volume=EfsVolume.name,
                ))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html?highlight=fargateservice#aws_cdk.aws_ecs.FargateService
            service = ecs.FargateService(
                self,
                "Service",
                cluster=cluster,
                task_definition=NetToolsTask,
                platform_version=ecs.FargatePlatformVersion.VERSION1_4,  #Required for EFS
            )
            #ToDo somehow store container's IP on deploy

            #Allow traffic to EFS Volume from Net Tools container
            service.connections.allow_to(file_system, ec2.Port.tcp(2049))
            #ToDo allow bastion host into container on port 22

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
            bastion_ip_locator = _lambda.Function(
                self,
                'bastion_ip_locator',
                function_name=f"{props['environment']}-{props['application']}-{props['unit']}-SFTP-IP",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler='bastion_ip_locator.handler',
                code=_lambda.Code.asset('lambda/bastion_ip_locator'),
                environment={
                    'CLUSTER_NAME': cluster.cluster_arn,
                    'SERVICE_NAME': service.service_name
                })

            #Give needed perms to bastion_ip_locator for reading info from ECS
            bastion_ip_locator.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["ecs:DescribeTasks"],
                    resources=[
                        #f"arn:aws:ecs:us-east-1:348757191778:service/{cluster.cluster_name}/{service.service_name}",
                        f"arn:aws:ecs:us-east-1:348757191778:task/{cluster.cluster_name}/*"
                    ]))
            bastion_ip_locator.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["ecs:ListTasks"],
                    resources=["*"],
                    conditions={
                        'ArnEquals': {
                            'ecs:cluster': cluster.cluster_arn
                        }
                    }))

        self.output_props = props.copy()
        self.output_props["vpc"] = vpc
        self.output_props["rds_instance"] = rds_instance
        self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
        self.output_props["file_system"] = file_system
        self.output_props["cluster"] = cluster
Example #12
    def __init__(self, scope: core.Construct, id: str, x86_ecr_repo_name: str,
                 arm_ecr_repo_name: str, spec_file_path: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Define CodeBuild resource.
        git_hub_source = codebuild.Source.git_hub(
            owner=GITHUB_REPO_OWNER,
            repo=GITHUB_REPO_NAME,
            webhook=True,
            webhook_filters=[
                codebuild.FilterGroup.in_event_of(
                    codebuild.EventAction.PULL_REQUEST_CREATED,
                    codebuild.EventAction.PULL_REQUEST_UPDATED,
                    codebuild.EventAction.PULL_REQUEST_REOPENED)
            ],
            clone_depth=1)

        # Define an IAM role for this stack.
        code_build_batch_policy = iam.PolicyDocument.from_json(
            code_build_batch_policy_in_json([id]))
        fuzz_policy = iam.PolicyDocument.from_json(
            code_build_publish_metrics_in_json())
        inline_policies = {
            "code_build_batch_policy": code_build_batch_policy,
            "fuzz_policy": fuzz_policy
        }
        role = iam.Role(
            scope=self,
            id="{}-role".format(id),
            assumed_by=iam.ServicePrincipal("codebuild.amazonaws.com"),
            inline_policies=inline_policies)

        # Create the VPC for EFS and CodeBuild
        public_subnet = ec2.SubnetConfiguration(
            name="PublicFuzzingSubnet", subnet_type=ec2.SubnetType.PUBLIC)
        private_subnet = ec2.SubnetConfiguration(
            name="PrivateFuzzingSubnet", subnet_type=ec2.SubnetType.PRIVATE)

        # Create a VPC with a single public and private subnet in a single AZ. This is to avoid the elastic IP limit
        # being used up by a bunch of idle NAT gateways
        fuzz_vpc = ec2.Vpc(
            scope=self,
            id="{}-FuzzingVPC".format(id),
            subnet_configuration=[public_subnet, private_subnet],
            max_azs=1)
        build_security_group = ec2.SecurityGroup(
            scope=self, id="{}-FuzzingSecurityGroup".format(id), vpc=fuzz_vpc)

        build_security_group.add_ingress_rule(
            peer=build_security_group,
            connection=ec2.Port.all_traffic(),
            description="Allow all traffic inside security group")

        efs_subnet_selection = ec2.SubnetSelection(
            subnet_type=ec2.SubnetType.PRIVATE)

        # Create the EFS that stores the corpus and logs. EFS allows new filesystems to burst to 100 MB/s for the
        # first 2 TB of data read/written; after that, the rate is limited based on the size of the filesystem. As of
        # late 2021 our corpus is less than one GB, which results in EFS limiting all reads and writes to the minimum
        # 1 MB/s. So that fuzzing can finish in a reasonable amount of time, use the Provisioned throughput option.
        # For now this uses 100 MB/s, which matches the performance used for 2021. Looking at EFS metrics in late 2021
        # during fuzz runs, EFS sees 4-22 MB/s of transfers, so 100 MB/s gives plenty of headroom and allows ~4-5 fuzz
        # runs to start at the same time with no issue.
        # https://docs.aws.amazon.com/efs/latest/ug/performance.html
        fuzz_filesystem = efs.FileSystem(
            scope=self,
            id="{}-FuzzingEFS".format(id),
            file_system_name="AWS-LC-Fuzz-Corpus",
            enable_automatic_backups=True,
            encrypted=True,
            security_group=build_security_group,
            vpc=fuzz_vpc,
            vpc_subnets=efs_subnet_selection,
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.PROVISIONED,
            provisioned_throughput_per_second=core.Size.mebibytes(100),
        )

        # Create build spec.
        placeholder_map = {
            "X86_ECR_REPO_PLACEHOLDER": ecr_arn(x86_ecr_repo_name),
            "ARM_ECR_REPO_PLACEHOLDER": ecr_arn(arm_ecr_repo_name)
        }
        build_spec_content = YmlLoader.load(spec_file_path, placeholder_map)

        # Define CodeBuild.
        fuzz_codebuild = codebuild.Project(
            scope=self,
            id="FuzzingCodeBuild",
            project_name=id,
            source=git_hub_source,
            role=role,
            timeout=core.Duration.minutes(120),
            environment=codebuild.BuildEnvironment(
                compute_type=codebuild.ComputeType.LARGE,
                privileged=True,
                build_image=codebuild.LinuxBuildImage.STANDARD_4_0),
            build_spec=codebuild.BuildSpec.from_object(build_spec_content),
            vpc=fuzz_vpc,
            security_groups=[build_security_group])

        # TODO: add build type BUILD_BATCH when CFN finishes the feature release. See CryptoAlg-575.

        # Add 'BuildBatchConfig' property, which is not supported in CDK.
        # CDK raw overrides: https://docs.aws.amazon.com/cdk/latest/guide/cfn_layer.html#cfn_layer_raw
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-codebuild-project.html#aws-resource-codebuild-project-properties
        cfn_codebuild = fuzz_codebuild.node.default_child
        cfn_codebuild.add_override("Properties.BuildBatchConfig", {
            "ServiceRole": role.role_arn,
            "TimeoutInMins": 120
        })

        # The EFS identifier needs to match tests/ci/common_fuzz.sh, CodeBuild defines an environment variable named
        # codebuild_$identifier.
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codebuild-project-projectfilesystemlocation.html
        #
        # TODO: add this to the CDK project above when it supports EfsFileSystemLocation
        cfn_codebuild.add_override("Properties.FileSystemLocations", [{
            "Identifier":
            "fuzzing_root",
            "Location":
            "%s.efs.%s.amazonaws.com:/" %
            (fuzz_filesystem.file_system_id, AWS_REGION),
            "MountPoint":
            "/efs_fuzzing_root",
            "Type":
            "EFS"
        }])
Example #13
security_group.add_ingress_rule(
    ec2.Peer.any_ipv4(),
    ec2.Port.tcp(2049)
)

security_group.add_ingress_rule(
    ec2.Peer.any_ipv4(),
    ec2.Port.tcp(5000)
)

security_group.add_ingress_rule(
    ec2.Peer.any_ipv4(),
    ec2.Port.tcp(3000)
)

efsvol = efs.FileSystem(stack,
                        'efsvolume',
                        vpc=vpc,
                        file_system_name='onetest',
                        security_group=security_group)

efsdns = efsvol.file_system_id + ".efs.us-east-1.amazonaws.com"

efs_to_connect = "addr=" + efsdns + ",nfsvers=4.0,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2"
#efs_to_connect="addr=" + efsvol.file_system_id
device_set = efsdns + ":/"
driveropts = {
    "type": "nfs",
    "device": device_set,
    "o": efs_to_connect
    #"o": "addr=fs-XXXXXX.efs.us-east-1.amazonaws.com,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport"
}

docker_vol_config = ecs.DockerVolumeConfiguration(driver='local', scope=ecs.Scope.TASK, driver_opts=driveropts, labels=None)
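
docker_vol_config is left unused above. A sketch of wiring it into a task definition; note Docker volume drivers only apply to EC2 launch-type tasks, so an Ec2TaskDefinition is assumed (the names here are illustrative):

task_definition = ecs.Ec2TaskDefinition(stack, 'nfs-task')
task_definition.add_volume(name='efs-nfs',
                           docker_volume_configuration=docker_vol_config)
container = task_definition.add_container(
    'app',
    image=ecs.ContainerImage.from_registry('amazonlinux:2'),
    memory_limit_mib=512)
container.add_mount_points(
    ecs.MountPoint(container_path='/mnt/efs',
                   read_only=False,
                   source_volume='efs-nfs'))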
Example #14
    def __init__(self, scope: Construct, construct_id: str, env, vpc,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, env=env, **kwargs)

        efs_sg = ec2.SecurityGroup(self,
                                   'EfsSG',
                                   vpc=vpc,
                                   description='EKS EFS SG',
                                   security_group_name='eks-efs')
        efs_sg.add_ingress_rule(ec2.Peer.ipv4('10.3.0.0/16'),
                                ec2.Port.all_traffic(), "EFS VPC access")
        Tags.of(efs_sg).add(key='cfn.eks-dev.stack', value='sg-stack')
        Tags.of(efs_sg).add(key='Name', value='eks-efs')
        Tags.of(efs_sg).add(key='env', value='dev')

        file_system = efs.FileSystem(
            self,
            construct_id,
            vpc=vpc,
            file_system_name='eks-efs',
            lifecycle_policy=efs.LifecyclePolicy.AFTER_14_DAYS,
            removal_policy=RemovalPolicy.DESTROY,
            security_group=efs_sg)

        Tags.of(file_system).add(key='cfn.eks-dev.stack', value='efs-stack')
        Tags.of(file_system).add(key='efs.csi.aws.com/cluster', value='true')
        Tags.of(file_system).add(key='Name', value='eks-efs')
        Tags.of(file_system).add(key='env', value='dev')

        policy_statement_1 = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "elasticfilesystem:DescribeAccessPoints",
                "elasticfilesystem:DescribeFileSystems"
            ],
            resources=['*'],
            conditions={
                'StringEquals': {
                    "aws:RequestedRegion": "ap-northeast-2"
                }
            })
        policy_statement_2 = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "elasticfilesystem:CreateAccessPoint",
                "elasticfilesystem:DeleteAccessPoint"
            ],
            resources=['*'],
            conditions={
                'StringEquals': {
                    "aws:ResourceTag/efs.csi.aws.com/cluster": "true"
                }
            })

        # EFS CSI SA
        efs_csi_role = iam.Role(
            self,
            'EfsCSIRole',
            role_name='eks-efs-csi-sa',
            assumed_by=iam.FederatedPrincipal(
                federated=oidc_arn,
                assume_role_action='sts:AssumeRoleWithWebIdentity',
                conditions={
                    'StringEquals':
                    string_like('kube-system', 'efs-csi-controller-sa')
                },
            ))

        for stm in [policy_statement_1, policy_statement_2]:
            efs_csi_role.add_to_policy(stm)

        Tags.of(efs_csi_role).add(key='cfn.eks-dev.stack', value='role-stack')
Example #15
    def __init__(self, scope: core.Construct, id: str, region, domain,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # VPC: we need one for the ECS cluster (sadly)
        vpc = ec2.Vpc.from_lookup(self, 'vpc', is_default=True)

        cluster = ecs.Cluster(self, 'Cluster', vpc=vpc)

        # Route53 & SSL Certificate
        zone = dns.HostedZone(self, "dns", zone_name=domain)

        dns.ARecord(self,
                    'MinecraftRecord',
                    zone=zone,
                    record_name='minecraft',
                    target=dns.RecordTarget(values=['1.2.3.4']))

        cert = acm.Certificate(
            self,
            'cert',
            domain_name=f'*.{domain}',
            validation=acm.CertificateValidation.from_dns(zone))

        # ECS ( Cluster, EFS, Task Def)
        fs = efs.FileSystem(self,
                            'EFS',
                            vpc=vpc,
                            removal_policy=core.RemovalPolicy.DESTROY)

        task_definition = ecs.FargateTaskDefinition(self,
                                                    'TaskDef',
                                                    memory_limit_mib=4096,
                                                    cpu=1024)

        container = task_definition.add_container(
            'MinecraftDocker',
            image=ecs.ContainerImage.from_registry('darevee/minecraft-aws'),
            logging=ecs.AwsLogDriver(stream_prefix='Minecraft'),
            cpu=1024,
            memory_limit_mib=4096)
        container.add_mount_points(
            ecs.MountPoint(container_path='/minecraft',
                           source_volume='efs',
                           read_only=False))
        cfn_task = container.task_definition.node.default_child
        cfn_task.add_property_override("Volumes", [{
            "EFSVolumeConfiguration": {
                "FilesystemId": fs.file_system_id
            },
            "Name": "efs"
        }])

        container.add_port_mappings(ecs.PortMapping(container_port=25565))

        sg = ec2.SecurityGroup(self, 'sg', vpc=vpc)
        sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                            connection=ec2.Port.tcp(25565),
                            description='Minecraft Access')
        sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                            connection=ec2.Port.tcp(25575),
                            description='RCONN Access')

        fs.connections.allow_default_port_from(sg)

        subnets = ",".join(vpc.select_subnets().subnet_ids)

        # Cognito ( For ApiGW Authentication)
        userpool = cognito.UserPool(
            self,
            'UserPool',
            user_invitation=cognito.UserInvitationConfig(
                email_body="""Hey {username}, you have been invited to our Minecraft server!
                Your temporary password is {####}
                """,
                email_subject="Minecraft invitation"))

        # APIGW (Gateway, Lambdas, S3 Static content)

        # Lambda Starter
        starter = _lambda.Function(self,
                                   'Starter',
                                   runtime=_lambda.Runtime.PYTHON_3_8,
                                   handler='index.lambda_handler',
                                   code=_lambda.Code.asset('lambda/starter'),
                                   timeout=core.Duration.seconds(300),
                                   environment={
                                       'cluster': cluster.cluster_name,
                                       'subnets': subnets,
                                       'security_groups': sg.security_group_id,
                                       'task_definition':
                                       task_definition.task_definition_arn,
                                       'region': region,
                                       'zone_id': zone.hosted_zone_id,
                                       'domain': domain
                                   })

        starter.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["*"],
                                actions=[
                                    "ecs:ListTasks", "ecs:DescribeTasks",
                                    "ec2:DescribeNetworkInterfaces"
                                ]))
        starter.add_to_role_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                resources=[task_definition.task_definition_arn],
                actions=["ecs:RunTask", "ecs:DescribeTasks"]))
        starter.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[
                                    task_definition.task_role.role_arn,
                                    task_definition.execution_role.role_arn
                                ],
                                actions=["iam:PassRole"]))

        starter.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[zone.hosted_zone_arn],
                                actions=["route53:ChangeResourceRecordSets"]))

        # S3 static webpage
        bucket = s3.Bucket(self,
                           "S3WWW",
                           public_read_access=True,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           website_index_document="index.html")
        s3d.BucketDeployment(self,
                             "S3Deploy",
                             destination_bucket=bucket,
                             sources=[s3d.Source.asset("static_page")])

        status = _lambda.Function(self,
                                  'Status',
                                  runtime=_lambda.Runtime.PYTHON_3_8,
                                  handler='index.lambda_handler',
                                  code=_lambda.Code.asset('lambda/status'),
                                  environment={
                                      'url': f"https://minecrafter.{domain}",
                                      'domain': domain
                                  })

        # ApiGW
        apigw = api.LambdaRestApi(self,
                                  'ApiGW',
                                  handler=status,
                                  proxy=False,
                                  domain_name={
                                      "domain_name": f'minecrafter.{domain}',
                                      "certificate": cert
                                  },
                                  default_cors_preflight_options={
                                      "allow_origins": api.Cors.ALL_ORIGINS,
                                      "allow_methods": api.Cors.ALL_METHODS
                                  })

        start = apigw.root.add_resource('start')
        start.add_method('ANY', integration=api.LambdaIntegration(starter))

        apigw.root.add_method('ANY')

        dns.ARecord(self,
                    'PointDNSToApiGW',
                    zone=zone,
                    target=dns.RecordTarget.from_alias(
                        targets.ApiGateway(apigw)),
                    record_name=f"minecrafter.{domain}")
Example #16
# NOTE: the opening of this snippet was truncated; the assignment below is a
# reconstruction (the construct id is assumed).
security_group = ec2.SecurityGroup(stack,
                                   'securityGroup',
                                   vpc=vpc,
                                   allow_all_outbound=True)
security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80))

security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8080))

security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(443))
security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(2049))
security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.udp(111))
security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5000))

security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(3000))

efsvol = efs.FileSystem(stack,
                        'VISTA_VONC_efsvolume',
                        vpc=vpc,
                        file_system_name='wes',
                        security_group=security_group)

efsdns = efsvol.file_system_id + ".efs.us-east-1.amazonaws.com"

efs_to_connect = "addr=" + efsdns + ",nfsvers=4.0,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2"
#efs_to_connect="addr=" + efsvol.file_system_id
device_set = efsdns + ":/"
driveropts = {
    "type": "nfs",
    "device": device_set,
    "o": efs_to_connect
    #"o": "addr=fs-XXXXXX.efs.us-east-1.amazonaws.com,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport"
}
Example #17
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc,
                 efs_mnt_path: str = "/efs",
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create Security Group to connect to EFS
        self.efs_sg = _ec2.SecurityGroup(
            self,
            id="efsSecurityGroup",
            vpc=vpc,
            security_group_name=f"efs_sg_{id}",
            description="Security Group to connect to EFS from the VPC")

        self.efs_sg.add_ingress_rule(
            peer=_ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=_ec2.Port.tcp(2049),
            description="Allow EC2 instances within the same VPC to connect to EFS")

        # Let us create the EFS Filesystem
        self.efs_share = _efs.FileSystem(
            self,
            "elasticFileSystem",
            file_system_name=f"high-performance-storage",
            vpc=vpc,
            security_group=self.efs_sg,
            encrypted=False,
            lifecycle_policy=_efs.LifecyclePolicy.AFTER_7_DAYS,
            performance_mode=_efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=_efs.ThroughputMode.BURSTING,
            removal_policy=core.RemovalPolicy.DESTROY)

        # create efs acl
        efs_acl = _efs.Acl(owner_gid="1000",
                           owner_uid="1000",
                           permissions="0777")

        # create efs posix user
        efs_user = _efs.PosixUser(gid="1000", uid="1000")

        # create efs access point
        self.efs_ap = _efs.AccessPoint(self,
                                       "efsAccessPoint",
                                       path=f"{efs_mnt_path}",
                                       file_system=self.efs_share,
                                       posix_user=efs_user,
                                       create_acl=efs_acl)

        ###########################################
        ################# OUTPUTS #################
        ###########################################
        output_0 = core.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description="To know more about this automation stack, check out our github page.")

        output_1 = core.CfnOutput(
            self,
            "MountEfs",
            value=f"sudo mount -t efs -o tls {self.efs_share.file_system_id}:/ /mnt/efs",
            description="Use this command to mount efs using efs helper utility at location /mnt/efs")
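
A sketch of running that mount command from an EC2 instance's user data; the instance variable and the amazon-efs-utils install step are assumptions (the instance must live in the same VPC and be allowed by the security group above):

        instance.user_data.add_commands(
            "yum install -y amazon-efs-utils",
            "mkdir -p /mnt/efs",
            f"mount -t efs -o tls {self.efs_share.file_system_id}:/ /mnt/efs",
        )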
Example #18
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Get config value for alert email
        email = self.node.try_get_context("email")
        if email == 'changeme@localhost':
            exit(
                'ERROR: Change the email in cdk.json or pass it with -c email=changeme@localhost'
            )

        # Create SNS for alarms to be sent to
        alarm_topic = sns.Topic(self,
                                "backup_alarm",
                                display_name="backup_alarm")

        # Subscribe my email so the alarms go to me
        alarm_topic.add_subscription(subscriptions.EmailSubscription(email))

        # Create VPC to run everything in. We make this public just because we don't
        # want to spend $30/mo on a NAT gateway.
        vpc = ec2.Vpc(
            self,
            "VPC",
            nat_gateways=0,
            subnet_configuration=[
                ec2.SubnetConfiguration(name="public",
                                        subnet_type=ec2.SubnetType.PUBLIC)
            ],
        )

        ecs_sg = ec2.SecurityGroup(self, "ecs_sg", vpc=vpc)
        efs_sg = ec2.SecurityGroup(self, "efs_sg", vpc=vpc)
        efs_sg.add_ingress_rule(
            peer=ecs_sg,
            connection=ec2.Port.tcp(2049),
            description="Allow backup runner access",
        )
        # Open this to the VPC
        efs_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4("10.0.0.0/8"),
            connection=ec2.Port.tcp(2049),
            description="Allow backup runner access",
        )

        # Define the EFS
        fileSystem = efs.FileSystem(
            self,
            "MyEfsFileSystem",
            vpc=vpc,
            encrypted=True,
            lifecycle_policy=efs.LifecyclePolicy.AFTER_7_DAYS,
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.BURSTING,
            security_group=efs_sg,
        )

        # Define the ECS task
        cluster = ecs.Cluster(self, "Cluster", vpc=vpc)
        taskDefinition = ecs.FargateTaskDefinition(
            self,
            "taskDefinition",
            volumes=[
                ecs.Volume(
                    name="efsvolume",
                    efs_volume_configuration=ecs.EfsVolumeConfiguration(
                        file_system_id=fileSystem.file_system_id,
                        root_directory="/",
                        transit_encryption="ENABLED",
                    ),
                )
            ],
            memory_limit_mib=8192,
            cpu=2048,
        )

        log_driver = ecs.AwsLogDriver(
            stream_prefix="backup_runner",
            log_retention=logs.RetentionDays.TWO_WEEKS,
        )

        taskDefinition.add_container(
            "backup-runner",
            image=ecs.ContainerImage.from_asset("./resources/backup_runner"),
            memory_limit_mib=8192,
            cpu=2048,
            logging=log_driver,
        )

        # The add_container call above doesn't let us specify the mount point for the EFS,
        # so we have to do it here, referencing the container that was just added.
        taskDefinition.default_container.add_mount_points(
            ecs.MountPoint(container_path="/mnt/efs",
                           read_only=False,
                           source_volume="efsvolume"))

        # Create rule to trigger this be run every 24 hours
        events.Rule(
            self,
            "scheduled_run",
            rule_name="backup_runner",
            # Run every night at midnight UTC
            schedule=events.Schedule.expression("cron(0 0 * * ? *)"),
            description="Starts the backup runner task every night",
            targets=[
                targets.EcsTask(
                    cluster=cluster,
                    task_definition=taskDefinition,
                    subnet_selection=ec2.SubnetSelection(
                        subnet_type=ec2.SubnetType.PUBLIC),
                    platform_version=ecs.FargatePlatformVersion.VERSION1_4,  # Required to use EFS; "LATEST" does not yet support EFS
                    security_groups=[ecs_sg],
                )
            ],
        )

        # Create notification topic for backups
        backup_topic = sns.Topic(self,
                                 "backup_topic",
                                 display_name="Backup status")

        # Create AWS Backup
        vault = backup.BackupVault(
            self,
            "Vault",
            access_policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.DENY,
                    actions=[
                        "backup:DeleteBackupVault",
                        "backup:DeleteRecoveryPoint",
                        "backup:UpdateRecoveryPointLifecycle",
                        # "backup:PutBackupVaultAccessPolicy", # This results in "Failed putting policy for Backup vault backuprunnerVaultXXX as it will lock down from further policy changes"
                        "backup:DeleteBackupVaultAccessPolicy",
                        "backup:DeleteBackupVaultNotifications",
                        # "backup:PutBackupVaultNotifications", # This causes oher part of this app to fail.
                    ],
                    resources=["*"],
                    principals=[iam.AnyPrincipal()],
                )
            ]),
            notification_topic=alarm_topic,
            notification_events=[
                # Monitor for some failures or access to the backups
                backup.BackupVaultEvents.BACKUP_JOB_EXPIRED,
                backup.BackupVaultEvents.BACKUP_JOB_FAILED,
                backup.BackupVaultEvents.COPY_JOB_FAILED,
                backup.BackupVaultEvents.COPY_JOB_STARTED,
                backup.BackupVaultEvents.RESTORE_JOB_COMPLETED,
                backup.BackupVaultEvents.RESTORE_JOB_FAILED,
                backup.BackupVaultEvents.RESTORE_JOB_STARTED,
                backup.BackupVaultEvents.RESTORE_JOB_SUCCESSFUL,
            ],
        )

        plan = backup.BackupPlan.daily35_day_retention(self, "backup")
        plan.add_selection(
            "Selection",
            resources=[backup.BackupResource.from_efs_file_system(fileSystem)],
        )

        #
        # Create metric filter for errors in the CloudWatch Logs from the ECS
        #
        METRIC_NAME = "log_errors"
        METRIC_NAMESPACE = "backup_runner"

        metric = cloudwatch.Metric(namespace=METRIC_NAMESPACE,
                                   metric_name=METRIC_NAME)

        error_metric = logs.MetricFilter(
            self,
            "MetricFilterId",
            metric_name=METRIC_NAME,
            metric_namespace=METRIC_NAMESPACE,
            log_group=log_driver.log_group,
            filter_pattern=logs.FilterPattern.any_term("ERROR"),
            metric_value="1",
        )

        error_alarm = cloudwatch.Alarm(
            self,
            "AlarmId",
            metric=metric,
            evaluation_periods=1,
            actions_enabled=True,
            alarm_name="backuper_runner_alarm",
            alarm_description="Errors in backup runner",
            comparison_operator=cloudwatch.ComparisonOperator.GREATER_THAN_OR_EQUAL_TO_THRESHOLD,
            treat_missing_data=cloudwatch.TreatMissingData.NOT_BREACHING,
            period=core.Duration.hours(1),
            threshold=1,
            statistic="sum",
        )

        # Connect the alarm to the SNS
        error_alarm.add_alarm_action(cloudwatch_actions.SnsAction(alarm_topic))

        # The above doesn't give it privileges, so add them to the alarm topic resource policy.
        alarm_topic.add_to_resource_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=["sns:Publish"],
                resources=[alarm_topic.topic_arn],
                principals=[iam.ServicePrincipal("cloudwatch.amazonaws.com")],
            ))
Example #19
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        # VPC
        self.vpc = ec2.Vpc(
            self,
            "VPC",
            max_azs=2,
            cidr="10.10.0.0/16",
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="Public",
                                        cidr_mask=24),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                        name="Private",
                                        cidr_mask=24)
            ],
            nat_gateways=1)

        # Security group
        # self.sg = ec2.SecurityGroup(self, "securityGroup", self.vpc)
        self.sg = ec2.SecurityGroup.from_security_group_id(
            self,
            "securityGroup",
            self.vpc.vpc_default_security_group,
            mutable=False)

        # Create EFS inside VPC
        self.efs = efs.FileSystem(
            self,
            "commonEFS4Notebooks",
            vpc=self.vpc,
            encrypted=True,
            enable_automatic_backups=True,
            performance_mode=efs.PerformanceMode.MAX_IO,
            throughput_mode=efs.ThroughputMode.BURSTING,
            security_group=self.sg)

        # Mount target for EFS
        # self.mount = efs.CfnMountTarget(
        #     self,
        #     "MountID",
        #     file_system_id=self.efs.file_system_id,security_groups=[self.sg.security_group_id,],
        #     subnet_id=self.vpc.private_subnets[0].subnet_id,
        #     )

        # IAM Roles
        #Create role for Notebook instance
        nRole = iam_.Role(self,
                          "notebookAccessRole",
                          assumed_by=iam_.ServicePrincipal('sagemaker.amazonaws.com'))

        nPolicy = iam_.Policy(
            self,
            "notebookAccessPolicy",
            policy_name="notebookAccessPolicy",
            statements=[
                iam_.PolicyStatement(actions=['s3:*'], resources=['*']),
            ]).attach_to_role(nRole)

        #Create notebook instances cluster

        # print(self.mount.get_att('attr_ip_address').to_string())
        encodedScript = LifecycleScriptStr.format(self.efs.file_system_id)
        # print("Adding following script to the lifecycle config..\n___\n\n"+encodedScript)

        code = [{"content": core.Fn.base64(encodedScript)}]

        lifecycleconfig = sm.CfnNotebookInstanceLifecycleConfig(
            self,
            "LifeCycleConfig",
            notebook_instance_lifecycle_config_name=LifeCycleConfigName,
            on_create=None,
            on_start=code)

        instances = []
        for i in range(num_instances):
            nid = 'CDK-Notebook-Instance-User-' + str(i)
            instances.append(
                sm.CfnNotebookInstance(
                    self,
                    nid,
                    instance_type='ml.t2.medium',
                    volume_size_in_gb=5,
                    security_group_ids=[self.sg.security_group_id],
                    subnet_id=self.vpc.private_subnets[0].subnet_id,
                    notebook_instance_name=nid,
                    role_arn=nRole.role_arn,
                    lifecycle_config_name=lifecycleconfig.
                    notebook_instance_lifecycle_config_name))

        core.CfnOutput(self, "VPC_id", value=self.vpc.vpc_id)
        core.CfnOutput(self, "EFS_id", value=self.efs.file_system_id)
        [
            core.CfnOutput(self,
                           "NotebookInstance_" + str(c),
                           value=notebook.notebook_instance_name)
            for c, notebook in enumerate(instances)
        ]
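
Note: `LifecycleScriptStr` and `LifeCycleConfigName` are module-level constants that this snippet does not show. A minimal sketch of what they might contain, assuming the usual NFS mount of the file system into the notebook's SageMaker directory (the script body and the region suffix are assumptions, not part of the original):

LifeCycleConfigName = 'notebook-efs-config'  # hypothetical name

# {0} is replaced with the EFS file-system ID via str.format above.
LifecycleScriptStr = """#!/bin/bash
set -e
mkdir -p /home/ec2-user/SageMaker/efs
# us-east-1 is assumed here; substitute the stack's region.
mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2 \\
    {0}.efs.us-east-1.amazonaws.com:/ /home/ec2-user/SageMaker/efs
chmod go+rw /home/ec2-user/SageMaker/efs
"""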
Example No. 20
    def __init__(self, scope: core.Construct, construct_id: str,
                 properties: WordpressStackProperties, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        database = rds.ServerlessCluster(
            self,
            "WordpressServerless",
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            default_database_name="WordpressDatabase",
            vpc=properties.vpc,
            scaling=rds.ServerlessScalingOptions(
                auto_pause=core.Duration.seconds(0)),
            deletion_protection=False,
            backup_retention=core.Duration.days(7),
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        file_system = efs.FileSystem(
            self,
            "WebRoot",
            vpc=properties.vpc,
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.BURSTING,
        )

        # docker context directory
        docker_context_path = os.path.join(
            os.path.dirname(__file__), "..", "..", "src")

        # upload images to ecr
        nginx_image = ecr_assets.DockerImageAsset(
            self,
            "Nginx",
            directory=docker_context_path,
            file="Docker.nginx",
        )

        wordpress_image = ecr_assets.DockerImageAsset(
            self,
            "Php",
            directory=docker_context_path,
            file="Docker.wordpress",
        )

        cluster = ecs.Cluster(self,
                              'ComputeResourceProvider',
                              vpc=properties.vpc)

        wordpress_volume = ecs.Volume(
            name="WebRoot",
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=file_system.file_system_id))

        event_task = ecs.FargateTaskDefinition(self,
                                               "WordpressTask",
                                               volumes=[wordpress_volume])

        #
        # webserver
        #
        nginx_container = event_task.add_container(
            "Nginx",
            image=ecs.ContainerImage.from_docker_image_asset(nginx_image))

        nginx_container.add_port_mappings(ecs.PortMapping(container_port=80))

        nginx_container_volume_mount_point = ecs.MountPoint(
            read_only=True,
            container_path="/var/www/html",
            source_volume=wordpress_volume.name)
        nginx_container.add_mount_points(nginx_container_volume_mount_point)

        #
        # application server
        #
        app_container = event_task.add_container(
            "Php",
            environment={
                'WORDPRESS_DB_HOST': database.cluster_endpoint.hostname,
                'WORDPRESS_TABLE_PREFIX': 'wp_'
            },
            secrets={
                'WORDPRESS_DB_USER':
                ecs.Secret.from_secrets_manager(database.secret,
                                                field="username"),
                'WORDPRESS_DB_PASSWORD':
                ecs.Secret.from_secrets_manager(database.secret,
                                                field="password"),
                'WORDPRESS_DB_NAME':
                ecs.Secret.from_secrets_manager(database.secret,
                                                field="dbname"),
            },
            image=ecs.ContainerImage.from_docker_image_asset(wordpress_image))
        app_container.add_port_mappings(ecs.PortMapping(container_port=9000))

        container_volume_mount_point = ecs.MountPoint(
            read_only=False,
            container_path="/var/www/html",
            source_volume=wordpress_volume.name)
        app_container.add_mount_points(container_volume_mount_point)

        #
        # create service
        #
        wordpress_service = ecs.FargateService(
            self,
            "InternalService",
            task_definition=event_task,
            platform_version=ecs.FargatePlatformVersion.VERSION1_4,
            cluster=cluster,
        )

        #
        # scaling
        #
        scaling = wordpress_service.auto_scale_task_count(min_capacity=2,
                                                          max_capacity=50)
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=85,
            scale_in_cooldown=core.Duration.seconds(120),
            scale_out_cooldown=core.Duration.seconds(30),
        )

        #
        # network acl
        #
        database.connections.allow_default_port_from(wordpress_service,
                                                     "wordpress access to db")
        file_system.connections.allow_default_port_from(wordpress_service)

        #
        # external access
        #
        wordpress_service.connections.allow_from(
            other=properties.load_balancer, port_range=ec2.Port.tcp(80))

        http_listener = properties.load_balancer.add_listener(
            "HttpListener",
            port=80,
        )

        http_listener.add_targets(
            "HttpServiceTarget",
            protocol=elbv2.ApplicationProtocol.HTTP,
            targets=[wordpress_service],
            health_check=elbv2.HealthCheck(healthy_http_codes="200,301,302"))
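
`WordpressStackProperties` is defined elsewhere; based on the two attributes this stack reads (`vpc` and `load_balancer`), a plausible minimal definition would be:

from dataclasses import dataclass
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_elasticloadbalancingv2 as elbv2

@dataclass
class WordpressStackProperties:
    # Shared networking resources handed into the WordPress stack.
    vpc: ec2.IVpc
    load_balancer: elbv2.ApplicationLoadBalancer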
Example No. 21
    def __init__(self, scope: core.Construct, construct_id: str, *,
                 secrets: List[Secret]):
        super().__init__(scope, construct_id)

        vpc = aws_ec2.Vpc(
            self,
            "Vpc",
            enable_dns_support=True,
            enable_dns_hostnames=True,
            max_azs=3,
            nat_gateways=0,
            subnet_configuration=[
                aws_ec2.SubnetConfiguration(
                    name="Public", subnet_type=aws_ec2.SubnetType.PUBLIC)
            ],
        )

        postgres_volume_name = "duckbot_dbdata"
        file_system = aws_efs.FileSystem(
            self,
            "PostgresFileSystem",
            vpc=vpc,
            encrypted=True,
            file_system_name=postgres_volume_name,
            removal_policy=core.RemovalPolicy.DESTROY)
        file_system.node.default_child.override_logical_id(
            "FileSystem"
        )  # rename for compatibility with legacy cloudformation template

        task_definition = aws_ecs.TaskDefinition(
            self,
            "TaskDefinition",
            compatibility=aws_ecs.Compatibility.EC2,
            family="duckbot",
            memory_mib="960",
            network_mode=aws_ecs.NetworkMode.BRIDGE)

        postgres_data_path = "/data/postgres"
        postgres = task_definition.add_container(
            "postgres",
            container_name="postgres",
            image=aws_ecs.ContainerImage.from_registry("postgres:13.2"),
            essential=False,
            environment={
                "POSTGRES_USER": "******",
                "POSTGRES_PASSWORD": "******",
                "PGDATA": postgres_data_path,
            },
            health_check=aws_ecs.HealthCheck(
                command=["CMD", "pg_isready", "-U", "duckbot"],
                interval=core.Duration.seconds(30),
                timeout=core.Duration.seconds(5),
                retries=3,
                start_period=core.Duration.seconds(30),
            ),
            logging=aws_ecs.LogDriver.aws_logs(
                stream_prefix="ecs",
                log_retention=aws_logs.RetentionDays.ONE_MONTH),
            memory_reservation_mib=128,
        )
        task_definition.add_volume(
            name=postgres_volume_name,
            efs_volume_configuration=aws_ecs.EfsVolumeConfiguration(
                file_system_id=file_system.file_system_id, root_directory="/"))
        postgres.add_mount_points(
            aws_ecs.MountPoint(source_volume=postgres_volume_name,
                               container_path=postgres_data_path,
                               read_only=False))

        secrets_as_parameters = {
            # note, parameter version is required by cdk, but does not make it into the template; specify version 1 for simplicity
            x.environment_name:
            aws_ssm.StringParameter.from_secure_string_parameter_attributes(
                self,
                x.environment_name,
                parameter_name=x.parameter_name,
                version=1)
            for x in secrets
        }
        duckbot = task_definition.add_container(
            "duckbot",
            container_name="duckbot",
            essential=True,
            image=aws_ecs.ContainerImage.from_registry(
                self.node.try_get_context("duckbot_image")),
            environment={"STAGE": "prod"},
            secrets={
                k: aws_ecs.Secret.from_ssm_parameter(v)
                for k, v in secrets_as_parameters.items()
            },
            health_check=aws_ecs.HealthCheck(
                command=["CMD", "python", "-m", "duckbot.health"],
                interval=core.Duration.seconds(30),
                timeout=core.Duration.seconds(10),
                retries=3,
                start_period=core.Duration.seconds(30),
            ),
            logging=aws_ecs.LogDriver.aws_logs(
                stream_prefix="ecs",
                log_retention=aws_logs.RetentionDays.ONE_MONTH),
            memory_reservation_mib=128,
        )
        duckbot.add_link(postgres)

        asg = aws_autoscaling.AutoScalingGroup(
            self,
            "AutoScalingGroup",
            min_capacity=0,
            max_capacity=1,
            desired_capacity=1,
            machine_image=aws_ecs.EcsOptimizedImage.amazon_linux2(),
            instance_type=aws_ec2.InstanceType("t2.micro"),
            key_name="duckbot",  # needs to be created manually
            instance_monitoring=aws_autoscaling.Monitoring.BASIC,
            vpc=vpc,
        )

        asg.connections.allow_to_default_port(file_system)
        asg.connections.allow_from(aws_ec2.Peer.any_ipv4(),
                                   aws_ec2.Port.tcp(22))
        asg.connections.allow_from(aws_ec2.Peer.any_ipv4(),
                                   aws_ec2.Port.tcp(80))
        asg.connections.allow_from(aws_ec2.Peer.any_ipv4(),
                                   aws_ec2.Port.tcp(443))

        cluster = aws_ecs.Cluster(self,
                                  "Cluster",
                                  cluster_name="duckbot",
                                  vpc=vpc)
        cluster.add_asg_capacity_provider(
            aws_ecs.AsgCapacityProvider(cluster,
                                        "AsgCapacityProvider",
                                        auto_scaling_group=asg),
            can_containers_access_instance_role=True)

        aws_ecs.Ec2Service(
            self,
            "Service",
            service_name="duckbot",
            cluster=cluster,
            task_definition=task_definition,
            desired_count=1,
            min_healthy_percent=0,
            max_healthy_percent=100,
        )
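
The `Secret` type in the constructor signature is not shown; given how it is used (an `environment_name` and a `parameter_name` attribute per entry), a small NamedTuple along these lines would fit:

from typing import NamedTuple

class Secret(NamedTuple):
    # Maps a container environment variable to an SSM SecureString parameter.
    environment_name: str  # e.g. "DISCORD_TOKEN" (illustrative)
    parameter_name: str    # e.g. "/duckbot/discord-token" (illustrative)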
Example No. 22
    def __init__(self, scope: core.Stack, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        self.environment_name = 'ecsworkshop'

        ## Importing existing VPC and ECS Cluster ##
        self.vpc = ec2.Vpc.from_lookup(self,
                                       "VPC",
                                       vpc_name='{}-base/BaseVPC'.format(
                                           self.environment_name))

        self.sd_namespace = sd.PrivateDnsNamespace.from_private_dns_namespace_attributes(
            self,
            "SDNamespace",
            namespace_name=core.Fn.import_value('NSNAME'),
            namespace_arn=core.Fn.import_value('NSARN'),
            namespace_id=core.Fn.import_value('NSID'))

        self.ecs_cluster = ecs.Cluster.from_cluster_attributes(
            self,
            "ECSCluster",
            cluster_name=core.Fn.import_value('ECSClusterName'),
            security_groups=[],
            vpc=self.vpc,
            default_cloud_map_namespace=self.sd_namespace)
        ## End VPC and ECS Cluster ##

        ## Load balancer for ECS service ##
        self.frontend_sec_grp = ec2.SecurityGroup(
            self,
            "FrontendIngress",
            vpc=self.vpc,
            allow_all_outbound=True,
            description="Frontend Ingress All port 80",
        )

        self.load_balancer = elbv2.ApplicationLoadBalancer(
            self,
            "ALB",
            security_group=self.frontend_sec_grp,
            internet_facing=True,
            vpc=self.vpc)

        self.target_group = elbv2.ApplicationTargetGroup(
            self,
            "ALBTG",
            port=8000,
            target_group_name="ECSDemoFargateEFS",
            vpc=self.vpc,
            target_type=elbv2.TargetType.IP)

        self.load_balancer.add_listener(
            "FrontendListener",
            default_target_groups=[self.target_group],
            port=80)
        ## End Load balancer ##

        ## EFS Setup ##
        self.service_sec_grp = ec2.SecurityGroup(
            self,
            "EFSSecGrp",
            vpc=self.vpc,
            description="Allow access to self on NFS Port",
        )

        self.service_sec_grp.connections.allow_from(
            other=self.service_sec_grp,
            port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                                string_representation="Self",
                                from_port=2049,
                                to_port=2049))

        # TODO: possibly create another sec grp for 8000
        self.service_sec_grp.connections.allow_from(
            other=self.frontend_sec_grp,
            port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                                string_representation="LB2Service",
                                from_port=8000,
                                to_port=8000))

        self.shared_fs = efs.FileSystem(
            self,
            "SharedFS",
            vpc=self.vpc,
            security_group=self.service_sec_grp,
        )
        ## End EFS Setup ##

        ## TODO: IAM Role to access EFS access points for task ##

        # Task execution role
        self.task_execution_role = iam.Role(
            self,
            "TaskExecutionRole",
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
            description="Task execution role for ecs services",
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AmazonECSTaskExecutionRolePolicy')
            ])

        ## END IAM ##

        ## Logging ##
        self.service_log_group = logs.LogGroup(self, "ECSEFSDemoLogGrp")
        ## END Logging ##

        # Cloudformation Outputs
        core.CfnOutput(self,
                       "ExecutionRoleARN",
                       value=self.task_execution_role.role_arn,
                       export_name="ECSFargateEFSDemoTaskExecutionRoleARN")

        core.CfnOutput(self,
                       "EFSID",
                       value=self.shared_fs.file_system_id,
                       export_name="ECSFargateEFSDemoFSID")

        core.CfnOutput(self,
                       "LBName",
                       value=self.load_balancer.load_balancer_name,
                       export_name="ECSFargateEFSDemoLBName")

        core.CfnOutput(self,
                       "TargetGroupArn",
                       value=self.target_group.target_group_arn,
                       export_name="ECSFargateEFSDemoTGARN")

        core.CfnOutput(self,
                       "VPCPrivateSubnets",
                       value=",".join(
                           [x.subnet_id for x in self.vpc.private_subnets]),
                       export_name="ECSFargateEFSDemoPrivSubnets")

        core.CfnOutput(self,
                       "SecurityGroups",
                       value="{},{}".format(
                           self.frontend_sec_grp.security_group_id,
                           self.service_sec_grp.security_group_id),
                       export_name="ECSFargateEFSDemoSecGrps")

        core.CfnOutput(self,
                       "LBURL",
                       value=self.load_balancer.load_balancer_dns_name,
                       export_name="ECSFargateEFSDemoLBURL")

        core.CfnOutput(self,
                       "LogGroupName",
                       value=self.service_log_group.log_group_name,
                       export_name="ECSFargateEFSDemoLogGroupName")
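
These exports let sibling stacks attach to the same file system. As a sketch, a consuming stack could rebuild the EFS volume configuration from the exported ID inside its own constructor (the export name matches the value above; `task_definition` and the module aliases are assumed):

shared_fs_id = core.Fn.import_value("ECSFargateEFSDemoFSID")
task_definition.add_volume(
    name="shared-efs",
    efs_volume_configuration=ecs.EfsVolumeConfiguration(
        file_system_id=shared_fs_id))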
Example No. 23
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        default_vpc = ec2.Vpc.from_lookup(self, 'default-vpc', is_default=True)
        cache_security_group = ec2.SecurityGroup(
            self,
            'devassoc-cache-sg',
            vpc=default_vpc,
            security_group_name='cache-sg-dev-demo',
            description='Elasticache Security Group for AWS Dev Study Guide')
        cache_security_group.add_ingress_rule(
            ec2.Peer.ipv4('99.116.136.249/32'), ec2.Port.tcp(22),
            'SSH from my IP')
        cache_security_group.add_ingress_rule(cache_security_group,
                                              ec2.Port.tcp(2049),
                                              'NFS for mount')

        ecache.CfnCacheCluster(
            self,
            'elasticache',
            engine='memcached',
            cluster_name='devassoc-memcache',
            num_cache_nodes=2,
            cache_node_type='cache.t2.micro',
            vpc_security_group_ids=[cache_security_group.security_group_id])

        efs_volume = efs.FileSystem(self,
                                    'efs-volume',
                                    vpc=default_vpc,
                                    security_group=cache_security_group,
                                    removal_policy=core.RemovalPolicy.DESTROY)

        ec2.Instance(
            self,
            'ec2-efs-instance',
            instance_name='efs-instance',
            instance_type=ec2.InstanceType('t2.micro'),
            machine_image=ec2.MachineImage.generic_linux(ami_map=ami_map),
            vpc=default_vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=cache_security_group,
            key_name='devassoc')

        bucket_name = 'devassoc-storage-versioned'
        bucket = s3.Bucket(self,
                           'bucket-versioned',
                           bucket_name=bucket_name,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           auto_delete_objects=True,
                           versioned=True)
        deploy = s3deploy.BucketDeployment(
            self,
            'DeployFiles',
            destination_bucket=bucket,
            sources=[
                s3deploy.Source.asset('./study_guide_exercises/polly_file')
            ],
            storage_class=s3deploy.StorageClass.ONEZONE_IA,
            cache_control=[s3deploy.CacheControl.set_public()])

        dynamodb_table_name = 'State'
        state_id = dynamodb.Attribute(name='Id',
                                      type=dynamodb.AttributeType.STRING)
        dynamo_db = dynamodb.Table(self,
                                   'dynamodb-stateless-app',
                                   table_name=dynamodb_table_name,
                                   partition_key=state_id,
                                   read_capacity=2,
                                   write_capacity=2,
                                   removal_policy=core.RemovalPolicy.DESTROY)

        core.CfnOutput(self, 'db-table-name', value=dynamo_db.table_name)
        core.CfnOutput(self, 'db-table-arn', value=dynamo_db.table_arn)

        global_table_name = 'Tables'
        table_id = dynamodb.Attribute(name='Id',
                                      type=dynamodb.AttributeType.STRING)
        table_group = dynamodb.Attribute(name='Group',
                                         type=dynamodb.AttributeType.STRING)
        dynamo_db_global = dynamodb.Table(
            self,
            'dynamodb-global',
            table_name=global_table_name,
            partition_key=table_id,
            sort_key=table_group,
            stream=dynamodb.StreamViewType.NEW_AND_OLD_IMAGES,
            replication_regions=['us-west-2', 'eu-central-1'],
            removal_policy=core.RemovalPolicy.DESTROY)

        core.CfnOutput(self,
                       'global-table-name',
                       value=dynamo_db_global.table_name)
        core.CfnOutput(self,
                       'global-table-arn',
                       value=dynamo_db_global.table_arn)

        # TODO: create this in different region and set up replication
        replication_bucket_name = 'devassoc-storage-replica'
        bucket = s3.Bucket(self,
                           'bucket-replica',
                           bucket_name=replication_bucket_name,
                           removal_policy=core.RemovalPolicy.DESTROY,
                           auto_delete_objects=True,
                           versioned=True)

        backup_plan = backup.BackupPlan.daily_weekly_monthly5_year_retention(
            self, 'backup-plan')
        backup_plan.add_selection(
            'backup-selection',
            resources=[backup.BackupResource.from_dynamo_db_table(dynamo_db)],
            backup_selection_name='StateBackup')
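
The `ami_map` passed to `ec2.MachineImage.generic_linux` above is defined elsewhere; a hypothetical placeholder keyed by region (the AMI IDs below are stand-ins, not real images):

ami_map = {
    'us-east-1': 'ami-0123456789abcdef0',  # placeholder, not a real AMI
    'us-west-2': 'ami-0fedcba9876543210',  # placeholder, not a real AMI
}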
Example No. 24
    def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
        vpc = ec2.Vpc(self, "vpc",
            cidr=props['vpc_CIDR'],
            max_azs=3,
            subnet_configuration=[
                ec2.SubnetConfiguration(
                    cidr_mask=28,
                    name='public',
                    subnet_type=ec2.SubnetType.PUBLIC),
                ec2.SubnetConfiguration(
                    cidr_mask=28,
                    name='private',
                    subnet_type=ec2.SubnetType.PRIVATE),
                ec2.SubnetConfiguration(
                    cidr_mask=28,
                    name='db',
                    subnet_type=ec2.SubnetType.ISOLATED)
            ]
        )

        rds_subnetGroup = rds.SubnetGroup(self, "rds_subnetGroup",
            description = f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
            vpc = vpc,
            vpc_subnets = ec2.SubnetSelection(subnet_type= ec2.SubnetType.ISOLATED)
        )

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
        ##TODO:ADD Aurora Serverless Option
        rds_instance = rds.DatabaseCluster(self,'wordpress-db',
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_2
            ),
            instances=1,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                enable_performance_insights=props['rds_enable_performance_insights'],
                instance_type=ec2.InstanceType(instance_type_identifier=props['rds_instance_type'])
            ),
            subnet_group=rds_subnetGroup,
            storage_encrypted=props['rds_storage_encrypted'],
            backup=rds.BackupProps(
                retention=core.Duration.days(props['rds_automated_backup_retention_days'])
            )
        )

        EcsToRdsSecurityGroup = ec2.SecurityGroup(self, "EcsToRdsSecurityGroup",
            vpc = vpc,
            description = "Allow WordPress containers to talk to RDS"
        )

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
        db_cred_generator = _lambda.Function(
            self, 'db_creds_generator',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='db_creds_generator.handler',
            code=_lambda.Code.from_asset('lambda'),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
            environment={
                'SECRET_NAME': rds_instance.secret.secret_name,
            }
        )

        #Set Permissions and Sec Groups
        rds_instance.connections.allow_from(EcsToRdsSecurityGroup, ec2.Port.tcp(3306))   # Open hole to RDS in RDS SG

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
        file_system = efs.FileSystem(self, "MyEfsFileSystem",
            vpc = vpc,
            encrypted=True, # file system is not encrypted by default
            lifecycle_policy = props['efs_lifecycle_policy'],
            performance_mode = efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode = efs.ThroughputMode.BURSTING,
            removal_policy = core.RemovalPolicy(props['efs_removal_policy']),
            enable_automatic_backups = props['efs_automatic_backups']
        )

        if props['deploy_bastion_host']:
            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
            bastion_host = ec2.BastionHostLinux(self, 'bastion_host',
                vpc = vpc
            )
            rds_instance.connections.allow_from(bastion_host, ec2.Port.tcp(3306))

        self.output_props = props.copy()
        self.output_props["vpc"] = vpc
        self.output_props["rds_instance"] = rds_instance
        self.output_props["EcsToRdsSecurityGroup"] = EcsToRdsSecurityGroup
        self.output_props["file_system"] = file_system
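
For reference, an illustrative `props` dictionary covering every key this stack reads (all values are examples only, not from the original):

props = {
    'environment': 'dev',
    'application': 'wordpress',
    'unit': 'web',
    'vpc_CIDR': '10.0.0.0/16',
    'rds_instance_type': 't3.medium',
    'rds_enable_performance_insights': False,
    'rds_storage_encrypted': True,
    'rds_automated_backup_retention_days': 7,
    'efs_lifecycle_policy': efs.LifecyclePolicy.AFTER_14_DAYS,
    'efs_removal_policy': 'DESTROY',  # passed to core.RemovalPolicy(...)
    'efs_automatic_backups': True,
    'deploy_bastion_host': True,
}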
Example No. 25
# The first lines of this example are truncated; this constructor is a
# plausible reconstruction (the construct id is illustrative).
security_group = ec2.SecurityGroup(stack,
                                   App_Name + "_securitygroup",
                                   vpc=vpc,
                                   allow_all_outbound=True)
security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(80))

security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8080))

security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(443))
security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(2049))
security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.udp(111))
security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5000))

security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(3000))

security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(7000))
efsvol = efs.FileSystem(stack,
                        App_Name + "_efsvolume",
                        vpc=vpc,
                        file_system_name='wes',
                        security_group=security_group)

efsdns = f"{efsvol.file_system_id}.efs.us-east-1.amazonaws.com"

efs_to_connect = (f"addr={efsdns},nfsvers=4.0,rsize=1048576,wsize=1048576,"
                  "hard,timeo=600,retrans=2")
#efs_to_connect="addr=" + efsvol.file_system_id
##device_set=efsdns+":/"
#driveropts={
#    "type": "nfs",
#    "device":device_set,
#    "o": efs_to_connect
#    #"o": "addr=fs-XXXXXX.efs.us-east-1.amazonaws.com,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport"

#}
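
The commented-out options above sketch an NFS-backed docker volume. Wired into an EC2 task definition it might look like the following (`task_definition` is assumed, and note that docker volume drivers work with the EC2 launch type only, not Fargate):

task_definition = ecs.Ec2TaskDefinition(stack, App_Name + "_taskdef")
task_definition.add_volume(
    name="wes-efs",
    docker_volume_configuration=ecs.DockerVolumeConfiguration(
        driver="local",
        scope=ecs.Scope.TASK,
        driver_opts={
            "type": "nfs",
            "device": efsdns + ":/",
            "o": efs_to_connect,
        }))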
Example No. 26
    def __init__(self, scope: core.Construct, construct_id: str,
                 database: timestream.CfnDatabase, table: timestream.CfnTable,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        vpc = ec2.Vpc(self, "GrafanaVpc", max_azs=2)

        vpc.add_interface_endpoint(
            'EFSEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService.ELASTIC_FILESYSTEM)
        vpc.add_interface_endpoint(
            'SMEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService.SECRETS_MANAGER)

        cluster = ecs.Cluster(self, "MyCluster", vpc=vpc)

        file_system = efs.FileSystem(
            self,
            "EfsFileSystem",
            vpc=vpc,
            encrypted=True,
            lifecycle_policy=efs.LifecyclePolicy.AFTER_14_DAYS,
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.BURSTING)

        access_point = efs.AccessPoint(self,
                                       "EfsAccessPoint",
                                       file_system=file_system,
                                       path="/var/lib/grafana",
                                       posix_user=PosixUser(gid="1000",
                                                            uid="1000"),
                                       create_acl=Acl(owner_gid="1000",
                                                      owner_uid="1000",
                                                      permissions="755"))

        log_group = logs.LogGroup(self,
                                  "taskLogGroup",
                                  retention=logs.RetentionDays.ONE_MONTH)

        container_log_driver = ecs.LogDrivers.aws_logs(
            stream_prefix="fargate-grafana", log_group=log_group)

        task_role = iam.Role(
            self,
            "taskRole",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"))

        task_role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "cloudwatch:DescribeAlarmsForMetric",
                    "cloudwatch:DescribeAlarmHistory",
                    "cloudwatch:DescribeAlarms", "cloudwatch:ListMetrics",
                    "cloudwatch:GetMetricStatistics",
                    "cloudwatch:GetMetricData", "ec2:DescribeTags",
                    "ec2:DescribeInstances", "ec2:DescribeRegions",
                    "tag:GetResources"
                ],
                resources=["*"]))
        self.grant_timestream_read(task_role, database, table)

        execution_role = iam.Role(
            self,
            "executionRole",
            assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"))
        log_group.grant_write(execution_role)

        volume_name = "efsGrafanaVolume"

        volume_config = ecs.Volume(
            name=volume_name,
            efs_volume_configuration=EfsVolumeConfiguration(
                file_system_id=file_system.file_system_id,
                transit_encryption="ENABLED",
                authorization_config=AuthorizationConfig(
                    access_point_id=access_point.access_point_id)))

        task_definition = ecs.FargateTaskDefinition(
            self,
            "TaskDef",
            task_role=task_role,
            execution_role=execution_role,
            volumes=[volume_config])

        grafana_admin_password = secretsmanager.Secret(self,
                                                       "grafanaAdminPassword")
        grafana_admin_password.grant_read(task_role)

        container_web = task_definition.add_container(
            "grafana",
            image=ecs.ContainerImage.from_registry("grafana/grafana"),
            logging=container_log_driver,
            environment={
                "GF_INSTALL_PLUGINS": "grafana-timestream-datasource",
                "GF_AWS_default_REGION": core.Aws.REGION
            },
            secrets={
                "GF_SECURITY_ADMIN_PASSWORD":
                ecs.Secret.from_secrets_manager(grafana_admin_password)
            })

        container_web.add_port_mappings(PortMapping(container_port=3000))
        container_web.add_mount_points(
            MountPoint(container_path="/var/lib/grafana",
                       read_only=False,
                       source_volume=volume_config.name))

        fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "MyFargateService",
            cluster=cluster,
            cpu=1024,
            desired_count=1,
            task_definition=task_definition,
            memory_limit_mib=2048,
            platform_version=ecs.FargatePlatformVersion.LATEST)

        fargate_service.target_group.configure_health_check(path="/api/health")
        file_system.connections.allow_default_port_from(
            fargate_service.service.connections)

        core.CfnOutput(self,
                       "GrafanaAdminSecret",
                       value=grafana_admin_password.secret_name,
                       export_name="GrafanaAdminSecret")
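
`grant_timestream_read`, called above, is a method of this stack that the snippet does not include. One plausible implementation scopes reads to the given database and table (endpoint discovery and `SelectValues` cannot be resource-scoped):

    def grant_timestream_read(self, role: iam.Role,
                              database: timestream.CfnDatabase,
                              table: timestream.CfnTable) -> None:
        role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "timestream:Select", "timestream:ListMeasures",
                    "timestream:DescribeTable"
                ],
                resources=[database.attr_arn, table.attr_arn]))
        role.add_to_policy(
            iam.PolicyStatement(
                effect=iam.Effect.ALLOW,
                actions=[
                    "timestream:DescribeEndpoints", "timestream:SelectValues"
                ],
                resources=["*"]))  # these actions are account-wide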
Example No. 27
    def __init__(self, scope: core.Construct, config: dict, id: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        if 'vpc_id' in config:
            vpc = ec2.Vpc.from_lookup(self, "ECS-VPC", vpc_id=config["vpc_id"])
        else:
            vpc = None
        cluster = ecs.Cluster(self,
                              "commvault",
                              cluster_name="commvault-cs",
                              container_insights=True,
                              vpc=vpc)

        ### Create demo bucket
        bucket = s3.Bucket(self,
                           "commvault-bucket",
                           bucket_name="commvault-demo-bucket-{}-{}".format(
                               config["region"], config["account"]))

        ### This will allow the ALB to generate a certificate.
        domain_zone = route53.HostedZone.from_lookup(
            self, "walkerzone", domain_name="code.awalker.dev")

        ### Create EFS
        # kms_key = kms.Key(self, "comm-vault-key")

        commvault_file_system = efs.FileSystem(
            self,
            "comvault-efs",
            vpc=cluster.vpc,
            file_system_name="commvault-efs",
            encrypted=True,
            # kms_key=kms_key ,
        )
        # kms_key.grant_encrypt_decrypt(commvault_file_system.)

        ### Define Task Definition and add the container
        ecs_task = ecs.FargateTaskDefinition(self, "commvault-task")

        ecs_task.add_container(
            "commvault-container",
            image=ecs.ContainerImage.from_registry(
                "store/commvaultrepo/mediaagent:SP7"),
            essential=True,
            command=[
                "-csclientname", "filesys", "-cshost", "-mountpath",
                '"/opt/libraryPath"', "-cvdport", "8600", "-clienthost",
                "-clientname", "dockermediaagent"
            ],
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix="commvault")).add_port_mappings(
                    ecs.PortMapping(container_port=80,
                                    host_port=80,
                                    protocol=ecs.Protocol.TCP))

        ecs_task.add_to_task_role_policy(statement=iam.PolicyStatement(
            actions=["efs:*"], resources=['*'], effect=iam.Effect.ALLOW))
        ### Create the ECS Service using the ApplicationLoadBalancedFargate pattern.
        ecs_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "commvault-service",
            assign_public_ip=False,
            cluster=cluster,
            task_definition=ecs_task,
            protocol=elbv2.ApplicationProtocol.HTTPS,
            redirect_http=True,
            domain_name="commvault.code.awalker.dev",
            domain_zone=domain_zone,
            platform_version=ecs.FargatePlatformVersion.VERSION1_4,
            public_load_balancer=False)

        ### Grant Read/Write to the s3 Bucket for the task
        bucket.grant_read_write(ecs_service.task_definition.task_role)

        # -v $TMPDIR/CommvaultLogs:/var/log/commvault/Log_Files
        ecs_task.add_volume(
            name="CommvaultLogs",
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=commvault_file_system.file_system_id,
                transit_encryption="ENABLED",
                authorization_config=ecs.AuthorizationConfig(
                    #iam="ENABLED",
                    access_point_id=efs.AccessPoint(
                        self,
                        "CommvaultLog-access-point",
                        path="/CommvaultLogs",
                        file_system=commvault_file_system).access_point_id)))
        ecs_task.default_container.add_mount_points(
            ecs.MountPoint(container_path="/var/log/commvault/Log_Files",
                           source_volume="CommvaultLogs",
                           read_only=False))

        # -v $TMPDIR/CommvaultRegistry/:/etc/CommVaultRegistry
        ecs_task.add_volume(
            name="CommVaultRegistry",
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=commvault_file_system.file_system_id,
                transit_encryption="ENABLED",
                authorization_config=ecs.AuthorizationConfig(
                    #iam="ENABLED",
                    access_point_id=efs.AccessPoint(
                        self,
                        "CommVaultRegistrys-access-point",
                        path="/CommVaultRegistry",
                        file_system=commvault_file_system).access_point_id)))
        ecs_task.default_container.add_mount_points(
            ecs.MountPoint(container_path="/etc/CommVaultRegistry",
                           source_volume="CommVaultRegistry",
                           read_only=False))

        # -v $TMPDIR/libraryPath/:/opt/libraryPath
        ecs_task.add_volume(
            name="libraryPath",
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=commvault_file_system.file_system_id,
                transit_encryption="ENABLED",
                authorization_config=ecs.AuthorizationConfig(
                    #iam="ENABLED",
                    access_point_id=efs.AccessPoint(
                        self,
                        "libraryPath-access-point",
                        path="/libraryPath",
                        file_system=commvault_file_system).access_point_id)))
        ecs_task.default_container.add_mount_points(
            ecs.MountPoint(container_path="/opt/libraryPath",
                           source_volume="libraryPath",
                           read_only=False))

        # -v $TMPDIR/IndexCache/:/opt/IndexCache
        ecs_task.add_volume(
            name="IndexCache",
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=commvault_file_system.file_system_id,
                transit_encryption="ENABLED",
                authorization_config=ecs.AuthorizationConfig(
                    #iam="ENABLED",
                    access_point_id=efs.AccessPoint(
                        self,
                        "IndexCache-access-point",
                        path="/IndexCache",
                        file_system=commvault_file_system).access_point_id)))
        ecs_task.default_container.add_mount_points(
            ecs.MountPoint(container_path="/opt/IndexCache",
                           source_volume="IndexCache",
                           read_only=False))

        # -v $TMPDIR/jobResults/:/opt/jobResults
        ecs_task.add_volume(
            name="jobResults",
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=commvault_file_system.file_system_id,
                transit_encryption="ENABLED",
                authorization_config=ecs.AuthorizationConfig(
                    #iam="ENABLED",
                    access_point_id=efs.AccessPoint(
                        self,
                        "jobResults-access-point",
                        path="/jobResults",
                        file_system=commvault_file_system).access_point_id)))
        ecs_task.default_container.add_mount_points(
            ecs.MountPoint(container_path="/opt/jobResults",
                           source_volume="jobResults",
                           read_only=False))

        # -v $TMPDIR/certificates:/opt/commvault/Base/certificates
        ecs_task.add_volume(
            name="certificates",
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=commvault_file_system.file_system_id,
                transit_encryption="ENABLED",
                authorization_config=ecs.AuthorizationConfig(
                    #iam="ENABLED",
                    access_point_id=efs.AccessPoint(
                        self,
                        "certificates-access-point",
                        path="/certificates",
                        file_system=commvault_file_system).access_point_id)))
        ecs_task.default_container.add_mount_points(
            ecs.MountPoint(container_path="/opt/commvault/Base/certificates",
                           source_volume="certificates",
                           read_only=False))
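
The five volume blocks above all follow one pattern; a small helper method like this (a sketch, not part of the original) would collapse each mount to one call:

    def add_efs_mount(self, task: ecs.FargateTaskDefinition,
                      file_system: efs.FileSystem, name: str,
                      container_path: str) -> None:
        access_point = efs.AccessPoint(self, f"{name}-access-point",
                                       path=f"/{name}",
                                       file_system=file_system)
        task.add_volume(
            name=name,
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=file_system.file_system_id,
                transit_encryption="ENABLED",
                authorization_config=ecs.AuthorizationConfig(
                    access_point_id=access_point.access_point_id)))
        task.default_container.add_mount_points(
            ecs.MountPoint(container_path=container_path,
                           source_volume=name,
                           read_only=False))

    # e.g. self.add_efs_mount(ecs_task, commvault_file_system,
    #                         "jobResults", "/opt/jobResults")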
Example No. 28
    def __init__(self, scope: core.Construct, construct_id: str, name: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        """VPC - used in project"""
        vpc = ec2.Vpc(self, f'{name}-VPC', max_azs=2)
        """Filesystem - shared between Lambda and Streamlit - Deletes when stack gets shut down"""
        fs = efs.FileSystem(self,
                            f'{name}-FileSystem',
                            vpc=vpc,
                            removal_policy=core.RemovalPolicy.DESTROY)

        access_point = fs.add_access_point(
            'AccessPoint',
            create_acl=efs.Acl(owner_gid='1001',
                               owner_uid='1001',
                               permissions='750'),
            path="/export/lambda",
            posix_user=efs.PosixUser(gid="1001", uid="1001"))
        """Model folder that contains Lambda code"""
        model_folder = os.path.dirname(
            os.path.realpath(__file__)) + "/../model"
        lambda_handler = _lambda.DockerImageFunction(
            self,
            f'{name}-Lambda',
            code=_lambda.DockerImageCode.from_image_asset(
                model_folder),  #Uses local code to build the container
            memory_size=1024,  #Adjust to your need - 128MB to 10GB
            timeout=core.Duration.minutes(
                5),  #Adjust to your need - up to 15 mins
            vpc=vpc,
            filesystem=_lambda.FileSystem.from_efs_access_point(
                access_point, MOUNT_POINT))
        """Custom Log groups for Lambda"""
        lambda_lgs = logs.LogGroup(
            self,
            f'{name}-Lambda-LogGroup',
            log_group_name=f"/aws/lambda/{lambda_handler.function_name}",
            retention=logs.RetentionDays.ONE_WEEK,
            removal_policy=core.RemovalPolicy.DESTROY)
        """API Gateway - integrates all methods and ressources - used for Lambda invocation"""
        api = api_gw.HttpApi(
            self,
            f'{name}-ApiGw',
            default_integration=integrations.LambdaProxyIntegration(
                handler=lambda_handler))

        # ----------------------------------------------------------------
        # STREAMLIT RELATED START
        # ----------------------------------------------------------------
        '''
        cluster = ecs.Cluster(self, f"{name}-Streamlit-Cluster", vpc=vpc)
        
        ecs_task = ecs.FargateTaskDefinition(
            self,
            f'{name}-Streamlit-Task-Def',            
        )

        streamlit_container = ecs_task.add_container(
            f'{name}-Streamlit-Container',
            image=ecs.ContainerImage.from_asset('streamlit-docker'),
            essential=True,
            environment={
                'API_URL': api.url,
            },
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix=f'{name}-Streamlit-Log'
            )            
        )
        
        streamlit_container.add_port_mappings(
            ecs.PortMapping(
                container_port=8501,
                host_port=8501,
                protocol=ecs.Protocol.TCP
            )
        )
        
        """Efs Volume - shared between Lambda / Streamlit"""
        ecs_task.add_volume(name=f'{name}-Efs-Volume',  
                efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=fs.file_system_id,                
        ))
        
        """Efs Mountpoint"""
        streamlit_container.add_mount_points(
            ecs.MountPoint(
                container_path="/mnt/data",
                read_only=False,
                source_volume=f'{name}-Efs-Volume'
        ))
        
       
        ecs_task.add_to_task_role_policy(
            statement=iam.PolicyStatement(
                actions=["efs:*"],
                resources=['*'],
                effect=iam.Effect.ALLOW
            )
        )
       
        """Fargate Service that hosts the Streamlit Application"""
        ecs_service = ecs_patterns.ApplicationLoadBalancedFargateService(self, f'{name}-Fargate-Service',
            cluster=cluster,            
            cpu=256,                    
            desired_count=1,            
            task_definition = ecs_task,
            memory_limit_mib=512,     
            public_load_balancer=True, 
            platform_version=ecs.FargatePlatformVersion.VERSION1_4, #https://forums.aws.amazon.com/thread.jspa?messageID=960420
            
        )  
        
        fs.connections.allow_default_port_from(
            ecs_service.service.connections)
        '''
        # ----------------------------------------------------------------
        # STREAMLIT RELATED END
        # ----------------------------------------------------------------

        core.CfnOutput(self, 'URL', value=api.url)
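
The Lambda code itself lives in the `model` folder referenced above. As an illustration, a handler there could persist state on the shared file system; this sketch assumes `MOUNT_POINT = "/mnt/ml"` (the constant used by the stack but not shown) and is not the original handler:

import json
import os

MOUNT_POINT = "/mnt/ml"  # assumed; must match the constant used by the stack

def handler(event, context):
    # Files written under the mount point survive across invocations and
    # are visible to any other consumer of the same EFS access point.
    marker = os.path.join(MOUNT_POINT, "last_invocation.json")
    with open(marker, "w") as fp:
        json.dump({"request_id": context.aws_request_id}, fp)
    return {"statusCode": 200, "body": json.dumps({"mounted": True})}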