Example No. 1
    def __init__(self, scope: core.Construct, id: str, vpc, sg, redissg,
                 kmskey, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        rdskey = kms.Key.from_key_arn(self, "rdskey", key_arn=kmskey)

        db_mysql = rds.DatabaseCluster(
            self,
            "Dev_MySQL",
            default_database_name="msadev",
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            engine_version="5.7.12",
            master_user=rds.Login(username="******"),
            instance_props=rds.InstanceProps(
                vpc=vpc,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.ISOLATED),
                instance_type=ec2.InstanceType(
                    instance_type_identifier="t3.medium")),
            instances=1,
            parameter_group=rds.ClusterParameterGroup.from_parameter_group_name(
                self,
                "parameter-group-msadev",
                parameter_group_name="default.aurora-mysql5.7"),
            kms_key=rdskey)
        sgId = ec2.SecurityGroup.from_security_group_id(self, "sgid", sg)
        redis_sg = ec2.SecurityGroup.from_security_group_id(
            self, "redissgid", redissg)

        db_mysql.connections.allow_default_port_from(sgId,
                                                     "Access from Bastion")
        db_mysql.connections.allow_default_port_from(redis_sg,
                                                     "Access from Redis")
Example No. 2
    def __init__(self, scope: core.Construct, id: str, vpc,
                 asg_security_groups, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Create an Aurora cluster with 2 instances using the CDK high-level API.
        # Secrets Manager generates and keeps the password automatically; don't put passwords in CDK code directly.
        db_Aurora_cluster = rds.DatabaseCluster(
            self,
            "MOStateAppDB",
            default_database_name="MOStateAppDB",
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_5_7_12),
            instance_props=rds.InstanceProps(
                vpc=vpc,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.ISOLATED),
                instance_type=ec2.InstanceType(
                    instance_type_identifier="t2.small"),
            ),
            instances=2,
            parameter_group=rds.ParameterGroup.from_parameter_group_name(
                self,
                "para-group-aurora",
                parameter_group_name="default.aurora-mysql5.7",
            ),
        )

        for asg_sg in asg_security_groups:
            db_Aurora_cluster.connections.allow_default_port_from(
                asg_sg, "EC2 Autoscaling Group access Aurora")
Example No. 3
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
                 lambdasg: ec2.SecurityGroup, bastionsg: ec2.SecurityGroup,
                 kmskey, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context('project_name')
        env_name = self.node.try_get_context('env')

        creds_json_template = {'username': '******'}

        db_creds = sm.Secret(
            self,
            id="db-secret",
            secret_name=f'{env_name}-rds-secret',
            generate_secret_string=sm.SecretStringGenerator(
                include_space=False,  # no space in secret
                password_length=12,
                generate_string_key='rds-password',  # key in json dictionary for the password
                exclude_punctuation=True,
                secret_string_template=json.dumps(creds_json_template)))

        db_name = 'pryancdkdb'
        db_mysql = rds.DatabaseCluster(
            self,
            id=f'{env_name}-mysql',
            default_database_name=db_name,
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_5_7_12),
            master_user=rds.Login(
                username='******',
                password=db_creds.secret_value_from_json('rds-password')),
            instance_props=rds.InstanceProps(
                vpc=vpc,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.ISOLATED),
                # will pick one of the isolated Subnets from the vpc
                instance_type=ec2.InstanceType(
                    instance_type_identifier='t3.small')),
            instances=1,
            storage_encrypted=True,
            storage_encryption_key=kmskey,
            removal_policy=core.RemovalPolicy.DESTROY)

        # we need to define the ingress rules for rds
        db_mysql.connections.allow_default_port_from(
            lambdasg, 'Access from Lambda Functions')
        db_mysql.connections.allow_default_port_from(
            bastionsg, "Access from bastion host")

        # ssm
        ssm.StringParameter(self,
                            id=f'{env_name}-db-host',
                            parameter_name=f"/{env_name}/db-host",
                            string_value=db_mysql.cluster_endpoint.hostname)

        ssm.StringParameter(self,
                            id=f'{env_name}-db-name',
                            parameter_name=f"/{env_name}/db-name",
                            string_value=db_name)
Example No. 4
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        rds_sg = ec2.SecurityGroup(self,
                                   'rds-sg',
                                   vpc=vpc,
                                   security_group_name=prj_name + env_name +
                                   '-rds-sg',
                                   description="SG for RDS",
                                   allow_all_outbound=True)

        for subnet in vpc.private_subnets:
            rds_sg.add_ingress_rule(
                peer=ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                connection=ec2.Port.tcp(3306),
                description='Allow all private subnet to access RDS')

        db_mysql = rds.DatabaseCluster(
            self,
            'mysql',
            default_database_name=prj_name + env_name,
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_5_7_12),
            instances=1,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                instance_type=ec2.InstanceType(
                    instance_type_identifier="t3.small"),
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PRIVATE),
                # attach the security group created above, otherwise it is never used
                security_groups=[rds_sg]),
            removal_policy=core.RemovalPolicy.DESTROY)
Example No. 5
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Aurora RDS Cluster
        db_Aurora_cluster = rds.DatabaseCluster(
            self,
            "ghost_db_cluster",
            default_database_name=f"{props['namespace']}_db",  # MySQL database names cannot contain hyphens
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            engine_version="5.7.12",
            master_user=rds.Login(username="******"),
            instance_props=rds.InstanceProps(
                vpc=props['vpc'],
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.ISOLATED),
                security_groups=[props['db_security_group']],
                instance_type=ec2.InstanceType(
                    instance_type_identifier="t2.small")),
            instances=2,
            parameter_group=rds.ClusterParameterGroup.from_parameter_group_name(
                self,
                "para-group-aurora",
                parameter_group_name="default.aurora-mysql5.7"),
        )
Example No. 6
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
                 lambdasg: ec2.SecurityGroup, bastionsg: ec2.SecurityGroup,
                 kmskey, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        json_template = {'username': '******'}
        db_creds = sm.Secret(
            self,
            'db-secret',
            secret_name=env_name + '/rds-secret',
            generate_secret_string=sm.SecretStringGenerator(
                include_space=False,
                password_length=12,
                generate_string_key='password',
                exclude_punctuation=True,
                secret_string_template=json.dumps(json_template)))
        db_mysql = rds.DatabaseCluster(
            self,
            'mysql',
            default_database_name=prj_name + env_name,
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            engine_version="5.7.12",
            master_user=rds.Login(
                username='******',
                password=db_creds.secret_value_from_json('password')),
            instance_props=rds.InstanceProps(
                vpc=vpc,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.ISOLATED),
                instance_type=ec2.InstanceType(
                    instance_type_identifier="t3.small")),
            instances=1,
            parameter_group=rds.ClusterParameterGroup.from_parameter_group_name(
                self, 'pg-dev',
                parameter_group_name='default.aurora-mysql5.7'),
            kms_key=kmskey,
            removal_policy=core.RemovalPolicy.DESTROY)

        db_mysql.connections.allow_default_port_from(
            lambdasg, "Access from Lambda functions")
        db_mysql.connections.allow_default_port_from(
            bastionsg, "Allow from bastion host")

        #SSM Parameter
        ssm.StringParameter(self,
                            'db-host',
                            parameter_name='/' + env_name + '/db-host',
                            string_value=db_mysql.cluster_endpoint.hostname)

        ssm.StringParameter(self,
                            'db-name',
                            parameter_name='/' + env_name + '/db-name',
                            string_value=prj_name + env_name)
Example No. 7
 def create_rds(self, vpc):
     # Create DB
     rds_cluster = rds.DatabaseCluster(
         self,
         'Database',
         engine=rds.DatabaseClusterEngine.AURORA,
         master_user=rds.Login(username='******'),
         instance_props=rds.InstanceProps(
             instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                               ec2.InstanceSize.SMALL),
             vpc_subnets=ec2.SubnetSelection(
                 subnet_type=ec2.SubnetType.PRIVATE),
             vpc=vpc))
     return rds_cluster
Example No. 8
    def __init__(self, scope: core.Construct, id: str, bmt_vpc: ec2.Vpc,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        db_subnet_group = rds.SubnetGroup(
            self,
            'Aurora',
            description='aurora subnet group',
            vpc=bmt_vpc,
            removal_policy=core.RemovalPolicy.DESTROY,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED))

        db_security_group = ec2.SecurityGroup(self, 'aurora-sg', vpc=bmt_vpc)

        db_security_group.add_ingress_rule(
            peer=ec2.Peer.ipv4('10.100.0.0/16'),
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="to allow from the vpc internal",
                from_port=3306,
                to_port=3306))

        param_group = rds.ParameterGroup(
            self,
            'bmt-aurora-param',
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL)
        param_group.add_parameter("performance_schema", "1")

        rds.DatabaseCluster(
            self,
            'bmt-aurora-cluster',
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_1),
            instance_props=rds.InstanceProps(
                vpc=bmt_vpc,
                instance_type=ec2.InstanceType.of(
                    instance_class=ec2.InstanceClass.BURSTABLE3,
                    instance_size=ec2.InstanceSize.MEDIUM),
                security_groups=[db_security_group]),
            instances=1,
            subnet_group=db_subnet_group,
            parameter_group=param_group,
            removal_policy=core.RemovalPolicy.DESTROY)
Example No. 9
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        master_user = _rds.Login(
            username="******", password=core.SecretValue.plain_text("password"))

        rds = _rds.DatabaseCluster(
            self,
            "rds",
            engine=_rds.DatabaseClusterEngine.aurora_mysql(
                version=_rds.AuroraMysqlEngineVersion.VER_5_7_12),
            master_user=master_user,
            instance_props=_rds.InstanceProps(
                vpc=vpc,
                instance_type=_ec2.InstanceType("t3.medium"),
                security_groups=[vpc.sg],
                vpc_subnets=_ec2.SubnetSelection(
                    subnets=vpc.isolated_subnets)),
            instances=2)
Example No. 10
 def rds_cluster(self, vpc, ec2sg, rds_type, rds_param=None):
     db_cluster = rds.DatabaseCluster(
         self,
         "adl-" + rds_type,
         default_database_name="adldb",
         engine=getattr(rds.DatabaseClusterEngine, rds_type),
         instance_props=rds.InstanceProps(
             vpc=vpc['vpc'],
             vpc_subnets=ec2.SubnetSelection(
                 subnet_type=ec2.SubnetType.PRIVATE),
             instance_type=ec2.InstanceType(
                 instance_type_identifier="t3.medium")),
         master_user=rds.Login(username="******"),
         backup=rds.BackupProps(retention=core.Duration.days(7),
                                preferred_window='01:00-02:00'),
         parameter_group=rds_param,
         preferred_maintenance_window="Sun:23:45-Mon:00:15",
         removal_policy=core.RemovalPolicy.DESTROY,
         storage_encrypted=True)
     db_cluster.connections.allow_from(ec2sg['ec2-sg'], ec2.Port.all_tcp())
Example No. 11
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc: ec2.Vpc,
                 asg_sg,
                 stage={},
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prefix_name = f'{stage["vpc_prefix"]}-{stage["stage_name"]}-{self.node.try_get_context("customer")}'

        self._rds_subnet_group = rds.SubnetGroup(
            self,
            f'{prefix_name}-rds-subnet-group',
            description="Subnet group for the Aurora MySQL cluster",
            subnet_group_name=f'{prefix_name}-aurora-mysql',
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED),
            vpc=vpc)

        self._rds_cluster = rds.DatabaseCluster(
            self,
            f'{prefix_name}-rds-cluster',
            cluster_identifier=f'{prefix_name}-rds-cluster',
            credentials=rds.Credentials.from_generated_secret("admin"),
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                                  ec2.InstanceSize.SMALL),
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.ISOLATED)),
            port=3306,
            default_database_name=self.node.try_get_context("customer"),
            subnet_group=self._rds_subnet_group)

        for sg in asg_sg:
            self._rds_cluster.connections.allow_default_port_from(
                sg, "Allow EC2 ASG access to RDS MySQL")
Example No. 12
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
                 lambdasg: ec2.SecurityGroup, bastionsg: ec2.SecurityGroup,
                 kmskey: kms.Key, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context('project_name')
        env_name = self.node.try_get_context('env')

        db_mysql = rds.DatabaseCluster(
            self,
            'mysql',
            default_database_name=prj_name + env_name,
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.ISOLATED),
                instance_type=ec2.InstanceType(
                    instance_type_identifier="t3.small")),
            instances=1,
            storage_encrypted=True,
            storage_encryption_key=kmskey,
            removal_policy=core.RemovalPolicy.DESTROY)

        db_mysql.connections.allow_default_port_from(
            lambdasg, 'Allow from Lambda function')
        db_mysql.connections.allow_default_port_from(
            bastionsg, "Allow from bastion host")

        ssm.StringParameter(self,
                            'db-host',
                            parameter_name=f"/{env_name}/db-host",
                            string_value=db_mysql.cluster_endpoint.hostname)
        ssm.StringParameter(self,
                            'db-secret-name',
                            parameter_name=f"/{env_name}/db-secret-name",
                            string_value=db_mysql.secret.secret_name)
Example No. 13
    def __init__(self, scope: core.Construct, id: str, env, props, cluster=False, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #TEMP without ASG
        # security_groups = [ec2.SecurityGroup(
        #         self,
        #         id="ecs-sample-mysql",
        #         vpc=props['vpc'],
        #         security_group_name="ecs-sample-mysql"
        # )]


        vpc = props['vpc']
        security_groups = [props['sg_rds']]
        credential = rds.Credentials.from_username(username="******")
        private_subnet_selections = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)
        subnet_group = rds.SubnetGroup(self, "sample-rds-subnet-group",
                                       vpc=vpc,
                                       subnet_group_name="sample-rds-subnet-group",
                                       vpc_subnets=private_subnet_selections,
                                       description="sample-rds-subnet-group")
        self.output_props = props.copy()

        if not cluster:
            rds_instance = rds.DatabaseInstance(
                self, "RDS-instance",
                database_name="sample",
                engine=rds.DatabaseInstanceEngine.mysql(
                    version=rds.MysqlEngineVersion.VER_8_0_16
                ),
                credentials=credential,
                instance_identifier="ecs-sample-db",

                vpc=vpc,
                port=3306,
                instance_type=ec2.InstanceType.of(
                    ec2.InstanceClass.BURSTABLE3,
                    ec2.InstanceSize.MICRO,
                ),
                subnet_group=subnet_group,
                vpc_subnets=private_subnet_selections,
                removal_policy=core.RemovalPolicy.DESTROY,
                deletion_protection=False,
                security_groups=security_groups
            )
            core.CfnOutput(self, "RDS_instnace_endpoint", value=rds_instance.db_instance_endpoint_address)
            self.output_props['rds'] = rds_instance

        else:
            instance_props = rds.InstanceProps(
                vpc=vpc,
                security_groups=security_groups,
                vpc_subnets=private_subnet_selections
            )
            rds_cluster = rds.DatabaseCluster(
                self, "RDS-cluster",
                cluster_identifier="ecs-sample-db-cluster",
                instance_props=instance_props,
                engine=rds.DatabaseClusterEngine.aurora_mysql(
                    version=rds.AuroraMysqlEngineVersion.VER_2_07_1
                ),
                credentials=credential,
                default_database_name="sample",
                instances=1,
                subnet_group=subnet_group,
                removal_policy=core.RemovalPolicy.DESTROY,
                deletion_protection=False
            )
            core.CfnOutput(self, "RDS_cluster_endpoint", value=rds_cluster.cluster_endpoint.hostname)
            self.output_props['rds'] = rds_cluster
Example No. 14
    def __init__(self, scope: core.Construct, config: dict, id: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # create the security group that will allow connecting to this instance
        # I am lazy and create only 1 SG that allows TCP 5432 from itself
        # database clients (lambda functions) will have TCP 5432 authorized for themselves too,
        # which is not necessary but harmless
        self.db_security_group = ec2.SecurityGroup(self,
                                                   "Database Security Group",
                                                   vpc=config['vpc'])
        self.db_security_group.add_ingress_rule(self.db_security_group,
                                                ec2.Port.tcp(5432))

        self.cluster = rds.DatabaseCluster(
            self,
            config['rds']['name'],
            engine=rds.DatabaseClusterEngine.aurora_postgres(
                version=rds.AuroraPostgresEngineVersion.VER_11_7),
            default_database_name=config['rds']['databaseName'],
            master_user=rds.Login(username=config['rds']['masterUsername']),
            instance_props=rds.InstanceProps(
                vpc=config['vpc'], security_groups=[self.db_security_group]))

        # Add Secrets Manager Password rotation
        self.cluster.add_rotation_single_user()

        # aurora serverless is not yet supported by CDK, https://github.com/aws/aws-cdk/issues/929
        # escape hatch https://docs.aws.amazon.com/cdk/latest/guide/cfn_layer.html#cfn_layer_raw
        # cfn_aurora_cluster = cluster.node.default_child
        # cfn_aurora_cluster.add_override("Properties.EngineMode", "serverless")
        # cfn_aurora_cluster.add_override("Properties.EnableHttpEndpoint",True) # Enable Data API
        # cfn_aurora_cluster.add_override("Properties.ScalingConfiguration", {
        #     'AutoPause': True,
        #     'MaxCapacity': 4,
        #     'MinCapacity': 1,
        #     'SecondsUntilAutoPause': 600
        # })
        # cluster.node.try_remove_child('Instance1') # Remove 'Server' instance that isn't required for serverless Aurora

        # create a custom resource to initialize the data schema
        function = _lambda.Function(
            self,
            config['custom resource lambda']['name'],
            runtime=_lambda.Runtime.PYTHON_3_8,
            code=_lambda.Code.asset('./custom_resources'),
            handler='app.on_event',
            vpc=config['vpc'],
            environment={
                'DB_SECRET_ARN': self.get_secret_arn(),
                'PYTHON_LOGLEVEL': 'DEBUG'
            },
            security_groups=[self.db_security_group])
        # add permission to access the secret
        function.add_to_role_policy(
            iam.PolicyStatement(resources=[self.get_secret_arn()],
                                actions=["secretsmanager:GetSecretValue"]))

        custom_resource_provider = cr.Provider(self,
                                               'Custom Resource Provider',
                                               on_event_handler=function)
        custom_resource = core.CustomResource(
            self,
            'Custom Resource',
            service_token=custom_resource_provider.service_token)

        # Tell CFN to wait for the database to be ready before it creates the custom resource
        custom_resource.node.add_dependency(self.cluster)
Example No. 15
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
        vpc = ec2.Vpc(self,
                      "vpc",
                      cidr=props['vpc_CIDR'],
                      max_azs=3,
                      subnet_configuration=[{
                          'cidrMask': 28,
                          'name': 'public',
                          'subnetType': ec2.SubnetType.PUBLIC
                      }, {
                          'cidrMask': 28,
                          'name': 'private',
                          'subnetType': ec2.SubnetType.PRIVATE
                      }, {
                          'cidrMask': 28,
                          'name': 'db',
                          'subnetType': ec2.SubnetType.ISOLATED
                      }])

        rds_subnetGroup = rds.SubnetGroup(
            self,
            "rds_subnetGroup",
            description=f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED))

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
        ##TODO:ADD Aurora Serverless Option
        rds_instance = rds.DatabaseCluster(
            self,
            'wordpress-db',
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_2),
            instances=1,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                enable_performance_insights=props[
                    'rds_enable_performance_insights'],
                instance_type=ec2.InstanceType(
                    instance_type_identifier=props['rds_instance_type'])),
            subnet_group=rds_subnetGroup,
            storage_encrypted=props['rds_storage_encrypted'],
            backup=rds.BackupProps(retention=core.Duration.days(
                props['rds_automated_backup_retention_days'])))

        EcsToRdsSecurityGroup = ec2.SecurityGroup(
            self,
            "EcsToRdsSecurityGroup",
            vpc=vpc,
            description="Allow WordPress containers to talk to RDS")

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
        db_cred_generator = _lambda.Function(
            self,
            'db_creds_generator',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='db_creds_generator.handler',
            code=_lambda.Code.asset('lambda/db_creds_generator'),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED
            ),  #vpc.select_subnets(subnet_type = ec2.SubnetType("ISOLATED")).subnets ,
            environment={
                'SECRET_NAME': rds_instance.secret.secret_name,
            })

        #Set Permissions and Sec Groups
        rds_instance.connections.allow_from(
            EcsToRdsSecurityGroup,
            ec2.Port.tcp(3306))  #Open hole to RDS in RDS SG

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
        file_system = efs.FileSystem(
            self,
            "MyEfsFileSystem",
            vpc=vpc,
            encrypted=True,  # file system is not encrypted by default
            lifecycle_policy=props['efs_lifecycle_policy'],
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.BURSTING,
            removal_policy=core.RemovalPolicy(props['efs_removal_policy']),
            enable_automatic_backups=props['efs_automatic_backups'])

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html?highlight=ecs%20cluster#aws_cdk.aws_ecs.Cluster
        cluster = ecs.Cluster(
            self,
            "Cluster",
            vpc=vpc,
            container_insights=props['ecs_enable_container_insights'])

        if props['deploy_bastion_host']:
            #ToDo: Deploy bastion host with a key file
            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
            bastion_host = ec2.BastionHostLinux(self, 'bastion_host', vpc=vpc)
            rds_instance.connections.allow_from(bastion_host,
                                                ec2.Port.tcp(3306))

            #######################
            ### Developer Tools ###
            # SFTP into the EFS Shared File System

            NetToolsSecret = secretsmanager.Secret(
                self,
                "NetToolsSecret",
                generate_secret_string=secretsmanager.SecretStringGenerator(
                    secret_string_template=json.dumps({
                        "username": '******',
                        "ip": ''
                    }),
                    generate_string_key="password",
                    exclude_characters='/"'))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
            AccessPoint = file_system.add_access_point(
                "access-point",
                path="/",
                create_acl=efs.Acl(
                    owner_uid="100",  # https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
                    owner_gid="101",
                    permissions="0755"))

            EfsVolume = ecs.Volume(
                name="efs",
                efs_volume_configuration=ecs.EfsVolumeConfiguration(
                    file_system_id=file_system.file_system_id,
                    transit_encryption="ENABLED",
                    authorization_config=ecs.AuthorizationConfig(
                        access_point_id=AccessPoint.access_point_id)))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
            NetToolsTask = ecs.FargateTaskDefinition(self,
                                                     "TaskDefinition",
                                                     cpu=256,
                                                     memory_limit_mib=512,
                                                     volumes=[EfsVolume])

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
            NetToolsContainer = NetToolsTask.add_container(
                "NetTools",
                image=ecs.ContainerImage.from_registry('netresearch/sftp'),
                command=['test:test:100:101:efs'])
            NetToolsContainer.add_port_mappings(
                ecs.PortMapping(container_port=22, protocol=ecs.Protocol.TCP))

            NetToolsContainer.add_mount_points(
                ecs.MountPoint(
                    container_path="/home/test/efs",  # ToDo build path out with username from secret
                    read_only=False,
                    source_volume=EfsVolume.name,
                ))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html?highlight=fargateservice#aws_cdk.aws_ecs.FargateService
            service = ecs.FargateService(
                self,
                "Service",
                cluster=cluster,
                task_definition=NetToolsTask,
                platform_version=ecs.FargatePlatformVersion.VERSION1_4,  # Required for EFS
            )
            #ToDo somehow store container's IP on deploy

            #Allow traffic to EFS Volume from Net Tools container
            service.connections.allow_to(file_system, ec2.Port.tcp(2049))
            #ToDo allow bastion host into container on port 22

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
            bastion_ip_locator = _lambda.Function(
                self,
                'bastion_ip_locator',
                function_name=f"{props['environment']}-{props['application']}-{props['unit']}-SFTP-IP",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler='bastion_ip_locator.handler',
                code=_lambda.Code.asset('lambda/bastion_ip_locator'),
                environment={
                    'CLUSTER_NAME': cluster.cluster_arn,
                    'SERVICE_NAME': service.service_name
                })

            #Give needed perms to bastion_ip_locator for reading info from ECS
            bastion_ip_locator.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["ecs:DescribeTasks"],
                    resources=[
                        #f"arn:aws:ecs:us-east-1:348757191778:service/{cluster.cluster_name}/{service.service_name}",
                        f"arn:aws:ecs:us-east-1:348757191778:task/{cluster.cluster_name}/*"
                    ]))
            bastion_ip_locator.add_to_role_policy(
                iam.PolicyStatement(actions=[
                    "ecs:ListTasks",
                ],
                                    resources=["*"],
                                    conditions={
                                        'ArnEquals': {
                                            'ecs:cluster': cluster.cluster_arn
                                        }
                                    }))

        self.output_props = props.copy()
        self.output_props["vpc"] = vpc
        self.output_props["rds_instance"] = rds_instance
        self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
        self.output_props["file_system"] = file_system
        self.output_props["cluster"] = cluster
Example No. 16
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # get acct id for policies
        # acct_id=env['account']

        # creates a new vpc, subnets, 2 nat gateways, etc
        vpc = ec2.Vpc(self, "VPC")

        # mocking vpc from my environment
        #vpc = ec2.Vpc.from_lookup(self, "nonDefaultVpc", vpc_id="vpc-9931a0fc")

        self._rds_subnet_group = rds.SubnetGroup(
            self,
            'RdsSubnetGroup',
            description="aaa",
            subnet_group_name='aurora-mysql',
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            vpc=vpc)

        # create the RDS cluster
        self._rds_cluster = rds.DatabaseCluster(
            self,
            "RDS Cluster",
            cluster_identifier="rds-test",
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                                  ec2.InstanceSize.SMALL),
            ),
            port=3306,
            default_database_name="test",
            subnet_group=self._rds_subnet_group)

        # enable autoscaling for rds
        # 3 servers maximum
        # scale on 1% cpu for testing, 50% normally

        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_applicationautoscaling/ScalableTarget.html
        self._scaling_target = autoscale.ScalableTarget(
            self,
            "Scaling Target",
            max_capacity=3,
            min_capacity=1,
            resource_id='cluster:' + self._rds_cluster.cluster_identifier,
            scalable_dimension='rds:cluster:ReadReplicaCount',
            service_namespace=autoscale.ServiceNamespace.RDS)

        self._scale_policy = autoscale.TargetTrackingScalingPolicy(
            self,
            "Tracking Scaling Policy",
            policy_name='thisisscalingpolicyname',
            target_value=1,
            predefined_metric=autoscale.PredefinedMetric.RDS_READER_AVERAGE_CPU_UTILIZATION,
            scaling_target=self._scaling_target,
            scale_in_cooldown=core.Duration.minutes(5),
            scale_out_cooldown=core.Duration.minutes(5),
        )
Example No. 17
    def __init__(self, scope: core.Construct, id: str, datalake: DataLakeLayer,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.datalake = datalake
        self.security_group = ec2.SecurityGroup(
            self,
            'SecurityGroup',
            vpc=self.datalake.vpc,
            allow_all_outbound=True,
            description='SonarQube Security Group')

        self.security_group.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                                             connection=ec2.Port.all_traffic(),
                                             description='Allow any traffic')

        self.sonarqube_svr_ecr = ecr.DockerImageAsset(
            self,
            'Repo',
            directory=os.path.join(root_dir, 'images/sonarqube-server'),
            repository_name='sonarqube')

        self.sonarqube_cli_ecr = ecr.DockerImageAsset(
            self,
            'Cli',
            directory=os.path.join(root_dir, 'images/sonarqube-scanner'),
            repository_name='sonarqube-cli')

        self.database = rds.DatabaseCluster(
            self,
            'Database',
            engine=rds.DatabaseClusterEngine.aurora_postgres(
                version=rds.AuroraPostgresEngineVersion.VER_11_9),
            default_database_name='sonarqube',
            removal_policy=core.RemovalPolicy.DESTROY,
            credentials=rds.Credentials.from_username(
                username='******',
                password=core.SecretValue.plain_text('postgres')),
            instance_props=rds.InstanceProps(
                vpc=self.datalake.vpc,
                security_groups=[self.security_group],
                instance_type=ec2.InstanceType('r6g.xlarge')))

        # self.ecs_cluster = ecs.Cluster(self,'SonarCluster',
        #   container_insights=True,
        #   vpc=self.datalake.vpc,
        #   capacity=ecs.AddCapacityOptions(
        #     machine_image_type= ecs.MachineImageType.AMAZON_LINUX_2,
        #     instance_type=ec2.InstanceType('m5.xlarge'),
        #     allow_all_outbound=True,
        #     associate_public_ip_address=False,
        #     vpc_subnets= ec2.SubnetSelection(subnet_type= ec2.SubnetType.PUBLIC),
        #     desired_capacity=2))

        # self.service = ecsp.ApplicationLoadBalancedEc2Service(self,'SonarEc2',
        #   cluster=self.ecs_cluster,
        #   desired_count=1,
        #   listener_port=80,
        #   memory_reservation_mib= 4 * 1024,
        #   task_image_options= ecsp.ApplicationLoadBalancedTaskImageOptions(
        #     image= ecs.ContainerImage.from_docker_image_asset(asset=self.sonarqube_svr_ecr),
        #     container_name='sonarqube-svr',
        #     container_port=9000,
        #     enable_logging=True,
        #     environment={
        #       '_SONAR_JDBC_URL':'jdbc:postgresql://{}/sonarqube'.format(
        #           self.database.cluster_endpoint.hostname),
        #       '_SONAR_JDBC_USERNAME':'******',
        #       '_SONAR_JDBC_PASSWORD':'******'
        #     }))

        self.service = ecsp.ApplicationLoadBalancedFargateService(
            self,
            'Server',
            assign_public_ip=True,
            vpc=self.datalake.vpc,
            desired_count=1,
            cpu=4096,
            memory_limit_mib=8 * 1024,
            listener_port=80,
            platform_version=ecs.FargatePlatformVersion.VERSION1_4,
            security_groups=[self.security_group, self.datalake.efs_sg],
            task_image_options=ecsp.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_docker_image_asset(
                    asset=self.sonarqube_svr_ecr),
                container_name='sonarqube-svr',
                container_port=9000,
                enable_logging=True,
                environment={
                    '_SONAR_JDBC_URL':
                    'jdbc:postgresql://{}/sonarqube'.format(
                        self.database.cluster_endpoint.hostname),
                    '_SONAR_JDBC_USERNAME':
                    '******',
                    '_SONAR_JDBC_PASSWORD':
                    '******'
                }))

        for name in ['AmazonElasticFileSystemClientFullAccess']:
            self.service.task_definition.task_role.add_managed_policy(
                iam.ManagedPolicy.from_aws_managed_policy_name(name))

        # Override container specific settings
        container = self.service.task_definition.default_container

        # Required by SonarQube (embedded Elasticsearch) to start
        container.add_ulimits(
            ecs.Ulimit(name=ecs.UlimitName.NOFILE,
                       soft_limit=262145,
                       hard_limit=262145))

        for folder in ['data', 'logs']:
            efs_ap = self.datalake.efs.add_access_point(
                'sonarqube-' + folder,
                create_acl=efs.Acl(owner_gid="0",
                                   owner_uid="0",
                                   permissions="777"),
                path='/sonarqube/' + folder)

            self.service.task_definition.add_volume(
                name=folder,
                efs_volume_configuration=ecs.EfsVolumeConfiguration(
                    file_system_id=self.datalake.efs.file_system_id,
                    transit_encryption='ENABLED',
                    authorization_config=ecs.AuthorizationConfig(
                        access_point_id=efs_ap.access_point_id,
                        iam='DISABLED')))

            container.add_mount_points(
                ecs.MountPoint(container_path='/opt/sonarqube/' + folder,
                               source_volume=folder,
                               read_only=False))
Example No. 18
    def __init__(
        self, 
        scope: core.Construct, 
        id: str, 
        keycloak_domain: str,
        vpc: ec2.IVpc = None, 
        cluster: ecs.ICluster = None, 
        load_balancer: elbv2.IApplicationLoadBalancer = None, 
        log_group: logs.ILogGroup = None,
        keycloak_database_name: str = 'keycloak',
        keycloak_database_user: str = 'admin',
        **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        
        keycloak_task_role = iam.Role(
            self, 'KeycloakTaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com')
        )

        keycloak_database_secret = secretsmanager.Secret(
            self, 'KeycloakDatabaseSecret',
            description='Keycloak Database Password',
            generate_secret_string=secretsmanager.SecretStringGenerator(exclude_punctuation=True)
        )

        keycloak_database_cluster = rds.DatabaseCluster(
            self, 'KeycloakDatabaseCluster',
            engine= rds.DatabaseClusterEngine.AURORA,
            instance_props=rds.InstanceProps(
                instance_type=ec2.InstanceType.of(
                    instance_class=ec2.InstanceClass.BURSTABLE3, 
                    instance_size=ec2.InstanceSize.SMALL
                ),
                vpc=vpc,
            ),
            master_user= rds.Login(
                username=keycloak_database_user,
                password=keycloak_database_secret.secret_value,
            ),
            instances=1,
            default_database_name=keycloak_database_name,
            removal_policy=core.RemovalPolicy.DESTROY,
        )


        keycloak_hosted_zone = route53.HostedZone.from_lookup(
            self, 'KeycloakHostedZone',
            domain_name=keycloak_domain
        )

        keycloak_certificate = acm.DnsValidatedCertificate(
            self, 'KeycloakCertificate',
            hosted_zone=keycloak_hosted_zone,
            domain_name='keycloak.' + keycloak_domain
        )

        keycloak_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self, 'KeycloakLoadBalancedFargateService',
            load_balancer=load_balancer,
            cluster=cluster,

            task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_asset('keycloak'),
                container_port=8080,
                enable_logging=True,
                task_role=keycloak_task_role,

                log_driver=ecs.AwsLogDriver(
                    stream_prefix='keycloak',
                    log_group=log_group,
                ),

                secrets={
                    'DB_PASSWORD': ecs.Secret.from_secrets_manager(keycloak_database_secret),
                },
                environment={
                    'DB_VENDOR': 'mysql',
                    'DB_USER': keycloak_database_user,
                    'DB_ADDR': keycloak_database_cluster.cluster_endpoint.hostname,
                    'DB_DATABASE': keycloak_database_name,
                    # 'KEYCLOAK_LOGLEVEL': 'DEBUG',
                    'PROXY_ADDRESS_FORWARDING': 'true',
                },
            ),

            memory_limit_mib=512,
            cpu=256,
            desired_count=1,
            public_load_balancer=True,
            domain_name= 'keycloak.' + keycloak_domain,
            domain_zone= keycloak_hosted_zone,
            protocol=elbv2.ApplicationProtocol.HTTPS,
        )

        keycloak_service.target_group.enable_cookie_stickiness(core.Duration.seconds(24 * 60 * 60))
        keycloak_service.target_group.configure_health_check(
            port='8080',
            path='/auth/realms/master/.well-known/openid-configuration',
            timeout=core.Duration.seconds(20),
            healthy_threshold_count=2,
            unhealthy_threshold_count=10,
            interval=core.Duration.seconds(30),
        )

        keycloak_service.listener.add_certificates(
            'KeycloakListenerCertificate',
            certificates= [ keycloak_certificate ]
        )

        keycloak_database_cluster.connections.allow_default_port_from(keycloak_service.service, 'From Keycloak Fargate Service')
Example No. 19
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #create VPC
        self.vpc = ec2.Vpc(
            self, 'SonarVPC',
            max_azs=3
        )
        
        #DB Security Group with required ingress rules
        self.sg= ec2.SecurityGroup(
            self, "SonarQubeSG",
            vpc=self.vpc,
            allow_all_outbound=True,
            description="Aurora Security Group"
        )
        self.sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5432), "SonarDBAurora")
        pgroup = rds.ParameterGroup.from_parameter_group_name(
            self, "SonarDBParamGroup",
            parameter_group_name='default.aurora-postgresql11'
        )

        #create RDS Cluster
        self.db= rds.DatabaseCluster(self, 'SonarDBCluster',
            engine= rds.DatabaseClusterEngine.aurora_postgres(version=rds.AuroraPostgresEngineVersion.VER_11_6),
            default_database_name= 'sonarqube',
            parameter_group= pgroup,
            master_user=rds.Login(username= "******"),
            instance_props= rds.InstanceProps(
                instance_type= ec2.InstanceType.of(
                    ec2.InstanceClass.BURSTABLE3,
                    ec2.InstanceSize.MEDIUM
                ),
                security_groups= [self.sg],
                vpc= self.vpc
            )
        )

        #create ECS cluster
        self.cluster= ecs.Cluster(self, "SonarCluster",
            capacity= ecs.AddCapacityOptions(
            instance_type= ec2.InstanceType('m5.large')),
            vpc= self.vpc
        )

        asg= self.cluster.autoscaling_group
        user_data= asg.user_data
        user_data.add_commands('sysctl -qw vm.max_map_count=262144')
        user_data.add_commands('sysctl -w fs.file-max=65536')
        user_data.add_commands('ulimit -n 65536')
        user_data.add_commands('ulimit -u 4096')

        #Create iam Role for Task
        self.task_role = iam.Role(
            self,
            id= "SonarTaskRole",
            role_name= "SonarTaskRole",
            assumed_by= iam.ServicePrincipal(service= "ecs-tasks.amazonaws.com"),
            managed_policies= [
                iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonECSTaskExecutionRolePolicy")
            ]
        )
        #Grant permission for Task to read secret from SecretsManager
        self.db.secret.grant_read(self.task_role)

        url = 'jdbc:postgresql://{}/sonarqube'.format(self.db.cluster_endpoint.socket_address)
        #create task
        task= ecs_patterns.ApplicationLoadBalancedEc2Service(self, "SonarService",
            # if a cluster is provided use the same vpc
            cluster= self.cluster,            
            cpu=512,
            desired_count=1, 
            task_image_options= ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_registry("sonarqube:8.2-community"),
                container_port=9000,
                secrets={
                    "sonar.jdbc.username": ecs.Secret.from_secrets_manager(self.db.secret, field="username"),
                    "sonar.jdbc.password": ecs.Secret.from_secrets_manager(self.db.secret, field="password")
                },
                environment={
                    'sonar.jdbc.url': url
                },
                task_role= self.task_role
            ),
            memory_limit_mib=2048,
            public_load_balancer=True
        )

        container = task.task_definition.default_container
        container.add_ulimits(
            ecs.Ulimit(
                name=ecs.UlimitName.NOFILE,
                soft_limit=65536,
                hard_limit=65536
            )
        )
Example No. 20
 def _setup_postgresql(self) -> None:
     port = 3306
     database = "postgres"
     schema = "public"
     pg = rds.ParameterGroup(
         self,
         "aws-data-wrangler-postgresql-params",
         engine=rds.DatabaseClusterEngine.aurora_postgres(
             version=rds.AuroraPostgresEngineVersion.VER_11_13,
         ),
         parameters={
             "apg_plan_mgmt.capture_plan_baselines": "off",
         },
     )
     aurora_pg = rds.DatabaseCluster(
         self,
         "aws-data-wrangler-aurora-cluster-postgresql",
         removal_policy=RemovalPolicy.DESTROY,
         engine=rds.DatabaseClusterEngine.aurora_postgres(
             version=rds.AuroraPostgresEngineVersion.VER_11_13,
         ),
         cluster_identifier="postgresql-cluster-wrangler",
         instances=1,
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         port=port,
         backup=rds.BackupProps(retention=Duration.days(1)),
         parameter_group=pg,
         s3_import_buckets=[self.bucket],
         s3_export_buckets=[self.bucket],
         instance_props=rds.InstanceProps(
             vpc=self.vpc,
             security_groups=[self.db_security_group],
             publicly_accessible=True,
         ),
         subnet_group=self.rds_subnet_group,
     )
     glue.Connection(
         self,
         "aws-data-wrangler-postgresql-glue-connection",
         description="Connect to Aurora (PostgreSQL).",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-postgresql",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:postgresql://{aurora_pg.cluster_endpoint.hostname}:{port}/{database}",
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     secrets.Secret(
         self,
         "aws-data-wrangler-postgresql-secret",
         secret_name="aws-data-wrangler/postgresql",
         description="Postgresql credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "postgresql",
                     "host": aurora_pg.cluster_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": aurora_pg.cluster_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "PostgresqlAddress", value=aurora_pg.cluster_endpoint.hostname)
     CfnOutput(self, "PostgresqlPort", value=str(port))
     CfnOutput(self, "PostgresqlDatabase", value=database)
     CfnOutput(self, "PostgresqlSchema", value=schema)
Example No. 21
 def _setup_mysql(self) -> None:
     port = 3306
     database = "test"
     schema = "test"
     aurora_mysql = rds.DatabaseCluster(
         self,
         "aws-data-wrangler-aurora-cluster-mysql",
         removal_policy=RemovalPolicy.DESTROY,
         engine=rds.DatabaseClusterEngine.aurora_mysql(
             version=rds.AuroraMysqlEngineVersion.VER_5_7_12,
         ),
         cluster_identifier="mysql-cluster-wrangler",
         instances=1,
         default_database_name=database,
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         port=port,
         backup=rds.BackupProps(retention=Duration.days(1)),
         instance_props=rds.InstanceProps(
             vpc=self.vpc,
             security_groups=[self.db_security_group],
             publicly_accessible=True,
         ),
         subnet_group=self.rds_subnet_group,
         s3_import_buckets=[self.bucket],
         s3_export_buckets=[self.bucket],
     )
     glue.Connection(
         self,
         "aws-data-wrangler-mysql-glue-connection",
         description="Connect to Aurora (MySQL).",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-mysql",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:mysql://{aurora_mysql.cluster_endpoint.hostname}:{port}/{database}",
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     glue.Connection(
         self,
         "aws-data-wrangler-mysql-glue-connection-ssl",
         description="Connect to Aurora (MySQL) with SSL.",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-mysql-ssl",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:mysql://{aurora_mysql.cluster_endpoint.hostname}:{port}/{database}",
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
             "JDBC_ENFORCE_SSL": "true",
             "CUSTOM_JDBC_CERT": "s3://rds-downloads/rds-combined-ca-bundle.pem",
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     secrets.Secret(
         self,
         "aws-data-wrangler-mysql-secret",
         secret_name="aws-data-wrangler/mysql",
         description="MySQL credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "mysql",
                     "host": aurora_mysql.cluster_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": aurora_mysql.cluster_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "MysqlAddress", value=aurora_mysql.cluster_endpoint.hostname)
     CfnOutput(self, "MysqlPort", value=str(port))
     CfnOutput(self, "MysqlDatabase", value=database)
     CfnOutput(self, "MysqlSchema", value=schema)
Example No. 22
    def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
        vpc = ec2.Vpc(self, "vpc",
            cidr=props['vpc_CIDR'],
            max_azs=3,
            subnet_configuration=[
                {
                    'cidrMask': 28,
                    'name': 'public',
                    'subnetType': ec2.SubnetType.PUBLIC
                },
                {
                    'cidrMask': 28,
                    'name': 'private',
                    'subnetType': ec2.SubnetType.PRIVATE
                },
                {
                    'cidrMask': 28,
                    'name': 'db',
                    'subnetType': ec2.SubnetType.ISOLATED
                }
            ]
        )

        rds_subnetGroup = rds.SubnetGroup(self, "rds_subnetGroup",
            description = f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
            vpc = vpc,
            vpc_subnets = ec2.SubnetSelection(subnet_type= ec2.SubnetType.ISOLATED)
        )

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
        ##TODO:ADD Aurora Serverless Option
        rds_instance = rds.DatabaseCluster(self,'wordpress-db',
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_2
            ),
            instances=1,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                enable_performance_insights=props['rds_enable_performance_insights'],
                instance_type=ec2.InstanceType(instance_type_identifier=props['rds_instance_type'])
            ),
            subnet_group=rds_subnetGroup,
            storage_encrypted=props['rds_storage_encrypted'],
            backup=rds.BackupProps(
                retention=core.Duration.days(props['rds_automated_backup_retention_days'])
            )
        )

        EcsToRdsSeurityGroup= ec2.SecurityGroup(self, "EcsToRdsSeurityGroup",
            vpc = vpc,
            description = "Allow WordPress containers to talk to RDS"
        )

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
        db_cred_generator = _lambda.Function(
            self, 'db_creds_generator',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='db_creds_generator.handler',
            code=_lambda.Code.asset('lambda'),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type= ec2.SubnetType.ISOLATED),        #vpc.select_subnets(subnet_type = ec2.SubnetType("ISOLATED")).subnets ,
            environment={
                'SECRET_NAME': rds_instance.secret.secret_name,
            }
        )

        #Set Permissions and Sec Groups
        rds_instance.connections.allow_from(EcsToRdsSecurityGroup, ec2.Port.tcp(3306))   #Open hole to RDS in RDS SG

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
        file_system = efs.FileSystem(self, "MyEfsFileSystem",
            vpc = vpc,
            encrypted=True, # file system is not encrypted by default
            lifecycle_policy = props['efs_lifecycle_policy'],
            performance_mode = efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode = efs.ThroughputMode.BURSTING,
            removal_policy = core.RemovalPolicy(props['efs_removal_policy']),
            enable_automatic_backups = props['efs_automatic_backups']
        )

        if props['deploy_bastion_host']:
            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
            bastion_host = ec2.BastionHostLinux(self, 'bastion_host',
                vpc = vpc
            )
            rds_instance.connections.allow_from(bastion_host, ec2.Port.tcp(3306))

        self.output_props = props.copy()
        self.output_props["vpc"] = vpc
        self.output_props["rds_instance"] = rds_instance
        self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
        self.output_props["file_system"] = file_system