Example 1
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 secret_name: str,
                 template: str = None,
                 key: str = None) -> None:
        """Provides a generate pseudo-random password

        Args:
            scope (core.Construct): [description]
            id (str): [description]
            secret_name (str): [description]
            template (str, optional): [description]. Defaults to None.
            key (str, optional): [description]. Defaults to None.
        """
        super().__init__(scope, id)
        self.secret = asm.Secret(
            self,
            id,
            generate_secret_string=asm.SecretStringGenerator(
                secret_string_template=template,
                generate_string_key=key,
                password_length=24,
                exclude_characters='"@/\$'),
            secret_name='{}{}'.format(secret_name, id))
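The class line for this construct is not shown above; assuming it subclasses core.Construct (hypothetically named PasswordConstruct here), a minimal usage sketch inside a CDK v1 stack could look like this:

import aws_cdk.core as core

class DemoStack(core.Stack):
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # With template/key left as None, a plain 24-character string is
        # generated; the secret name becomes "demo-" + the construct id.
        password = PasswordConstruct(self, 'AdminPassword', secret_name='demo-')
        core.CfnOutput(self, 'SecretArn', value=password.secret.secret_arn)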
Example 2
    def __init__(self, scope: core.Construct, id: str, vpc: VpcStack,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        subnet_group = redshift.ClusterSubnetGroup(
            self,
            id="RedshiftSubnetGroup",
            description="Redshift private subnet group",
            vpc=vpc.instance,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED),
        )

        self.redshift_secret = sm.Secret(
            self,
            "redshift-credentials",
            secret_name="redshift-credentials",
            description="Credentials for Amazon Redshift cluster.",
            generate_secret_string=sm.SecretStringGenerator(
                secret_string_template='{"username": "******"}',
                generate_string_key="password",
                password_length=32,
                exclude_characters='"@\\\/',
                exclude_punctuation=True,
            ),
        )

        redshift_login = redshift.Login(
            master_username="******",
            master_password=self.redshift_secret.secret_value_from_json(
                "password"),
        )

        redshift_s3_read_access_role = iam.Role(
            self,
            "redshiftS3AccessRole",
            role_name="redshiftS3AccessRole",
            assumed_by=iam.ServicePrincipal("redshift.amazonaws.com"),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AmazonS3ReadOnlyAccess")
            ],
        )

        redshift_cluster = redshift.Cluster(
            self,
            id="redshift-cluster",
            master_user=redshift_login,
            vpc=vpc.instance,  # the wrapped ec2.Vpc, as used for the subnet group above
            cluster_type=redshift.ClusterType.SINGLE_NODE,
            default_database_name="redshift-db",
            encrypted=True,
            node_type=redshift.NodeType.DC2_LARGE,
            port=5439,
            roles=[redshift_s3_read_access_role],
            security_groups=[vpc.redshift_sg],
            subnet_group=subnet_group,
            removal_policy=core.RemovalPolicy.DESTROY,
        )
        self._instance = redshift_cluster
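The cluster is stashed on self._instance, and other stacks in these examples read wrapped resources back through a property (compare vpc.instance above); a likely accessor, sketched here as an assumption:

    @property
    def instance(self) -> redshift.Cluster:
        # Read-only handle so downstream stacks can reference the cluster.
        return self._instance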
Example 3
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
                 lambdasg: ec2.SecurityGroup, bastionsg: ec2.SecurityGroup,
                 kmskey, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context('project_name')
        env_name = self.node.try_get_context('env')

        creds_json_template = {'username': '******'}

        db_creds = sm.Secret(
            self,
            id="db-secret",
            secret_name=f'{env_name}-rds-secret',
            generate_secret_string=sm.SecretStringGenerator(
                include_space=False,  # no space in secret
                password_length=12,
                generate_string_key='rds-password',  # key in the JSON dictionary for the password
                exclude_punctuation=True,
                secret_string_template=json.dumps(creds_json_template)))

        db_name = 'pryancdkdb'
        db_mysql = rds.DatabaseCluster(
            self,
            id=f'{env_name}-mysql',
            default_database_name=db_name,
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_5_7_12),
            master_user=rds.Login(
                username='******',
                password=db_creds.secret_value_from_json('rds-password')),
            instance_props=rds.InstanceProps(
                vpc=vpc,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.ISOLATED),
                # will pick one of the isolated Subnets from the vpc
                instance_type=ec2.InstanceType(
                    instance_type_identifier='t3.small')),
            instances=1,
            storage_encrypted=True,
            storage_encryption_key=kmskey,
            removal_policy=core.RemovalPolicy.DESTROY)

        # we need to define the ingress rules for rds
        db_mysql.connections.allow_default_port_from(
            lambdasg, 'Access from Lambda Functions')
        db_mysql.connections.allow_default_port_from(
            bastionsg, "Access from bastion host")

        # ssm
        ssm.StringParameter(self,
                            id=f'{env_name}-db-host',
                            parameter_name=f"/{env_name}/db-host",
                            string_value=db_mysql.cluster_endpoint.hostname)

        ssm.StringParameter(self,
                            id=f'{env_name}-db-name',
                            parameter_name=f"/{env_name}/db-name",
                            string_value=db_name)
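A consumer (for example a Lambda function behind lambdasg) would typically resolve these values at runtime with boto3; a minimal sketch, assuming env_name is 'dev':

import json
import boto3

ssm_client = boto3.client('ssm')
sm_client = boto3.client('secretsmanager')

env_name = 'dev'  # assumption: matches the 'env' CDK context value

# Hostname published by the StringParameter above.
db_host = ssm_client.get_parameter(
    Name=f'/{env_name}/db-host')['Parameter']['Value']

# The secret string is the JSON template plus the generated 'rds-password' key.
creds = json.loads(sm_client.get_secret_value(
    SecretId=f'{env_name}-rds-secret')['SecretString'])
db_password = creds['rds-password']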
Example 4
    def __init__(self, scope: core.Construct, id: str, config_dict,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        """ Create a secret in secret manager with Database credentials for Comp Reg Source """
        stack = DatalakeSecretManagerStack.of(self)

        createCompRegSecret = sm.Secret(
            self,
            "createCompRegSecret",
            description="Database credentials for Comp Reg Source",
            secret_name=config_dict['comp_reg_secret_name'],
            generate_secret_string=sm.SecretStringGenerator(
                exclude_characters="{`~!@#$%^&*()_-+={[}}|\:;\"'<,>.?/}",
                generate_string_key="pass_generated_by_SM",
                secret_string_template=stack.to_json_string({
                    'db_username':
                    config_dict['comp_reg_user_name'],
                    'db_password':
                    config_dict['comp_reg_password'],
                    'db_port':
                    config_dict['comp_reg_port'],
                    'db_service_name':
                    config_dict['comp_reg_db_name'],
                    'db_host':
                    config_dict['comp_reg_host_name']
                })))
Example 5
 def _setup_sqlserver(self) -> None:
     port = 1433
     database = "test"
     schema = "dbo"
     sqlserver = rds.DatabaseInstance(
         self,
         "aws-data-wrangler-sqlserver-instance",
         instance_identifier="sqlserver-instance-wrangler",
         engine=rds.DatabaseInstanceEngine.sql_server_ex(version=rds.SqlServerEngineVersion.VER_15),
         instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL),
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         port=port,
         vpc=self.vpc,
         subnet_group=self.rds_subnet_group,
         security_groups=[self.db_security_group],
         publicly_accessible=True,
         s3_import_role=self.rds_role,
         s3_export_role=self.rds_role,
     )
     glue.Connection(
         self,
         "aws-data-wrangler-sqlserver-glue-connection",
         description="Connect to SQL Server.",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-sqlserver",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:sqlserver://{sqlserver.instance_endpoint.hostname}:{port};databaseName={database}",  # noqa: E501
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     secrets.Secret(
         self,
         "aws-data-wrangler-sqlserver-secret",
         secret_name="aws-data-wrangler/sqlserver",
         description="SQL Server credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "sqlserver",
                     "host": sqlserver.instance_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": sqlserver.instance_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "SqlServerAddress", value=sqlserver.instance_endpoint.hostname)
     CfnOutput(self, "SqlServerPort", value=str(port))
     CfnOutput(self, "SqlServerDatabase", value=database)
     CfnOutput(self, "SqlServerSchema", value=schema)
Example 6
 def _setup_oracle(self) -> None:
     port = 1521
     database = "ORCL"
     schema = "TEST"
     oracle = rds.DatabaseInstance(
         self,
         "aws-data-wrangler-oracle-instance",
         instance_identifier="oracle-instance-wrangler",
         engine=rds.DatabaseInstanceEngine.oracle_ee(version=rds.OracleEngineVersion.VER_19_0_0_0_2021_04_R1),
         instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.SMALL),
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         port=port,
         vpc=self.vpc,
         subnet_group=self.rds_subnet_group,
         security_groups=[self.db_security_group],
         publicly_accessible=True,
         s3_import_role=self.rds_role,
         s3_export_role=self.rds_role,
     )
     glue.Connection(
         self,
         "aws-data-wrangler-oracle-glue-connection",
         description="Connect to Oracle.",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-oracle",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:oracle:thin://@{oracle.instance_endpoint.hostname}:{port}/{database}",  # noqa: E501
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     secrets.Secret(
         self,
         "aws-data-wrangler-oracle-secret",
         secret_name="aws-data-wrangler/oracle",
         description="Oracle credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "oracle",
                     "host": oracle.instance_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": oracle.instance_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "OracleAddress", value=oracle.instance_endpoint.hostname)
     CfnOutput(self, "OraclePort", value=str(port))
     CfnOutput(self, "OracleDatabase", value=database)
     CfnOutput(self, "OracleSchema", value=schema)
Example 7
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        params1 = _ssm.StringParameter(
            self,
            "parameter1Id",
            description="Load Testing Configuration",
            parameter_name="NoOfConCurrentUsers",
            string_value="100",
            tier=_ssm.ParameterTier.STANDARD,
        )

        output1 = core.CfnOutput(
            self,
            "parameter1Output",
            description="Number of concurrent users",
            value=f"{params1.string_value}",
        )

        params2 = _ssm.StringParameter(
            self,
            "parameter2Id",
            description="Load Testing Configuration",
            parameter_name="/locus/configuration/NoOfConCurrentUsers",
            string_value="100",
            tier=_ssm.ParameterTier.STANDARD,
        )

        params3 = _ssm.StringParameter(
            self,
            "parameter3Id",
            description="Load Testing Configuration",
            parameter_name="/locus/configuration/DurationInSec",
            string_value="300",
            tier=_ssm.ParameterTier.STANDARD,
        )

        secret1 = _secretsmanager.Secret(self,
                                         "secret1Id",
                                         description="Customer DB password",
                                         secret_name="cust_db_pass")

        output2 = core.CfnOutput(
            self,
            "secret1Output",
            description="secret 1",
            value=f"{secret1.secret_value}",
        )

        templated_secret = _secretsmanager.Secret(
            self,
            "secret2Id",
            description="Templated secret for user data",
            secret_name="user_kon_attributes",
            generate_secret_string=_secretsmanager.SecretStringGenerator(
                secret_string_template=json.dumps({"username": "******"}),
                generate_string_key="password",
            ),
        )
Example 8
 def _set_db_infra(self) -> None:
     self.db_username = "******"
     # fmt: off
     self.db_password_secret = ssm.Secret(
         self,
         "db-password-secret",
         secret_name="aws-data-wrangler/db_password",
         generate_secret_string=ssm.SecretStringGenerator(
             exclude_characters="/@\"\' \\"),
     ).secret_value
     # fmt: on
     self.db_password = self.db_password_secret.to_string()
     self.db_security_group = ec2.SecurityGroup(
         self,
         "aws-data-wrangler-database-sg",
         vpc=self.vpc,
          description="AWS Data Wrangler Test Arena - Database security group",
     )
     self.db_security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                             ec2.Port.all_traffic())
     self.rds_subnet_group = rds.SubnetGroup(
         self,
         "aws-data-wrangler-rds-subnet-group",
         description="RDS Database Subnet Group",
         vpc=self.vpc,
         vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
     )
     self.rds_role = iam.Role(
         self,
         "aws-data-wrangler-rds-role",
         assumed_by=iam.ServicePrincipal("rds.amazonaws.com"),
          inline_policies={
              "S3": iam.PolicyDocument(statements=[
                 iam.PolicyStatement(
                     effect=iam.Effect.ALLOW,
                     actions=[
                         "s3:Get*",
                         "s3:List*",
                         "s3:Put*",
                         "s3:AbortMultipartUpload",
                     ],
                     resources=[
                         self.bucket.bucket_arn,
                         f"{self.bucket.bucket_arn}/*",
                     ],
                 )
             ]),
         },
     )
     cdk.CfnOutput(self, "DatabasesUsername", value=self.db_username)
     cdk.CfnOutput(
         self,
         "DatabaseSecurityGroupId",
         value=self.db_security_group.security_group_id,
     )
Example 9
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
                 lambdasg: ec2.SecurityGroup, bastionsg: ec2.SecurityGroup,
                 kmskey, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")

        json_template = {'username': '******'}
        db_creds = sm.Secret(
            self,
            'db-secret',
            secret_name=env_name + '/rds-secret',
            generate_secret_string=sm.SecretStringGenerator(
                include_space=False,
                password_length=12,
                generate_string_key='password',
                exclude_punctuation=True,
                secret_string_template=json.dumps(json_template)))
        db_mysql = rds.DatabaseCluster(
            self,
            'mysql',
            default_database_name=prj_name + env_name,
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            engine_version="5.7.12",
            master_user=rds.Login(
                username='******',
                password=db_creds.secret_value_from_json('password')),
            instance_props=rds.InstanceProps(
                vpc=vpc,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.ISOLATED),
                instance_type=ec2.InstanceType(
                    instance_type_identifier="t3.small")),
            instances=1,
            parameter_group=rds.ClusterParameterGroup.from_parameter_group_name(
                self, 'pg-dev', parameter_group_name='default.aurora-mysql5.7'),
            kms_key=kmskey,
            removal_policy=core.RemovalPolicy.DESTROY)

        db_mysql.connections.allow_default_port_from(
            lambdasg, "Access from Lambda functions")
        db_mysql.connections.allow_default_port_from(
            bastionsg, "Allow from bastion host")

        #SSM Parameter
        ssm.StringParameter(self,
                            'db-host',
                            parameter_name='/' + env_name + '/db-host',
                            string_value=db_mysql.cluster_endpoint.hostname)

        ssm.StringParameter(self,
                            'db-name',
                            parameter_name='/' + env_name + '/db-name',
                            string_value=prj_name + env_name)
Example 10
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        bucket_name: str,
        postgres_host: str,
        redis_host: str,
        db_secret: secrets.ISecret,
        full_domain_name: str,
        **kwargs,
    ) -> None:
        super().__init__(
            scope,
            id,
            **kwargs,
        )

        self.django_secret_key = secrets.Secret(
            self,
            "DjangoSecretKey",
            generate_secret_string=secrets.SecretStringGenerator(
                exclude_punctuation=True,
                include_space=False,
            ),
        )

        self.regular_variables = {
            "DJANGO_SETTINGS_MODULE": "backend.settings.production",
            "DEBUG": "",
            "FULL_DOMAIN_NAME": full_domain_name,
            "FULL_APP_NAME": scope.full_app_name,
            "CELERY_METRICS_TOKEN": "my-secret-token",
            "AWS_STORAGE_BUCKET_NAME": bucket_name,
            "POSTGRES_SERVICE_HOST": postgres_host,
            "POSTGRES_PASSWORD": db_secret.secret_value_from_json("password").to_string(),
            "SECRET_KEY": os.environ.get("SECRET_KEY", "mysecretkey123"),  # self.django_secret_key.to_string(),
            "REDIS_SERVICE_HOST": redis_host,
        }

        self.secret_variables = {
            "DJANGO_SECRET_KEY": ecs.Secret.from_secrets_manager(self.django_secret_key),
        }
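The two dictionaries are shaped for an ECS container definition: regular_variables feeds environment (plain strings) while secret_variables feeds secrets (resolved from Secrets Manager at container start). A sketch of the wiring, with the task and image names as assumptions:

task_definition = ecs.FargateTaskDefinition(self, 'BackendTask')
task_definition.add_container(
    'backend',
    image=ecs.ContainerImage.from_registry('my-backend-image'),  # assumed image
    environment=self.regular_variables,   # injected as plain env vars
    secrets=self.secret_variables,        # resolved from Secrets Manager at runtime
)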
Example 11
 def _setup_mysql_serverless(self) -> None:
     port = 3306
     database = "test"
     schema = "test"
     aurora_mysql = rds.ServerlessCluster(
         self,
         "aws-data-wrangler-aurora-cluster-mysql-serverless",
         removal_policy=RemovalPolicy.DESTROY,
         engine=rds.DatabaseClusterEngine.aurora_mysql(
             version=rds.AuroraMysqlEngineVersion.VER_5_7_12,
         ),
         cluster_identifier="mysql-serverless-cluster-wrangler",
         default_database_name=database,
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         scaling=rds.ServerlessScalingOptions(
             auto_pause=Duration.minutes(5),
             min_capacity=rds.AuroraCapacityUnit.ACU_1,
             max_capacity=rds.AuroraCapacityUnit.ACU_1,
         ),
         backup_retention=Duration.days(1),
         vpc=self.vpc,
         vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT),
         subnet_group=self.rds_subnet_group,
         security_groups=[self.db_security_group],
         enable_data_api=True,
     )
     secret = secrets.Secret(
         self,
         "aws-data-wrangler-mysql-serverless-secret",
         secret_name="aws-data-wrangler/mysql-serverless",
         description="MySQL serverless credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "mysql",
                     "host": aurora_mysql.cluster_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": aurora_mysql.cluster_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "MysqlServerlessSecretArn", value=secret.secret_arn)
     CfnOutput(self, "MysqlServerlessClusterArn", value=aurora_mysql.cluster_arn)
     CfnOutput(self, "MysqlServerlessAddress", value=aurora_mysql.cluster_endpoint.hostname)
     CfnOutput(self, "MysqlServerlessPort", value=str(port))
     CfnOutput(self, "MysqlServerlessDatabase", value=database)
     CfnOutput(self, "MysqlServerlessSchema", value=schema)
Example 12
 def _set_opensearch_infra(self) -> None:
     self.username = "******"
     # fmt: off
     self.password_secret = secrets.Secret(
         self,
         "opensearch-password-secret",
         secret_name="aws-data-wrangler/opensearch_password",
         generate_secret_string=secrets.SecretStringGenerator(
             exclude_characters="/@\"\' \\"),
     ).secret_value
     # fmt: on
     self.password = self.password_secret.to_string()
Example 13
 def _create_secret(self):
     """
     Create a secret for RDS
     """
     db_password_secret_id = f"{self.stack_name}-{self.component_id}-db_password_secret"
     secret_name = f"{self.stack_name}-{self.component_id}-dbPassword"
     self.db_password_secret = sm.Secret(
         scope=self,
         id=db_password_secret_id,
         secret_name=secret_name,
         generate_secret_string=sm.SecretStringGenerator(password_length=20, exclude_punctuation=True),
     )
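Since no secret_string_template is given, the secret value is a bare 20-character password; it can seed an RDS master user the same way later examples do. A sketch (the username is an assumption, not part of this secret):

# Sketch: mirrors the rds.Credentials.from_password(...) pattern used
# elsewhere in these examples.
credentials = rds.Credentials.from_password(
    username='admin',  # assumed; this secret stores only the password
    password=self.db_password_secret.secret_value,
)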
Example 14
 def console_password(self,
                      id: str,
                      secret_name: str,
                      template: str = None,
                      key: str = None):
     self.secret = asm.Secret(
         self,
         id,
         generate_secret_string=asm.SecretStringGenerator(
             secret_string_template=template,
             generate_string_key=key,
             password_length=24,
             exclude_characters='"@/\$'),
         secret_name='{}{}'.format(secret_name, id))
     return core.SecretValue(self.secret.secret_value.to_string())
Example 15
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Let us create AWS secrets & SSM Parameters:
        param1 = _ssm.StringParameter(
            self,
            "parameter1",
            description="Load Testing Configuration",
            parameter_name="NoOfConcurrentUsers",
            string_value="100",
            tier=_ssm.ParameterTier.STANDARD  # choose transaction rate
        )
        param2 = _ssm.StringParameter(
            self,
            "parameter2",
            description="Load Testing Configuration",
            parameter_name="/locust/configs/NoOfConcurrentUsers",
            string_value="100",
            tier=_ssm.ParameterTier.STANDARD)
        param3 = _ssm.StringParameter(
            self,
            "parameter3",
            description="Load Testing Configuration",
            parameter_name="/locust/configs/DurationInSec",
            string_value="300",
            tier=_ssm.ParameterTier.STANDARD)

        secret1 = _secretsmanager.Secret(self,
                                         "secret1",
                                         description="Customer DB password",
                                         secret_name="cust_db_pass")

        # hierarchy of secrets
        templated_secret = _secretsmanager.Secret(
            self,
            "secret2",
            description="A Templated secret for user data",
            secret_name="user_kon_attributes",
            generate_secret_string=_secretsmanager.SecretStringGenerator(
                secret_string_template=json.dumps({"username": "******"}),
                generate_string_key="password"))

        output_1 = core.CfnOutput(self,
                                  "param1",
                                  description="NoOfConcurrentUser",
                                  value=f"{param1.string_value}")
        output_2 = core.CfnOutput(self,
                                  "secret1Value",
                                  description="secret1",
                                  value=f"{secret1.secret_value}")
Example 16
    def __init__(self, scope: cdk.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        "Create Secrets & SSM Parameters: "
        param1 = _ssm.StringParameter(self,
                                      "Parameter1",
                                      description="Load testing configuration",
                                      parameter_name="No_Of_Concurrent_Users",
                                      string_value="100",
                                      tier=_ssm.ParameterTier.STANDARD)

        param2 = _ssm.StringParameter(
            self,
            "Parameter2",
            description="Load testing configuration",
            parameter_name="/locust/configs/No_Of_Concurrent_Users",
            string_value="100",
            tier=_ssm.ParameterTier.STANDARD)

        param3 = _ssm.StringParameter(
            self,
            "Parameter3",
            description="Load testing configuration",
            parameter_name="/locust/configs/DurationInSec",
            string_value="300",
            tier=_ssm.ParameterTier.STANDARD)
        """ Build Secrets in Secrets Manager: """
        secret1 = _secretsmanager.Secret(self,
                                         "Secret1",
                                         description="Customer DB password",
                                         secret_name="Custom_DB_Password")

        templated_secret = _secretsmanager.Secret(
            self,
            "Secret2",
            description="A Templated secret for user data",
            secret_name="User_Kon_Attributes",
            generate_secret_string=_secretsmanager.SecretStringGenerator(
                secret_string_template=json.dumps({"username": "******"}),
                generate_string_key="password"))
        """ Output: """
        output_1 = cdk.CfnOutput(self,
                                 "Parameter1Value",
                                 description="No_Of_Concurrent_Users",
                                 value=f"{param1.string_value}")

        output_2 = cdk.CfnOutput(self,
                                 "Secret1Value",
                                 value=f"{secret1.secret_value}")
Example 17
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        #SSM parameters
        ssm01 = aws_ssm.StringParameter(
            self,
            "ssmparameter01",
            description="Test Config",
            parameter_name="ConcurrentUsers",
            string_value="100",
            tier=aws_ssm.ParameterTier.STANDARD
        )

        secret01 = aws_secrm.Secret(
            self,
            "secret01",
            description="DB password",
            secret_name="db_password"
        )

        secret_templet01 = aws_secrm.Secret(
            self,
            "secret_templet01",
            description="templatized user credentials",
            secret_name="user_db_auth",
            generate_secret_string=aws_secrm.SecretStringGenerator(
                secret_string_template=json.dumps(
                    {
                        "username": "******"
                    }
                ),
                generate_string_key="password"
            )
        )

        output_ssm01 = core.CfnOutput(
            self,
            "output_ssm01",
            description="ConcurrentUsersCount",
            value=ssm01.string_value
        )

        output_secret01 = core.CfnOutput(
            self,
            "output_secret01",
            description="DB Password-secret01",
            value=f"{secret01.secret_value}"
        )
Example 18
    def __init__(self, scope: core.Construct, id: str, vpc: VpcStack,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.db_name = "airflow"
        self.rds_secret = sm.Secret(
            self,
            "airflow-rds",
            secret_name="airflow-rds-credentials",
            description="Credentials for RDS PostgreSQL.",
            generate_secret_string=sm.SecretStringGenerator(
                secret_string_template='{"username": "******"}',
                generate_string_key="password",
                password_length=16,
                exclude_characters='"@\\\/',
                exclude_punctuation=True,
            ),
        )
        credentials = rds.Credentials.from_secret(self.rds_secret)

        postgres = rds.DatabaseInstance(
            self,
            "RDS",
            credentials=credentials,
            instance_identifier="airflow-cdk",
            database_name=self.db_name,
            engine=rds.DatabaseInstanceEngine.postgres(
                version=rds.PostgresEngineVersion.VER_9_6_18),
            vpc=vpc.instance,
            vpc_placement=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED),
            port=5432,
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE2,
                ec2.InstanceSize.MICRO,
            ),
            allocated_storage=20,
            security_groups=[vpc.postgres_sg],
            removal_policy=core.RemovalPolicy.DESTROY,
            parameter_group=rds.ParameterGroup.from_parameter_group_name(
                self,
                "para-group-postgres",
                parameter_group_name="default.postgres9.6"),
            deletion_protection=False,
        )

        self._instance = postgres
Example 19
    def __init__(self, scope: core.Construct, id: str, password_object: object,
                 secret_name: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.password_object = password_object
        self.Secret = secretsmanager.Secret(
            self,
            id=id,
            generate_secret_string=secretsmanager.SecretStringGenerator(
                secret_string_template=json.dumps(password_object),
                generate_string_key='Password',
                exclude_punctuation=True,
            ),
            secret_name=secret_name)

        self.clear_text_secret = self.Secret.secret_value_from_json(
            'Password').to_string()
Example 20
    def __init__(self, scope: Construct, cid: str, network: GenericNetwork,
                 security: GenericSecurity, workload_key: _kms.Key,
                 **kwargs) -> None:
        super().__init__(scope, cid, **kwargs)

        mq_sg = security.create_security_group("MQSG")

        secret_name = cid + "Secret"
        sec = _secrets.Secret(
            self,
            secret_name,
            encryption_key=workload_key,
            generate_secret_string=_secrets.SecretStringGenerator(
                exclude_characters="%+~`#$&*()|[]{}=:, ;<>?!'/@",
                password_length=20,
                secret_string_template="{\"username\":\"admin\"}",
                generate_string_key="password"))
        sec_cfn = sec.node.default_child
        sec_cfn.override_logical_id(secret_name)

        self._mq = _mq.CfnBroker(
            self,
            cid,
            auto_minor_version_upgrade=False,
            broker_name=cid,
            deployment_mode="ACTIVE_STANDBY_MULTI_AZ",
            logs=_mq.CfnBroker.LogListProperty(audit=True, general=True),
            encryption_options=_mq.CfnBroker.EncryptionOptionsProperty(
                use_aws_owned_key=False, kms_key_id=workload_key.key_id),
            engine_type="ACTIVEMQ",
            engine_version="5.15.13",
            host_instance_type="mq.m5.large",
            publicly_accessible=False,
            subnet_ids=network.get_isolated_subnets("MQ").subnet_ids,
            security_groups=[mq_sg.security_group_id],
            users=[
                _mq.CfnBroker.UserProperty(
                    username=sec.secret_value_from_json("username").to_string(),
                    password=sec.secret_value_from_json("password").to_string())
            ])
Example 21
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        ##
        ## General parameters used to provision the infrastructure
        ##
        project_name_param = core.CfnParameter(scope=self, id='mlflowStack', type='String', default='mlflowStack')
        db_name = 'mlflowdb'
        port = 3306
        username = '******'
        bucket_name = 'mlflowbucket-track-stack'
        container_repo_name = 'mlflow-containers'
        cluster_name = 'mlflow'
        service_name = 'mlflow'

        # Attach the managed policies to the role that will be assigned to the ECS task.
        role = iam.Role(scope=self, id='TASKROLE', assumed_by=iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'))
        
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonS3FullAccess'))
        
        role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AmazonECS_FullAccess'))

        # Secrets Manager secret that stores the password for our RDS MySQL database
        db_password_secret = sm.Secret(
            scope=self,
            id='dbsecret',
            secret_name='dbPassword',
            generate_secret_string=sm.SecretStringGenerator(password_length=20, exclude_punctuation=True)
        )

        # Create the S3 bucket
        artifact_bucket = s3.Bucket(
            scope=self,
            id='mlflowstacktrack',
            bucket_name=bucket_name,
            public_read_access=False,
            block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
            removal_policy=core.RemovalPolicy.DESTROY
        )
Example 22
    def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        vpc = ec2.Vpc(
            scope=self,
            id="aurora-VPC",
            cidr="10.10.0.0/16"
        )

        db_secret = secretsmanager.Secret(
            scope=self,
            id="templated-secret",
            generate_secret_string=secretsmanager.SecretStringGenerator(
                secret_string_template=json.dumps(
                    {"username": "******"}),
                generate_string_key="password",
                exclude_punctuation=True,
            )
        )

        cluster = rds.ServerlessCluster(
            scope=self,
            id="Cluster",
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            vpc=vpc,
            enable_data_api=True,
            default_database_name ="Racing",
            credentials=rds.Credentials.from_secret(db_secret),
            # removal_policy= core
            scaling=rds.ServerlessScalingOptions(
                # default is to pause after 5 minutes of idle time
                auto_pause=core.Duration.minutes(10),
                # min_capacity=rds.AuroraCapacityUnit.ACU_8,
                # max_capacity=rds.AuroraCapacityUnit.ACU_32
            )
        )
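With enable_data_api=True, the cluster accepts SQL over HTTPS instead of requiring a VPC connection; a minimal boto3 sketch (the two ARNs would come from the cluster and db_secret defined above, elided here):

import boto3

client = boto3.client('rds-data')
response = client.execute_statement(
    resourceArn='arn:aws:rds:...:cluster:...',          # cluster.cluster_arn
    secretArn='arn:aws:secretsmanager:...:secret:...',  # db_secret.secret_arn
    database='Racing',
    sql='SELECT 1',
)
print(response['records'])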
Example 23
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        with open("stack/config.yml", 'r') as stream:
            configs = yaml.safe_load(stream)

        ### S3 core
        images_S3_bucket = _s3.Bucket(self, "ICS_IMAGES")

        images_S3_bucket.add_cors_rule(
            allowed_methods=[_s3.HttpMethods.POST],
            allowed_origins=["*"] # add API gateway web resource URL
        )

        ### SQS core
        image_deadletter_queue = _sqs.Queue(self, "ICS_IMAGES_DEADLETTER_QUEUE")
        image_queue = _sqs.Queue(self, "ICS_IMAGES_QUEUE",
            dead_letter_queue={
                "max_receive_count": configs["DeadLetterQueue"]["MaxReceiveCount"],
                "queue": image_deadletter_queue
            })

        ### api gateway core
        api_gateway = RestApi(self, 'ICS_API_GATEWAY', rest_api_name='ImageContentSearchApiGateway')
        api_gateway_resource = api_gateway.root.add_resource(configs["ProjectName"])
        api_gateway_landing_page_resource = api_gateway_resource.add_resource('web')
        api_gateway_get_signedurl_resource = api_gateway_resource.add_resource('signedUrl')
        api_gateway_image_search_resource = api_gateway_resource.add_resource('search')

        ### landing page function
        get_landing_page_function = Function(self, "ICS_GET_LANDING_PAGE",
            function_name="ICS_GET_LANDING_PAGE",
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.asset("./src/landingPage"))

        get_landing_page_integration = LambdaIntegration(
            get_landing_page_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_landing_page_resource.add_method('GET', get_landing_page_integration,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }])

        ### cognito
        required_attribute = _cognito.StandardAttribute(required=True)

        users_pool = _cognito.UserPool(self, "ICS_USERS_POOL",
            auto_verify=_cognito.AutoVerifiedAttrs(email=True), #required for self sign-up
            standard_attributes=_cognito.StandardAttributes(email=required_attribute), #required for self sign-up
            self_sign_up_enabled=configs["Cognito"]["SelfSignUp"])

        user_pool_app_client = _cognito.CfnUserPoolClient(self, "ICS_USERS_POOL_APP_CLIENT", 
            supported_identity_providers=["COGNITO"],
            allowed_o_auth_flows=["implicit"],
            allowed_o_auth_scopes=configs["Cognito"]["AllowedOAuthScopes"],
            user_pool_id=users_pool.user_pool_id,
            callback_ur_ls=[api_gateway_landing_page_resource.url],
            allowed_o_auth_flows_user_pool_client=True,
            explicit_auth_flows=["ALLOW_REFRESH_TOKEN_AUTH"])

        user_pool_domain = _cognito.UserPoolDomain(self, "ICS_USERS_POOL_DOMAIN", 
            user_pool=users_pool, 
            cognito_domain=_cognito.CognitoDomainOptions(domain_prefix=configs["Cognito"]["DomainPrefix"]))

        ### get signed URL function
        get_signedurl_function = Function(self, "ICS_GET_SIGNED_URL",
            function_name="ICS_GET_SIGNED_URL",
            environment={
                "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
                "DEFAULT_SIGNEDURL_EXPIRY_SECONDS": configs["Functions"]["DefaultSignedUrlExpirySeconds"]
            },
            runtime=Runtime.PYTHON_3_7,
            handler="main.handler",
            code=Code.asset("./src/getSignedUrl"))

        get_signedurl_integration = LambdaIntegration(
            get_signedurl_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_get_signedurl_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            rest_api_id=api_gateway_get_signedurl_resource.rest_api.rest_api_id,
            name="ICS_API_GATEWAY_GET_SIGNED_URL_AUTHORIZER",
            type="COGNITO_USER_POOLS",
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_get_signedurl_resource.add_method('GET', get_signedurl_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]
            ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_get_signedurl_authorizer.ref)

        images_S3_bucket.grant_put(get_signedurl_function, objects_key_pattern="new/*")

        ### image massage function
        image_massage_function = Function(self, "ICS_IMAGE_MASSAGE",
            function_name="ICS_IMAGE_MASSAGE",
            timeout=core.Duration.seconds(6),
            runtime=Runtime.PYTHON_3_7,
            environment={"ICS_IMAGE_MASSAGE": image_queue.queue_name},
            handler="main.handler",
            code=Code.asset("./src/imageMassage"))

        images_S3_bucket.grant_write(image_massage_function, "processed/*")
        images_S3_bucket.grant_delete(image_massage_function, "new/*")
        images_S3_bucket.grant_read(image_massage_function, "new/*")
        
        new_image_added_notification = _s3notification.LambdaDestination(image_massage_function)

        images_S3_bucket.add_event_notification(_s3.EventType.OBJECT_CREATED, 
            new_image_added_notification, 
            _s3.NotificationKeyFilter(prefix="new/")
            )

        image_queue.grant_send_messages(image_massage_function)

        ### image analyzer function
        image_analyzer_function = Function(self, "ICS_IMAGE_ANALYSIS",
            function_name="ICS_IMAGE_ANALYSIS",
            runtime=Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(10),
            environment={
                "ICS_IMAGES_BUCKET": images_S3_bucket.bucket_name,
                "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
                "REGION": core.Aws.REGION,
                },
            handler="main.handler",
            code=Code.asset("./src/imageAnalysis")) 

        image_analyzer_function.add_event_source(_lambda_event_source.SqsEventSource(queue=image_queue, batch_size=10))
        image_queue.grant_consume_messages(image_massage_function)

        lambda_rekognition_access = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW, 
            actions=["rekognition:DetectLabels", "rekognition:DetectModerationLabels"],
            resources=["*"]                    
        )

        image_analyzer_function.add_to_role_policy(lambda_rekognition_access)
        images_S3_bucket.grant_read(image_analyzer_function, "processed/*")

        ### API gateway finalizing
        self.add_cors_options(api_gateway_get_signedurl_resource)
        self.add_cors_options(api_gateway_landing_page_resource)
        self.add_cors_options(api_gateway_image_search_resource)

        ### database 
        database_secret = _secrets_manager.Secret(self, "ICS_DATABASE_SECRET",
            secret_name="rds-db-credentials/image-content-search-rds-secret",
            generate_secret_string=_secrets_manager.SecretStringGenerator(
                generate_string_key='password',
                secret_string_template='{"username": "******"}',
                exclude_punctuation=True,
                exclude_characters='/@\" \\\'',
                require_each_included_type=True
            )
        )

        database = _rds.CfnDBCluster(self, "ICS_DATABASE",
            engine=_rds.DatabaseClusterEngine.aurora_mysql(version=_rds.AuroraMysqlEngineVersion.VER_5_7_12).engine_type,
            engine_mode="serverless",
            database_name=configs["Database"]["Name"],
            enable_http_endpoint=True,
            deletion_protection=configs["Database"]["DeletionProtection"],
            master_username=database_secret.secret_value_from_json("username").to_string(),
            master_user_password=database_secret.secret_value_from_json("password").to_string(),
            scaling_configuration=_rds.CfnDBCluster.ScalingConfigurationProperty(
                auto_pause=configs["Database"]["Scaling"]["AutoPause"],
                min_capacity=configs["Database"]["Scaling"]["Min"],
                max_capacity=configs["Database"]["Scaling"]["Max"],
                seconds_until_auto_pause=configs["Database"]["Scaling"]["SecondsToAutoPause"]
            ),
        )

        database_cluster_arn = "arn:aws:rds:{}:{}:cluster:{}".format(core.Aws.REGION, core.Aws.ACCOUNT_ID, database.ref)
   
        secret_target = _secrets_manager.CfnSecretTargetAttachment(self,"ICS_DATABASE_SECRET_TARGET",
            target_type="AWS::RDS::DBCluster",
            target_id=database.ref,
            secret_id=database_secret.secret_arn
        )

        secret_target.node.add_dependency(database)

        ### database function
        image_data_function_role = _iam.Role(self, "ICS_IMAGE_DATA_FUNCTION_ROLE",
            role_name="ICS_IMAGE_DATA_FUNCTION_ROLE",
            assumed_by=_iam.ServicePrincipal("lambda.amazonaws.com"),
            managed_policies=[
                _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaVPCAccessExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AWSLambdaBasicExecutionRole"),
                _iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSDataFullAccess")
            ]
        )
        
        image_data_function = Function(self, "ICS_IMAGE_DATA",
            function_name="ICS_IMAGE_DATA",
            runtime=Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(5),
            role=image_data_function_role,
            environment={
                "DEFAULT_MAX_CALL_ATTEMPTS": configs["Functions"]["DefaultMaxApiCallAttempts"],
                "CLUSTER_ARN": database_cluster_arn,
                "CREDENTIALS_ARN": database_secret.secret_arn,
                "DB_NAME": database.database_name,
                "REGION": core.Aws.REGION
                },
            handler="main.handler",
            code=Code.asset("./src/imageData")
        ) 

        image_search_integration = LambdaIntegration(
            image_data_function, 
            proxy=True, 
            integration_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': "'*'",
                }
            }])

        api_gateway_image_search_authorizer = CfnAuthorizer(self, "ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            rest_api_id=api_gateway_image_search_resource.rest_api.rest_api_id,
            name="ICS_API_GATEWAY_IMAGE_SEARCH_AUTHORIZER",
            type="COGNITO_USER_POOLS", 
            identity_source="method.request.header.Authorization",
            provider_arns=[users_pool.user_pool_arn])

        api_gateway_image_search_resource.add_method('POST', image_search_integration,
            authorization_type=AuthorizationType.COGNITO,
            method_responses=[{
                'statusCode': '200',
                'responseParameters': {
                    'method.response.header.Access-Control-Allow-Origin': True,
                }
            }]
            ).node.find_child('Resource').add_property_override('AuthorizerId', api_gateway_image_search_authorizer.ref)


        lambda_access_search = _iam.PolicyStatement(
            effect=_iam.Effect.ALLOW, 
            actions=["translate:TranslateText"],
            resources=["*"]            
        ) 

        image_data_function.add_to_role_policy(lambda_access_search)

        ### custom resource
        lambda_provider = Provider(self, 'ICS_IMAGE_DATA_PROVIDER', 
            on_event_handler=image_data_function
        )

        core.CustomResource(self, 'ICS_IMAGE_DATA_RESOURCE', 
            service_token=lambda_provider.service_token,
            pascal_case_properties=False,
            resource_type="Custom::SchemaCreation",
            properties={
                "source": "Cloudformation"
            }
        )

        ### event bridge
        event_bus = _events.EventBus(self, "ICS_IMAGE_CONTENT_BUS")

        event_rule = _events.Rule(self, "ICS_IMAGE_CONTENT_RULE",
            rule_name="ICS_IMAGE_CONTENT_RULE",
            description="The event from image analyzer to store the data",
            event_bus=event_bus,
            event_pattern=_events.EventPattern(resources=[image_analyzer_function.function_arn]),
        )

        event_rule.add_target(_event_targets.LambdaFunction(image_data_function))

        event_bus.grant_put_events(image_analyzer_function)
        image_analyzer_function.add_environment("EVENT_BUS", event_bus.event_bus_name)

        ### outputs
        core.CfnOutput(self, 'CognitoHostedUILogin',
            value='https://{}.auth.{}.amazoncognito.com/login?client_id={}&response_type=token&scope={}&redirect_uri={}'.format(user_pool_domain.domain_name, core.Aws.REGION, user_pool_app_client.ref, '+'.join(user_pool_app_client.allowed_o_auth_scopes), api_gateway_landing_page_resource.url),
            description='The Cognito Hosted UI Login Page'
        )
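Inside ICS_IMAGE_DATA, the CLUSTER_ARN, CREDENTIALS_ARN, and DB_NAME environment variables wired up above are exactly the inputs the RDS Data API expects; a hedged sketch of the handler's data access (the SQL is a placeholder, not taken from the source):

import os
import boto3

def run_statement(sql: str):
    # Query the serverless cluster through the Data API using the
    # environment the stack injects into the function.
    client = boto3.client('rds-data', region_name=os.environ['REGION'])
    return client.execute_statement(
        resourceArn=os.environ['CLUSTER_ARN'],
        secretArn=os.environ['CREDENTIALS_ARN'],
        database=os.environ['DB_NAME'],
        sql=sql,
    )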
Example 24
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ### VPC and subnets
        vpc = ec2.Vpc(self,
                      "vpc",
                      cidr="172.20.0.0/24",
                      nat_gateways=0,
                      max_azs=2,
                      enable_dns_hostnames=True,
                      enable_dns_support=True,
                      subnet_configuration=[
                          ec2.SubnetConfiguration(
                              cidr_mask=26,
                              name="roundcube",
                              subnet_type=ec2.SubnetType.PUBLIC)
                      ])

        ### Define an image and create instance
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Import role
        route53_role = iam.Role.from_role_arn(
            self, "role_id", "arn:aws:iam::585823398980:role/ec2WriteOvpnZone")

        instance = ec2.Instance(self,
                                "instance",
                                instance_type=ec2.InstanceType("t3a.nano"),
                                machine_image=amzn_linux,
                                vpc=vpc,
                                role=route53_role,
                                key_name="roundcube-key")

        instance.connections.allow_from(ec2.Peer.ipv4("109.255.202.235/32"),
                                        ec2.Port.tcp(22), "Allow ssh")
        instance.connections.allow_from(ec2.Peer.ipv4("109.255.202.235/32"),
                                        ec2.Port.tcp(443), "Allow HTTPS")

        ### Aurora cluster
        aurora_secret = sm.Secret(
            self,
            "secret",
            generate_secret_string=sm.SecretStringGenerator(
                generate_string_key="password",
                secret_string_template='{"username": "******"}',
                exclude_punctuation=True,
                password_length=30))

        # no l2 construct for serverless yet
        db_subnet_list = []
        for sn in vpc.public_subnets:
            db_subnet_list.append(sn.subnet_id)

        db_subnets = rds.CfnDBSubnetGroup(
            self,
            "db-subnet-group",
            db_subnet_group_description="subnet group",
            subnet_ids=db_subnet_list)

        rds.CfnDBCluster(
            self,
            "db-cluster",
            database_name="roundcube",
            db_cluster_identifier="serverless-cluster",
            master_username=aurora_secret.secret_value_from_json(
                'username').to_string(),
            master_user_password=aurora_secret.secret_value_from_json(
                'password').to_string(),
            engine="aurora",
            engine_mode="serverless",
            enable_http_endpoint=True,
            scaling_configuration=rds.CfnDBCluster.ScalingConfigurationProperty(
                auto_pause=True,
                min_capacity=1,
                max_capacity=1,
                seconds_until_auto_pause=900,
            ),
            deletion_protection=False,
            db_subnet_group_name=db_subnets.ref)

        # rds.DatabaseCluster(self, "aurora_cluster",
        #     engine                = rds.DatabaseClusterEngine.aurora_postgres(version = rds.AuroraPostgresEngineVersion.VER_11_7),
        #     default_database_name = "roundcube",
        #     #parameter_group       = pgroup,
        #     master_user           = rds.Login(username = "******"),
        #     removal_policy        = core.RemovalPolicy.DESTROY,
        #     instance_props        = rds.InstanceProps(
        #         vpc                   = vpc,
        #         instance_type         = ec2.InstanceType.of(
        #             ec2.InstanceClass.MEMORY5,
        #             ec2.InstanceSize.LARGE
        #         )
        #     )
        # )

        ### Resource group
        rg.CfnGroup(self,
                    "env-group",
                    name="roundcube",
                    resource_query=rg.CfnGroup.ResourceQueryProperty(
                        type="TAG_FILTERS_1_0",
                        query=rg.CfnGroup.QueryProperty(
                            resource_type_filters=["AWS::AllSupported"],
                            tag_filters=[
                                rg.CfnGroup.TagFilterProperty(
                                    key="resource-group", values=["roundcube"])
                            ])))
Example 25
    def __init__(self, scope: core.Construct, id: str, eksname: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Cloudformation input params
        datalake_bucket = core.CfnParameter(
            self,
            "datalakebucket",
            type="String",
            description="Your existing S3 bucket to be accessed by the Jupyter Notebook and ETL job. Default: blank",
            default="")
        login_name = core.CfnParameter(
            self,
            "jhubuser",
            type="String",
            description="Your username login to jupyter hub",
            default="sparkoneks")

        # Auto-generate a user login in secrets manager
        jhub_secret = secmger.Secret(
            self,
            'jHubPwd',
            generate_secret_string=secmger.SecretStringGenerator(
                exclude_punctuation=True,
                secret_string_template=json.dumps(
                    {'username': login_name.value_as_string}),
                generate_string_key="password"))

        # A new bucket to store app code and access logs
        self.app_s3 = S3AppCodeConst(self, 'appcode')

        # 1. Setup EKS base infrastructure
        network_sg = NetworkSgConst(self, 'network-sg', eksname,
                                    self.app_s3.code_bucket)
        iam = IamConst(self, 'iam_roles', eksname)
        eks_cluster = EksConst(self, 'eks_cluster', eksname, network_sg.vpc,
                               iam.managed_node_role, iam.admin_role,
                               self.region)
        eks_security = EksSAConst(self, 'eks_sa', eks_cluster.my_cluster,
                                  jhub_secret)
        eks_base_app = EksBaseAppConst(self, 'eks_base_app',
                                       eks_cluster.my_cluster, self.region)

        # 2. Setup Spark application access control
        app_security = SparkOnEksSAConst(self, 'spark_service_account',
                                         eks_cluster.my_cluster,
                                         login_name.value_as_string,
                                         self.app_s3.code_bucket,
                                         datalake_bucket.value_as_string)

        # 3. Install ETL orchestrator - Argo
        # can be replaced by another workflow tool, e.g. Airflow
        argo_install = eks_cluster.my_cluster.add_helm_chart(
            'ARGOChart',
            chart='argo',
            repository='https://argoproj.github.io/argo-helm',
            release='argo',
            namespace='argo',
            create_namespace=True,
            values=loadYamlLocal('../app_resources/argo-values.yaml'))
        # Create a Spark workflow template with different T-shirt sizes
        submit_tmpl = eks_cluster.my_cluster.add_manifest(
            'SubmitSparkWrktmpl',
            loadYamlLocal('../app_resources/spark-template.yaml'))
        submit_tmpl.node.add_dependency(argo_install)

        # 4. Install Arc Jupyter notebook as the Spark ETL IDE
        jhub_install = eks_cluster.my_cluster.add_helm_chart(
            'JHubChart',
            chart='jupyterhub',
            repository='https://jupyterhub.github.io/helm-chart',
            release='jhub',
            version='0.11.1',
            namespace='jupyter',
            create_namespace=False,
            values=loadYamlReplaceVarLocal(
                '../app_resources/jupyter-values.yaml',
                fields={
                    "{{codeBucket}}": self.app_s3.code_bucket,
                    "{{region}}": self.region
                }))

        # get the Arc Jupyter login from Secrets Manager; drop the random
        # suffix that Secrets Manager appends to generated secret names
        name_parts = core.Fn.split('-', jhub_secret.secret_name)
        name_no_suffix = core.Fn.join(
            '-',
            [core.Fn.select(0, name_parts),
             core.Fn.select(1, name_parts)])

        config_hub = eks.KubernetesManifest(
            self,
            'JHubConfig',
            cluster=eks_cluster.my_cluster,
            manifest=loadYamlReplaceVarLocal(
                '../app_resources/jupyter-config.yaml',
                fields={
                    "{{MY_SA}}": app_security.jupyter_sa,
                    "{{REGION}}": self.region,
                    "{{SECRET_NAME}}": name_no_suffix
                },
                multi_resource=True))
        config_hub.node.add_dependency(jhub_install)

        # 5. (OPTIONAL) retrieve the ALB DNS names to enable CloudFront in the following nested stack.
        # Recommended: remove this section and the rest of the CloudFront components,
        # then set up your own certificate and attach it to the ALB to enable HTTPS.
        self._argo_alb = eks.KubernetesObjectValue(
            self,
            'argoALB',
            cluster=eks_cluster.my_cluster,
            json_path='.status.loadBalancer.ingress[0].hostname',
            object_type='ingress',
            object_name='argo-server',
            object_namespace='argo')
        self._argo_alb.node.add_dependency(argo_install)

        self._jhub_alb = eks.KubernetesObjectValue(
            self,
            'jhubALB',
            cluster=eks_cluster.my_cluster,
            json_path='.status.loadBalancer.ingress[0].hostname',
            object_type='ingress',
            object_name='jupyterhub',
            object_namespace='jupyter')
        self._jhub_alb.node.add_dependency(config_hub)
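
        # Minimal sketch, assuming the URLs should be surfaced after deploy:
        # KubernetesObjectValue exposes the queried field via its `.value` property.
        core.CfnOutput(self, 'ArgoALBDomain', value=self._argo_alb.value)
        core.CfnOutput(self, 'JHubALBDomain', value=self._jhub_alb.value)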
Example n. 26
 def _setup_mysql(self) -> None:
     port = 3306
     database = "test"
     schema = "test"
     aurora_mysql = rds.DatabaseCluster(
         self,
         "aws-data-wrangler-aurora-cluster-mysql",
         removal_policy=RemovalPolicy.DESTROY,
         engine=rds.DatabaseClusterEngine.aurora_mysql(
             version=rds.AuroraMysqlEngineVersion.VER_5_7_12,
         ),
         cluster_identifier="mysql-cluster-wrangler",
         instances=1,
         default_database_name=database,
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         port=port,
         backup=rds.BackupProps(retention=Duration.days(1)),
         instance_props=rds.InstanceProps(
             vpc=self.vpc,
             security_groups=[self.db_security_group],
             publicly_accessible=True,
         ),
         subnet_group=self.rds_subnet_group,
         s3_import_buckets=[self.bucket],
         s3_export_buckets=[self.bucket],
     )
     glue.Connection(
         self,
         "aws-data-wrangler-mysql-glue-connection",
         description="Connect to Aurora (MySQL).",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-mysql",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:mysql://{aurora_mysql.cluster_endpoint.hostname}:{port}/{database}",
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     glue.Connection(
         self,
         "aws-data-wrangler-mysql-glue-connection-ssl",
         description="Connect to Aurora (MySQL) with SSL.",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-mysql-ssl",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:mysql://{aurora_mysql.cluster_endpoint.hostname}:{port}/{database}",
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
             "JDBC_ENFORCE_SSL": "true",
             "CUSTOM_JDBC_CERT": "s3://rds-downloads/rds-combined-ca-bundle.pem",
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     secrets.Secret(
         self,
         "aws-data-wrangler-mysql-secret",
         secret_name="aws-data-wrangler/mysql",
         description="MySQL credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "mysql",
                     "host": aurora_mysql.cluster_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": aurora_mysql.cluster_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "MysqlAddress", value=aurora_mysql.cluster_endpoint.hostname)
     CfnOutput(self, "MysqlPort", value=str(port))
     CfnOutput(self, "MysqlDatabase", value=database)
     CfnOutput(self, "MysqlSchema", value=schema)
Example n. 27
 def _setup_postgresql(self) -> None:
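     # NOTE: non-standard port for PostgreSQL (the engine default is 5432);
     # the cluster and the JDBC URL below use this same value consistently.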
     port = 3306
     database = "postgres"
     schema = "public"
     pg = rds.ParameterGroup(
         self,
         "aws-data-wrangler-postgresql-params",
         engine=rds.DatabaseClusterEngine.aurora_postgres(
             version=rds.AuroraPostgresEngineVersion.VER_11_13,
         ),
         parameters={
             "apg_plan_mgmt.capture_plan_baselines": "off",
         },
     )
     aurora_pg = rds.DatabaseCluster(
         self,
         "aws-data-wrangler-aurora-cluster-postgresql",
         removal_policy=RemovalPolicy.DESTROY,
         engine=rds.DatabaseClusterEngine.aurora_postgres(
             version=rds.AuroraPostgresEngineVersion.VER_11_13,
         ),
         cluster_identifier="postgresql-cluster-wrangler",
         instances=1,
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         port=port,
         backup=rds.BackupProps(retention=Duration.days(1)),
         parameter_group=pg,
         s3_import_buckets=[self.bucket],
         s3_export_buckets=[self.bucket],
         instance_props=rds.InstanceProps(
             vpc=self.vpc,
             security_groups=[self.db_security_group],
             publicly_accessible=True,
         ),
         subnet_group=self.rds_subnet_group,
     )
     glue.Connection(
         self,
         "aws-data-wrangler-postgresql-glue-connection",
         description="Connect to Aurora (PostgreSQL).",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-postgresql",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:postgresql://{aurora_pg.cluster_endpoint.hostname}:{port}/{database}",
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     secrets.Secret(
         self,
         "aws-data-wrangler-postgresql-secret",
         secret_name="aws-data-wrangler/postgresql",
         description="Postgresql credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "postgresql",
                     "host": aurora_pg.cluster_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": aurora_pg.cluster_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "PostgresqlAddress", value=aurora_pg.cluster_endpoint.hostname)
     CfnOutput(self, "PostgresqlPort", value=str(port))
     CfnOutput(self, "PostgresqlDatabase", value=database)
     CfnOutput(self, "PostgresqlSchema", value=schema)
Example n. 28
 def _setup_redshift(self) -> None:
     port = 5439
     database = "test"
     schema = "public"
     redshift_role = iam.Role(
         self,
         "aws-data-wrangler-redshift-role",
         assumed_by=iam.ServicePrincipal("redshift.amazonaws.com"),
         inline_policies={
             "KMS": iam.PolicyDocument(
                 statements=[
                     iam.PolicyStatement(
                         effect=iam.Effect.ALLOW,
                         actions=[
                             "kms:Encrypt",
                             "kms:Decrypt",
                             "kms:GenerateDataKey",
                         ],
                         resources=[self.key.key_arn],
                     )
                 ]
             ),
             "S3": iam.PolicyDocument(
                 statements=[
                     iam.PolicyStatement(
                         effect=iam.Effect.ALLOW,
                         actions=[
                             "s3:Get*",
                             "s3:List*",
                             "s3:Put*",
                         ],
                         resources=[
                             self.bucket.bucket_arn,
                             f"{self.bucket.bucket_arn}/*",
                         ],
                     )
                 ]
             ),
             "LakeFormation": iam.PolicyDocument(
                 statements=[
                     iam.PolicyStatement(
                         effect=iam.Effect.ALLOW,
                         actions=[
                             "lakeformation:GetDataAccess",
                             "lakeformation:GrantPermissions",
                             "lakeformation:GetWorkUnits",
                             "lakeformation:StartQueryPlanning",
                             "lakeformation:GetWorkUnitResults",
                             "lakeformation:GetQueryState",
                         ],
                         resources=["*"],
                     )
                 ]
             ),
             "Glue": iam.PolicyDocument(
                 statements=[
                     iam.PolicyStatement(
                         effect=iam.Effect.ALLOW,
                         actions=[
                             "glue:SearchTables",
                             "glue:GetConnections",
                             "glue:GetDataCatalogEncryptionSettings",
                             "glue:GetTables",
                             "glue:GetTableVersions",
                             "glue:GetPartitions",
                             "glue:DeleteTableVersion",
                             "glue:BatchGetPartition",
                             "glue:GetDatabases",
                             "glue:GetTags",
                             "glue:GetTable",
                             "glue:GetDatabase",
                             "glue:GetPartition",
                             "glue:GetTableVersion",
                             "glue:GetConnection",
                             "glue:GetUserDefinedFunction",
                             "glue:GetUserDefinedFunctions",
                         ],
                         resources=["*"],
                     )
                 ]
             ),
         },
     )
     lf.CfnPermissions(
         self,
         "CodeBuildTestRoleLFPermissions",
         data_lake_principal=lf.CfnPermissions.DataLakePrincipalProperty(
             data_lake_principal_identifier=redshift_role.role_arn
         ),
         resource=lf.CfnPermissions.ResourceProperty(
             table_resource=lf.CfnPermissions.TableResourceProperty(
                 database_name="aws_data_wrangler",
                 table_wildcard={},  # type: ignore
             )
         ),
         permissions=["SELECT", "ALTER", "DESCRIBE", "DROP", "DELETE", "INSERT"],
     )
     redshift.ClusterSubnetGroup(
         self,
         "aws-data-wrangler-redshift-subnet-group",
         description="AWS Data Wrangler Test Athena - Redshift Subnet Group",
         vpc=self.vpc,
         vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
     )
     redshift_cluster = redshift.Cluster(
         self,
         "aws-data-wrangler-redshift-cluster",
         default_database_name=database,
         master_user=redshift.Login(
             master_username=self.db_username,
             master_password=self.db_password_secret,
         ),
         cluster_type=redshift.ClusterType.SINGLE_NODE,
         publicly_accessible=True,
         port=port,
         vpc=self.vpc,
         vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
         security_groups=[self.db_security_group],
         roles=[redshift_role],
     )
     glue.Connection(
         self,
         "aws-data-wrangler-redshift-glue-connection",
         description="Connect to Redshift.",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-redshift",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:redshift://{redshift_cluster.cluster_endpoint.hostname}:{port}/{database}",  # noqa: E501
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     secret = secrets.Secret(
         self,
         "aws-data-wrangler-redshift-secret",
         secret_name="aws-data-wrangler/redshift",
         description="Redshift credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "redshift",
                     "host": redshift_cluster.cluster_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": redshift_cluster.cluster_name,
                 }
             ),
         ),
     )
     CfnOutput(self, "RedshiftSecretArn", value=secret.secret_arn)
     CfnOutput(self, "RedshiftIdentifier", value=redshift_cluster.cluster_name)
     CfnOutput(
         self,
         "RedshiftAddress",
         value=redshift_cluster.cluster_endpoint.hostname,
     )
     CfnOutput(self, "RedshiftPort", value=str(port))
     CfnOutput(self, "RedshiftDatabase", value=database)
     CfnOutput(self, "RedshiftSchema", value=schema)
     CfnOutput(self, "RedshiftRole", value=redshift_role.role_arn)
Example n. 29
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        # Create a VPC
        myvpc = ec2.Vpc(self, "CDKVPC", cidr=vars.cidr)

        # SG for ELB creation
        websitefrontendSG = ec2.SecurityGroup(
            self,
            'websitefrontendSG',
            vpc=myvpc,
            security_group_name='websitefrontendSG')
        websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                           connection=ec2.Port.tcp(80))
        websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                           connection=ec2.Port.tcp(443))

        # Create ALB in VPC
        alb = elb.ApplicationLoadBalancer(
            self,
            'websitefrontend-public',
            vpc=myvpc,
            load_balancer_name='websitefrontend-public',
            security_group=websitefrontendSG,
            internet_facing=True)

        # Add target group to ALB
        catalogtargetgroup = elb.ApplicationTargetGroup(
            self,
            'CatalogTargetGroup',
            port=80,
            vpc=myvpc,
            target_type=elb.TargetType.IP)

        if not vars.sslcert:
            # Add http listener to ALB
            alblistenerhttp = elb.ApplicationListener(
                self,
                'alblistenerhttp',
                load_balancer=alb,
                default_target_groups=[catalogtargetgroup],
                port=80)

        if vars.sslcert:
            # Add http listener to ALB
            alblistenerhttp = elb.ApplicationListener(self,
                                                      'alblistenerhttp',
                                                      load_balancer=alb,
                                                      port=80)
            elb.ApplicationListenerRule(self,
                                        'httpredirectionrule',
                                        listener=alblistenerhttp,
                                        redirect_response=elb.RedirectResponse(
                                            status_code='HTTP_301',
                                            port='443',
                                            protocol='HTTPS'))
            # OPTIONAL - Add https listener to ALB & attach certificate
            alblistenerhttps = elb.ApplicationListener(
                self,
                'alblistenerhttps',
                load_balancer=alb,
                default_target_groups=[catalogtargetgroup],
                port=443,
                certificate_arns=[vars.sslcert_arn])

            # OPTIONAL - Redirect HTTP to HTTPS
            alblistenerhttp.add_redirect_response(id='redirectionrule',
                                                  port='443',
                                                  status_code='HTTP_301',
                                                  protocol='HTTPS')

        if vars.customdomain:
            # OPTIONAL - Update DNS with ALB
            webshopxyz_zone = r53.HostedZone.from_hosted_zone_attributes(
                self,
                id='customdomain',
                hosted_zone_id=vars.hosted_zone_id,
                zone_name=vars.zone_name)
            webshop_root_record = r53.ARecord(
                self,
                'ALBAliasRecord',
                zone=webshopxyz_zone,
                target=r53.RecordTarget.from_alias(
                    alias.LoadBalancerTarget(alb)))

        # SG for ECS creation
        ECSSG = ec2.SecurityGroup(self,
                                  'ECSSecurityGroup',
                                  vpc=myvpc,
                                  security_group_name='ECS')
        ECSSG.add_ingress_rule(peer=websitefrontendSG,
                               connection=ec2.Port.tcp(80))

        # SG for MySQL creation
        MySQLSG = ec2.SecurityGroup(self,
                                    'DBSecurityGroup',
                                    vpc=myvpc,
                                    security_group_name='DB')
        MySQLSG.add_ingress_rule(peer=ECSSG, connection=ec2.Port.tcp(3306))

        # Create DB subnet group
        subnetlist = []
        for subnet in myvpc.private_subnets:
            subnetlist.append(subnet.subnet_id)
        subnetgr = rds.CfnDBSubnetGroup(
            self,
            'democlustersubnetgroup',
            db_subnet_group_name='democlustersubnetgroup',
            db_subnet_group_description='DemoCluster',
            subnet_ids=subnetlist)

        # Create secret db passwd
        secret = sm.SecretStringGenerator(
            exclude_characters="\"'@/\\",
            secret_string_template='{"username": "******"}',
            generate_string_key='password',
            password_length=40)
        dbpass = sm.Secret(self,
                           'democlusterpass',
                           secret_name='democlusterpass',
                           generate_secret_string=secret)

        # Create Aurora serverless MySQL instance
        dbcluster = rds.CfnDBCluster(
            self,
            'DemoCluster',
            engine='aurora',
            engine_mode='serverless',
            engine_version='5.6',
            db_cluster_identifier='DemoCluster',
            master_username=dbpass.secret_value_from_json(
                'username').to_string(),
            master_user_password=dbpass.secret_value_from_json(
                'password').to_string(),
            storage_encrypted=True,
            port=3306,
            vpc_security_group_ids=[MySQLSG.security_group_id],
            scaling_configuration=rds.CfnDBCluster.ScalingConfigurationProperty(
                auto_pause=True,
                max_capacity=4,
                min_capacity=1,
                seconds_until_auto_pause=300),
            db_subnet_group_name=subnetgr.db_subnet_group_name)
        dbcluster.add_override('DependsOn', 'democlustersubnetgroup')
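        # The raw override forces CloudFormation to create the subnet group first,
        # since referencing db_subnet_group_name by plain string does not imply a
        # dependency; dbcluster.add_depends_on(subnetgr) would work as well.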

        # Attach database to secret
        attach = sm.CfnSecretTargetAttachment(
            self,
            'RDSAttachment',
            secret_id=dbpass.secret_arn,
            target_id=dbcluster.ref,
            target_type='AWS::RDS::DBCluster')

        # Upload image into ECR repo
        ecrdemoimage = ecra.DockerImageAsset(self,
                                             'ecrdemoimage',
                                             directory='../',
                                             repository_name='demorepo',
                                             exclude=['cdk.out'])

        # Create ECS fargate cluster
        ecscluster = ecs.Cluster(self, "ecsCluster", vpc=myvpc)

        # Create task role for productsCatalogTask
        getsecretpolicystatement = iam.PolicyStatement(
            actions=[
                "secretsmanager:GetResourcePolicy",
                "secretsmanager:GetSecretValue",
                "secretsmanager:DescribeSecret",
                "secretsmanager:ListSecretVersionIds"
            ],
            resources=[dbpass.secret_arn],
            effect=iam.Effect.ALLOW)
        getsecretpolicydocument = iam.PolicyDocument(
            statements=[getsecretpolicystatement])
        taskrole = iam.Role(
            self,
            'TaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
            role_name='TaskRoleforproductsCatalogTask',
            # inline_policies takes a mapping of policy name -> PolicyDocument
            inline_policies={'getsecretpolicy': getsecretpolicydocument})

        # Create task definition
        taskdefinition = ecs.FargateTaskDefinition(self,
                                                   'productsCatalogTask',
                                                   cpu=1024,
                                                   memory_limit_mib=2048,
                                                   task_role=taskrole)

        # Add container to task definition
        productscatalogcontainer = taskdefinition.add_container(
            'productscatalogcontainer',
            image=ecs.ContainerImage.from_docker_image_asset(
                asset=ecrdemoimage),
            environment={
                "region": vars.region,
                "secretname": "democlusterpass"
            })
        productscatalogcontainer.add_port_mappings(
            ecs.PortMapping(container_port=80, host_port=80))

        # Create service and associate it with the cluster
        catalogservice = ecs.FargateService(
            self,
            'catalogservice',
            task_definition=taskdefinition,
            assign_public_ip=False,
            security_group=ECSSG,
            vpc_subnets=ec2.SubnetSelection(subnets=myvpc.select_subnets(
                subnet_type=ec2.SubnetType.PRIVATE).subnets),
            cluster=ecscluster,
            desired_count=2)

        # Add autoscaling to the service
        scaling = catalogservice.auto_scale_task_count(max_capacity=20,
                                                       min_capacity=1)
        scaling.scale_on_cpu_utilization(
            'ScaleOnCPU',
            target_utilization_percent=70,
            scale_in_cooldown=core.Duration.seconds(amount=1),
            scale_out_cooldown=core.Duration.seconds(amount=0))

        # Associate the fargate service with load balancer targetgroup
        catalogservice.attach_to_application_target_group(catalogtargetgroup)
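
        # Minimal sketch, assuming the site URL should be printed after deploy:
        core.CfnOutput(self, 'AlbDnsName', value=alb.load_balancer_dns_name)
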
    def __init__(self, scope: core.Construct, construct_id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
        #Access points allow multiple WordPress file systems to live on the same EFS volume.
        #The more data on an EFS volume, the better it will perform.
        #This provides a high level of security while also optimizing performance.
        AccessPoint = props['file_system'].add_access_point(
            "local-access-point",
            path=f"/{props['IdentifierName']}",
            create_acl=efs.Acl(
                #https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
                owner_uid="100",
                owner_gid="101",
                permissions="0755"))

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html?highlight=ecs%20cluster#aws_cdk.aws_ecs.Cluster
        cluster = ecs.Cluster(
            self,
            "Cluster",
            vpc=props['vpc'],
            container_insights=props['ecs_enable_container_insights'])

        #Get needed secrets
        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ssm/StringParameter.html?highlight=from_secure_string_parameter_attributes#aws_cdk.aws_ssm.StringParameter.from_secure_string_parameter_attributes
        # ParameterStoreTest = ssm.StringParameter.from_secure_string_parameter_attributes( self, "ParameterStoreTest",
        #     parameter_name="", #Remeber, KMS permissions for task execution role for parameter store key!
        #     version=1
        # )

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Secret.html
        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_secretsmanager/SecretStringGenerator.html
        dbtest = {
            "database_name": '',
            "username": '',
            "host": str(props["rds_instance"].cluster_endpoint.hostname)
        }
        WordpressDbConnectionSecret = secretsmanager.Secret(
            self,
            "WordpressDbConnectionSecret",
            generate_secret_string=secretsmanager.SecretStringGenerator(
                secret_string_template=json.dumps(dbtest),
                generate_string_key="password",
                exclude_characters='/"'))

        #TODO: Lambda call to populate secrets but only

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Volume.html#aws_cdk.aws_ecs.Volume
        WordpressEfsVolume = ecs.Volume(
            name="efs",
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=props['file_system'].file_system_id,
                transit_encryption="ENABLED",
                authorization_config=ecs.AuthorizationConfig(
                    access_point_id=AccessPoint.access_point_id)))

        #Create Task Definition
        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
        WordpressTask = ecs.FargateTaskDefinition(
            self,
            "TaskDefinition",
            cpu=props['ecs_cpu_size'],
            memory_limit_mib=props['ecs_memory_size'],
            volumes=[WordpressEfsVolume])

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
        WordpressContainer = WordpressTask.add_container(
            "Wordpress",
            image=ecs.ContainerImage.from_ecr_repository(
                repository=ecr.Repository.from_repository_name(
                    self,
                    "wpimage",
                    repository_name=props['ecs_container_repo_name']),
                tag=props['ecs_container_tag']),
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="container",
                #log_group = "{props['environment']}/{props['unit']}/{props['application']}", #ToDo make sure I like log group name
                log_retention=logs.RetentionDays(
                    props['ecs_log_retention_period'])),
            environment={
                "TROUBLESHOOTING_MODE_ENABLED":
                props['TROUBLESHOOTING_MODE_ENABLED']
            },
            secrets={
                # "PARAMETERSTORETEST": ecs.Secret.from_ssm_parameter( ParameterStoreTest ),
                "DBHOST":
                ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                                "host"),
                "DBUSER":
                ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                                "username"),
                "DBUSERPASS":
                ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                                "password"),
                "DBNAME":
                ecs.Secret.from_secrets_manager(WordpressDbConnectionSecret,
                                                "database_name")
            },
        )

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/ContainerDefinition.html?highlight=add_port_mappings#aws_cdk.aws_ecs.ContainerDefinition.add_port_mappings
        WordpressContainer.add_port_mappings(
            ecs.PortMapping(container_port=80, protocol=ecs.Protocol.TCP))

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/ContainerDefinition.html?highlight=add_port_mappings#aws_cdk.aws_ecs.ContainerDefinition.add_port_mappings
        #https://gist.github.com/phillippbertram/ee312b09c3982d76b9799653ed6d6201
        WordpressContainer.add_mount_points(
            ecs.MountPoint(container_path=props['ecs_container_efs_path'],
                           read_only=False,
                           source_volume=WordpressEfsVolume.name))

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs_patterns/ApplicationLoadBalancedFargateService.html
        EcsService = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "EcsService",
            cluster=cluster,
            desired_count=props['ecs_container_desired_count'],
            task_definition=WordpressTask,
            enable_ecs_managed_tags=True,
            public_load_balancer=True,
            domain_name=props['domain_name'],
            domain_zone=route53.HostedZone.from_hosted_zone_attributes(
                self,
                "hostedZone",
                hosted_zone_id=props['domain_zone'],
                zone_name=props['zone_name']),
            listener_port=443,
            redirect_http=True,
            protocol=elasticloadbalancingv2.ApplicationProtocol("HTTPS"),
            target_protocol=elasticloadbalancingv2.ApplicationProtocol("HTTP"),
            platform_version=ecs.FargatePlatformVersion(
                "VERSION1_4"),  #Required for EFS
            security_groups=[
                ec2.SecurityGroup.from_security_group_id(
                    self,
                    "EcsToRdsSeurityGroup",
                    security_group_id=props["EcsToRdsSeurityGroup"].
                    security_group_id)
            ],
        )

        #https://gist.github.com/phillippbertram/ee312b09c3982d76b9799653ed6d6201
        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Connections.html#aws_cdk.aws_ec2.Connections
        EcsService.service.connections.allow_to(
            props['file_system'],
            ec2.Port.tcp(2049))  #Open hole to ECS in EFS SG

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_elasticloadbalancingv2/ApplicationTargetGroup.html#aws_cdk.aws_elasticloadbalancingv2.ApplicationTargetGroup.set_attribute
        EcsService.target_group.set_attribute(
            key="load_balancing.algorithm.type",
            value="least_outstanding_requests")
        EcsService.target_group.set_attribute(
            key="deregistration_delay.timeout_seconds", value="30")
        EcsService.target_group.configure_health_check(
            healthy_threshold_count=5,  #2-10
            timeout=core.Duration.seconds(29),
        )
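        # Note: the ALB requires the health-check timeout to be less than the
        # check interval (30 seconds by default), hence the 29-second timeout.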

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html#aws_cdk.aws_ecs.FargateService.auto_scale_task_count
        ECSAutoScaler = EcsService.service.auto_scale_task_count(
            max_capacity=props['ecs_container_max_count'],
            min_capacity=props['ecs_container_min_count'])

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/ScalableTaskCount.html#aws_cdk.aws_ecs.ScalableTaskCount
        ECSAutoScaler.scale_on_cpu_utilization(
            "cpuScale",
            target_utilization_percent=80,
            scale_out_cooldown=core.Duration.seconds(30),
            scale_in_cooldown=core.Duration.seconds(60))
        ECSAutoScaler.scale_on_memory_utilization(
            "memScale",
            target_utilization_percent=80,
            scale_out_cooldown=core.Duration.seconds(30),
            scale_in_cooldown=core.Duration.seconds(60))