def rds_aurora_mysql_2_08_1(self):
    rds_aurora_mysql_2_08_1 = rds.ParameterGroup(
        self,
        "rds-auroraParamGrp-2-08-01",
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_08_1))
    rds_aurora_mysql_2_08_1.add_parameter("character_set_client", "utf8")
    return rds_aurora_mysql_2_08_1

def rds_aurora_postgres_11_6(self):
    rds_aurora_postgres_11_6 = rds.ParameterGroup(
        self,
        "rds-auroraParamGrp-pg-11-6",
        engine=rds.DatabaseClusterEngine.aurora_postgres(
            version=rds.AuroraPostgresEngineVersion.VER_11_6))
    rds_aurora_postgres_11_6.add_parameter("client_encoding", "UTF8")
    return rds_aurora_postgres_11_6
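A minimal usage sketch for the two factory methods above, assuming they live on a CDK v1 Stack class (the construct id, method context, and VPC below are hypothetical):

def build_aurora_mysql_cluster(self, vpc: ec2.Vpc) -> rds.DatabaseCluster:
    # The cluster engine version must match the ParameterGroup's engine family.
    return rds.DatabaseCluster(
        self,
        "aurora-mysql-cluster",  # hypothetical id
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_08_1),
        parameter_group=self.rds_aurora_mysql_2_08_1(),
        instances=1,
        instance_props=rds.InstanceProps(vpc=vpc))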
    def __init__(self, scope: core.Construct, **kwargs) -> None:
        self.deploy_env = active_environment
        super().__init__(scope, id=f"{self.deploy_env.value}-common-stack", **kwargs)

        self.custom_vpc = ec2.Vpc(self, f"vpc-{self.deploy_env.value}")

        self.orders_rds_sg = ec2.SecurityGroup(
            self,
            f"orders-{self.deploy_env.value}-sg",
            vpc=self.custom_vpc,
            allow_all_outbound=True,
            security_group_name=f"orders-{self.deploy_env.value}-sg",
        )

        self.orders_rds_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4("0.0.0.0/0"), connection=ec2.Port.tcp(5432)
        )
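        # NOTE: 0.0.0.0/0 opens port 5432 to the whole internet; a later example
        # on this page scopes the same rule to a single /32 peer instead, e.g.
        # (203.0.113.10 is a hypothetical client address):
        # self.orders_rds_sg.add_ingress_rule(
        #     peer=ec2.Peer.ipv4("203.0.113.10/32"), connection=ec2.Port.tcp(5432))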

        for subnet in self.custom_vpc.private_subnets:
            self.orders_rds_sg.add_ingress_rule(
                peer=ec2.Peer.ipv4(subnet.ipv4_cidr_block), connection=ec2.Port.tcp(5432)
            )

        self.orders_rds_parameter_group = rds.ParameterGroup(
            self,
            f"orders-{self.deploy_env.value}-rds-parameter-group",
            description="Parameter group to allow CDC from RDS using DMS.",
            engine=rds.DatabaseInstanceEngine.postgres(
                version=rds.PostgresEngineVersion.VER_12_4
            ),
            parameters={"rds.logical_replication": "1", "wal_sender_timeout": "0"},
        )

        self.orders_rds = rds.DatabaseInstance(
            self,
            f"orders-{self.deploy_env.value}-rds",
            engine=rds.DatabaseInstanceEngine.postgres(
                version=rds.PostgresEngineVersion.VER_12_4
            ),
            database_name="orders",
            instance_type=ec2.InstanceType("t3.micro"),
            vpc=self.custom_vpc,
            instance_identifier=f"rds-{self.deploy_env.value}-orders-db",
            port=5432,
            vpc_placement=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            subnet_group=rds.SubnetGroup(
                self,
                f"rds-{self.deploy_env.value}-subnet",
                description="place RDS on public subnet",
                vpc=self.custom_vpc,
                vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            ),
            parameter_group=self.orders_rds_parameter_group,
            security_groups=[self.orders_rds_sg],
            removal_policy=core.RemovalPolicy.DESTROY,
            **kwargs,
        )
Example #4
    def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
                 security_group: ec2.SecurityGroup, config: dict,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vdb_rds_params = rds.ParameterGroup(
            self,
            id='rds-pg-vdb-cdk',
            family='postgres11',
            # Memory/WAL sizes use PostgreSQL's native units:
            # shared_buffers and effective_cache_size are in 8 kB pages,
            # *work_mem and temp_file_limit in kB, and *_wal_size in MB.
            parameters={
                'autovacuum': '1',
                'autovacuum_work_mem': '-1',
                'autovacuum_max_workers': '3',
                'huge_pages': 'on',
                'log_min_duration_statement': '1000',
                'track_counts': '1',
                'maintenance_work_mem': '524288',
                'shared_buffers': '262144',
                'seq_page_cost': '1',
                'random_page_cost': '2',
                'min_wal_size': '512',
                'max_wal_size': '4096',
                'wal_compression': '1',
                'work_mem': '262144',
                'temp_file_limit': '10485760',
                'effective_cache_size': '786432'
            })

        self.vdb_rds = rds.DatabaseInstance(
            self,
            id='VdbCdk',
            database_name=config['DATABASE_NAME'],
            instance_identifier='vdb-prod-cdk',
            master_username=config['DATABASE_USER'],
            master_user_password=core.SecretValue(
                value=config['DATABASE_PASSWORD']),
            port=5432,
            engine=rds.DatabaseInstanceEngine.POSTGRES,
            engine_version='11.6',
            instance_class=ec2.InstanceType('t3.large'),
            allocated_storage=100,
            storage_encrypted=False,
            multi_az=False,
            storage_type=rds.StorageType.GP2,
            allow_major_version_upgrade=False,
            auto_minor_version_upgrade=False,
            preferred_maintenance_window='sun:02:00-sun:04:00',
            copy_tags_to_snapshot=True,
            backup_retention=core.Duration.days(7),
            preferred_backup_window='04:00-06:00',
            parameter_group=vdb_rds_params,
            vpc=vpc,
            security_groups=[security_group])
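Note: this example targets the older CDK v1 RDS API, where the engine and its version are separate properties and credentials are passed as master_username / master_user_password. On newer releases the same instance would be declared roughly as sketched below (the Credentials and engine helpers are the ones used elsewhere on this page):

        # self.vdb_rds = rds.DatabaseInstance(
        #     self, 'VdbCdk',
        #     engine=rds.DatabaseInstanceEngine.postgres(
        #         version=rds.PostgresEngineVersion.VER_11_6),
        #     credentials=rds.Credentials.from_password(
        #         config['DATABASE_USER'],
        #         core.SecretValue.plain_text(config['DATABASE_PASSWORD'])),
        #     instance_type=ec2.InstanceType('t3.large'),
        #     vpc=vpc,
        #     security_groups=[security_group])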
    def __init__(self, scope: core.Construct, id: str, bmt_vpc: ec2.Vpc,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        db_subnet_group = rds.SubnetGroup(
            self,
            'Aurora',
            description='aurora subnet group',
            vpc=bmt_vpc,
            removal_policy=core.RemovalPolicy.DESTROY,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED))

        db_security_group = ec2.SecurityGroup(self, 'aurora-sg', vpc=bmt_vpc)

        db_security_group.add_ingress_rule(
            peer=ec2.Peer.ipv4('10.100.0.0/16'),
            connection=ec2.Port.tcp(3306))  # allow MySQL from the VPC-internal range

        param_group = rds.ParameterGroup(
            self,
            'bmt-aurora-param',
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL)
        param_group.add_parameter("performance_schema", "1")

        rds.DatabaseCluster(
            self,
            'bmt-aurora-cluster',
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_1),
            instance_props=rds.InstanceProps(
                vpc=bmt_vpc,
                instance_type=ec2.InstanceType.of(
                    instance_class=ec2.InstanceClass.BURSTABLE3,
                    instance_size=ec2.InstanceSize.MEDIUM),
                security_groups=[db_security_group]),
            instances=1,
            subnet_group=db_subnet_group,
            parameter_group=param_group,
            removal_policy=core.RemovalPolicy.DESTROY)
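A small follow-on sketch: assigning the cluster above to a variable (bmt_cluster is a hypothetical name) lets the stack export its endpoint, the same CfnOutput pattern later examples on this page use.

        # bmt_cluster = rds.DatabaseCluster(...)  # the cluster defined above
        # core.CfnOutput(self, 'BmtAuroraEndpoint',
        #                value=bmt_cluster.cluster_endpoint.socket_address)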
 def _setup_postgresql(self) -> None:
      # NOTE: 3306 is the MySQL default; PostgreSQL conventionally listens on 5432.
      # The value is threaded consistently through the cluster, the Glue connection,
      # and the outputs below, so it works, but it is an easy gotcha.
      port = 3306
     database = "postgres"
     schema = "public"
     pg = rds.ParameterGroup(
         self,
         "aws-data-wrangler-postgresql-params",
         engine=rds.DatabaseClusterEngine.aurora_postgres(
             version=rds.AuroraPostgresEngineVersion.VER_11_13,
         ),
         parameters={
             "apg_plan_mgmt.capture_plan_baselines": "off",
         },
     )
     aurora_pg = rds.DatabaseCluster(
         self,
         "aws-data-wrangler-aurora-cluster-postgresql",
         removal_policy=RemovalPolicy.DESTROY,
         engine=rds.DatabaseClusterEngine.aurora_postgres(
             version=rds.AuroraPostgresEngineVersion.VER_11_13,
         ),
         cluster_identifier="postgresql-cluster-wrangler",
         instances=1,
         credentials=rds.Credentials.from_password(
             username=self.db_username,
             password=self.db_password_secret,
         ),
         port=port,
         backup=rds.BackupProps(retention=Duration.days(1)),
         parameter_group=pg,
         s3_import_buckets=[self.bucket],
         s3_export_buckets=[self.bucket],
         instance_props=rds.InstanceProps(
             vpc=self.vpc,
             security_groups=[self.db_security_group],
             publicly_accessible=True,
         ),
         subnet_group=self.rds_subnet_group,
     )
     glue.Connection(
         self,
         "aws-data-wrangler-postgresql-glue-connection",
         description="Connect to Aurora (PostgreSQL).",
         type=glue.ConnectionType.JDBC,
         connection_name="aws-data-wrangler-postgresql",
         properties={
             "JDBC_CONNECTION_URL": f"jdbc:postgresql://{aurora_pg.cluster_endpoint.hostname}:{port}/{database}",
             "USERNAME": self.db_username,
             "PASSWORD": self.db_password,
         },
         subnet=self.vpc.private_subnets[0],
         security_groups=[self.db_security_group],
     )
     secrets.Secret(
         self,
         "aws-data-wrangler-postgresql-secret",
         secret_name="aws-data-wrangler/postgresql",
         description="Postgresql credentials",
         generate_secret_string=secrets.SecretStringGenerator(
             generate_string_key="dummy",
             secret_string_template=json.dumps(
                 {
                     "username": self.db_username,
                     "password": self.db_password,
                     "engine": "postgresql",
                     "host": aurora_pg.cluster_endpoint.hostname,
                     "port": port,
                     "dbClusterIdentifier": aurora_pg.cluster_identifier,
                     "dbname": database,
                 }
             ),
         ),
     )
     CfnOutput(self, "PostgresqlAddress", value=aurora_pg.cluster_endpoint.hostname)
     CfnOutput(self, "PostgresqlPort", value=str(port))
     CfnOutput(self, "PostgresqlDatabase", value=database)
     CfnOutput(self, "PostgresqlSchema", value=schema)
Example #7
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        vpc_name = self.node.try_get_context("vpc_name")
        vpc = aws_ec2.Vpc.from_lookup(self,
                                      "ExistingVPC",
                                      is_default=True,
                                      vpc_name=vpc_name)

        sg_use_mysql = aws_ec2.SecurityGroup(
            self,
            'MySQLClientSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for mysql client',
            security_group_name='use-mysql-sg')
        core.Tags.of(sg_use_mysql).add('Name', 'mysql-client-sg')

        sg_mysql_server = aws_ec2.SecurityGroup(
            self,
            'MySQLServerSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for mysql',
            security_group_name='mysql-server-sg')
        sg_mysql_server.add_ingress_rule(peer=sg_use_mysql,
                                         connection=aws_ec2.Port.tcp(3306),
                                         description='use-mysql-sg')
        core.Tags.of(sg_mysql_server).add('Name', 'mysql-server-sg')

        rds_subnet_group = aws_rds.SubnetGroup(
            self,
            'RdsSubnetGroup',
            description='subnet group for mysql',
            subnet_group_name='aurora-mysql',
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PRIVATE),
            vpc=vpc)

        rds_engine = aws_rds.DatabaseClusterEngine.aurora_mysql(
            version=aws_rds.AuroraMysqlEngineVersion.VER_2_08_1)

        rds_cluster_param_group = aws_rds.ParameterGroup(
            self,
            'AuroraMySQLClusterParamGroup',
            engine=rds_engine,
            description='Custom cluster parameter group for aurora-mysql5.7',
            parameters={
                'innodb_flush_log_at_trx_commit': '2',
                'slow_query_log': '1',
                'tx_isolation': 'READ-COMMITTED',
                'wait_timeout': '300',
                'character-set-client-handshake': '0',
                'character_set_server': 'utf8mb4',
                'collation_server': 'utf8mb4_unicode_ci',
                'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
            })

        rds_db_param_group = aws_rds.ParameterGroup(
            self,
            'AuroraMySQLDBParamGroup',
            engine=rds_engine,
            description='Custom parameter group for aurora-mysql5.7',
            parameters={
                'slow_query_log': '1',
                'tx_isolation': 'READ-COMMITTED',
                'wait_timeout': '300',
                'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
            })

        db_cluster_name = self.node.try_get_context('db_cluster_name')
        #XXX: aws_rds.Credentials.from_username(username, ...) cannot be given a user-specific Secret name;
        #XXX: therefore, first create the Secret and then use it to create the database, e.g.:
        #    db_secret_name = self.node.try_get_context('db_secret_name')
        #    #XXX: arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
        #    db_secret_arn = 'arn:aws:secretsmanager:{region}:{account}:secret:{resource_name}'.format(
        #      region=core.Aws.REGION, account=core.Aws.ACCOUNT_ID, resource_name=db_secret_name)
        #    db_secret = aws_secretsmanager.Secret.from_secret_arn(self, 'DBSecretFromArn', db_secret_arn)
        #    rds_credentials = aws_rds.Credentials.from_secret(db_secret)
        rds_credentials = aws_rds.Credentials.from_generated_secret("admin")
        db_cluster = aws_rds.DatabaseCluster(
            self,
            'Database',
            engine=rds_engine,
            credentials=rds_credentials,
            instance_props={
                'instance_type':
                aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3,
                                        aws_ec2.InstanceSize.MEDIUM),
                'parameter_group':
                rds_db_param_group,
                'vpc_subnets': {
                    'subnet_type': aws_ec2.SubnetType.PRIVATE
                },
                'vpc':
                vpc,
                'auto_minor_version_upgrade':
                False,
                'security_groups': [sg_mysql_server]
            },
            instances=2,
            parameter_group=rds_cluster_param_group,
            cloudwatch_logs_retention=aws_logs.RetentionDays.THREE_DAYS,
            cluster_identifier=db_cluster_name,
            subnet_group=rds_subnet_group,
            backup=aws_rds.BackupProps(retention=core.Duration.days(3),
                                       preferred_window="03:00-04:00"))

        sagemaker_notebook_role_policy_doc = aws_iam.PolicyDocument()
        sagemaker_notebook_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect": aws_iam.Effect.ALLOW,
                    "resources": [db_cluster.secret.secret_full_arn],
                    "actions": ["secretsmanager:GetSecretValue"]
                }))

        sagemaker_notebook_role = aws_iam.Role(
            self,
            'SageMakerNotebookRoleForRDS',
            role_name='AWSSageMakerNotebookRoleForRDS',
            assumed_by=aws_iam.ServicePrincipal('sagemaker.amazonaws.com'),
            inline_policies={
                'AuroraMySQLSecretPolicy': sagemaker_notebook_role_policy_doc
            })

        cf_readonly_access_policy = aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            'AWSCloudFormationReadOnlyAccess')
        sagemaker_notebook_role.add_managed_policy(cf_readonly_access_policy)

        #XXX: skip downloading rds-combined-ca-bundle.pem if not use SSL with a MySQL DB instance
        # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.SSLSupport
        rds_wb_lifecycle_content = '''#!/bin/bash
sudo -u ec2-user -i <<'EOF'
echo "export AWS_REGION={AWS_Region}" >> ~/.bashrc
source /home/ec2-user/anaconda3/bin/activate python3
pip install --upgrade ipython-sql
pip install --upgrade PyMySQL 
pip install --upgrade pretty_errors
source /home/ec2-user/anaconda3/bin/deactivate
cd /home/ec2-user/SageMaker
wget -N https://s3.amazonaws.com/rds-downloads/rds-combined-ca-bundle.pem
wget -N https://raw.githubusercontent.com/ksmin23/my-aws-cdk-examples/main/rds/sagemaker-aurora_mysql/ipython-sql.ipynb
EOF
'''.format(AWS_Region=core.Aws.REGION)

        rds_wb_lifecycle_config_prop = aws_sagemaker.CfnNotebookInstanceLifecycleConfig.NotebookInstanceLifecycleHookProperty(
            content=core.Fn.base64(rds_wb_lifecycle_content))

        rds_wb_lifecycle_config = aws_sagemaker.CfnNotebookInstanceLifecycleConfig(
            self,
            'MySQLWorkbenchLifeCycleConfig',
            notebook_instance_lifecycle_config_name='MySQLWorkbenchLifeCycleConfig',
            on_start=[rds_wb_lifecycle_config_prop])

        rds_workbench = aws_sagemaker.CfnNotebookInstance(
            self,
            'AuroraMySQLWorkbench',
            instance_type='ml.t3.xlarge',
            role_arn=sagemaker_notebook_role.role_arn,
            lifecycle_config_name=rds_wb_lifecycle_config.notebook_instance_lifecycle_config_name,
            notebook_instance_name='AuroraMySQLWorkbench',
            root_access='Disabled',
            security_group_ids=[sg_use_mysql.security_group_id],
            subnet_id=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids[0])

        core.CfnOutput(self,
                       'StackName',
                       value=self.stack_name,
                       export_name='StackName')
        core.CfnOutput(self, 'VpcId', value=vpc.vpc_id, export_name='VpcId')

        core.CfnOutput(self,
                       'DBClusterName',
                       value=db_cluster.cluster_identifier,
                       export_name='DBClusterName')
        core.CfnOutput(self,
                       'DBCluster',
                       value=db_cluster.cluster_endpoint.socket_address,
                       export_name='DBCluster')
        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_secretsmanager/README.html
        # secret_arn="arn:aws:secretsmanager:<region>:<account-id-number>:secret:<secret-name>-<random-6-characters>",
        core.CfnOutput(self,
                       'DBSecret',
                       value=db_cluster.secret.secret_name,
                       export_name='DBSecret')

        core.CfnOutput(self,
                       'SageMakerRole',
                       value=sagemaker_notebook_role.role_name,
                       export_name='SageMakerRole')
        core.CfnOutput(self,
                       'SageMakerNotebookInstance',
                       value=rds_workbench.notebook_instance_name,
                       export_name='SageMakerNotebookInstance')
        core.CfnOutput(self,
                       'SageMakerNotebookInstanceLifecycleConfig',
                       value=rds_workbench.lifecycle_config_name,
                       export_name='SageMakerNotebookInstanceLifecycleConfig')
Example #8
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self._table = ddb.Table(self,
                                'AirTicketOrder',
                                partition_key={
                                    'name': 'customer_id',
                                    'type': ddb.AttributeType.STRING
                                },
                                stream=ddb.StreamViewType.NEW_AND_OLD_IMAGES,
                                removal_policy=core.RemovalPolicy.DESTROY)

        self.lambda_cmd = _lambda.Function(
            self,
            'CommandDDBSaver',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset("./lambda/cmd/"),
            handler="cmd.lambda_handler",
            environment={
                "ORDER_TABLE_NAME": self._table.table_name,
            })

        self._table.grant_read_write_data(self.lambda_cmd)

        # Allow Command lambda to invoke other lambda
        self.lambda_cmd.add_to_role_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=["*"],
                                actions=["lambda:InvokeFunction"]))

        api = apigw.LambdaRestApi(
            self,
            "CommandEndPoint",
            handler=self.lambda_cmd,
        )

        # TODO: With 2 AZs we could declare just one public and one private
        # subnet configuration; across the 2 AZs that still yields
        # 2 public + 2 private subnets.
        # Lambda access to RDS Aurora MySQL requires a VPC for security and performance.
        vpc = ec2.Vpc(
            self,
            'air-ticket',
            cidr="10.125.0.0/16",
            max_azs=2,
            nat_gateways=1,
            subnet_configuration=[
                ec2.SubnetConfiguration(name="public1",
                                        cidr_mask=24,
                                        subnet_type=ec2.SubnetType.PUBLIC),
                ec2.SubnetConfiguration(name="public2",
                                        cidr_mask=24,
                                        subnet_type=ec2.SubnetType.PUBLIC),
                ec2.SubnetConfiguration(name="private1",
                                        cidr_mask=24,
                                        subnet_type=ec2.SubnetType.PRIVATE),
                ec2.SubnetConfiguration(name="private2",
                                        cidr_mask=24,
                                        subnet_type=ec2.SubnetType.PRIVATE)
            ])

        query_lambda_sg = ec2.SecurityGroup(
            self,
            'Query-Lambda-SG',
            vpc=vpc,
            description="Allows DB connections from Query Lambda SG",
        )

        sink_lambda_sg = ec2.SecurityGroup(
            self,
            'RDS-Sink-Lambda-SG',
            vpc=vpc,
            description="Allows DB connections from Sink Lambda SG",
        )

        db_name = "Demo"
        db_user_name = 'admin'
        db_user_password = 'password'

        parameter_group = rds.ParameterGroup(self,
                                             "ParameterGroup",
                                             family="mysql5.7",
                                             parameters={})
        aurora_db = rds.DatabaseInstance(
            self,
            "air-ticket-db",
            master_user_password=core.SecretValue.ssm_secure(
                'AirTicket.AdminPass', version='1'),
            master_username=db_user_name,
            engine=rds.DatabaseInstanceEngine.MYSQL,
            engine_version="5.7",
            parameter_group=parameter_group,
            vpc=vpc,
            # Disable deletion protection for auto deletion
            deletion_protection=False,
            instance_class=ec2.InstanceType.of(ec2.InstanceClass.MEMORY5,
                                               ec2.InstanceSize.XLARGE),
            removal_policy=core.RemovalPolicy.DESTROY)

        self._query_handler = _lambda.Function(
            self,
            'QueryHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset("./lambda/query/"),
            handler="query.lambda_handler",
            timeout=core.Duration.seconds(60),
            vpc=vpc,
            security_group=query_lambda_sg,
            environment={
                "AuroraEndpoint": aurora_db.db_instance_endpoint_address,
                "dbName": db_name,
                "dbPassword": db_user_passowrd,
                "dbUser": db_user_name
            })

        query_api = apigw.LambdaRestApi(
            self,
            "Query",
            handler=self._query_handler,
        )

        # Init DB Lambda
        self.lambda_init = _lambda.Function(
            self,
            'InitDBHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset("./lambda/initdb/"),
            handler="init.lambda_handler",
            timeout=core.Duration.seconds(60),
            vpc=vpc,
            security_group=query_lambda_sg,
            environment={
                "AuroraEndpoint": aurora_db.db_instance_endpoint_address,
                "dbName": db_name,
                "dbPassword": db_user_passowrd,
                "dbUser": db_user_name
            })

        self.lambda_cmd.add_environment('INITDB_LAMBDA_NAME',
                                        self.lambda_init.function_name)

        # Create stream for fan-out
        stream_name = 'kinesis-stream-for-fanout'

        # Sync DDB stream delta to RDS Lambda
        self.lambda_sync = _lambda.Function(
            self,
            'SyncHandler',
            runtime=_lambda.Runtime.PYTHON_3_7,
            code=_lambda.Code.asset("./lambda/sync/"),
            handler="sync.lambda_handler",
            timeout=core.Duration.seconds(60),
            vpc=vpc,
            security_group=query_lambda_sg,
            environment={"streamName": stream_name})

        # Add DDB stream trigger to sync lambda
        self.lambda_sync.add_event_source(
            event_sources.DynamoEventSource(
                self._table,
                starting_position=_lambda.StartingPosition.TRIM_HORIZON))

        self._table.grant_stream_read(self.lambda_sync)

        # Allow init/sync lambda access MySQL
        aurora_db.connections.allow_from(
            query_lambda_sg,
            ec2.Port.tcp(3306),
            "Allow MySQL access from Query Lambda (because Aurora actually exposes PostgreSQL/MySQL on port 3306)",
        )

        aurora_db.connections.allow_from(
            sink_lambda_sg,
            ec2.Port.tcp(3306),
            "Allow MySQL access from Sink Lambda (because Aurora actually exposes PostgreSQL/MySQL on port 3306)",
        )

        strm = kinesis.Stream(self,
                              'kinesis-stream-for-fanout',
                              stream_name=stream_name)

        # Create RDS Sink Lambda
        self.lambda_rds_sink = _lambda.Function(
            self,
            'RDS_SINK_1',
            handler='rds_sinker.lambda_handler',
            code=_lambda.Code.asset("./lambda/sink/"),
            runtime=_lambda.Runtime.PYTHON_3_7,
            timeout=core.Duration.seconds(300),
            vpc=vpc,
            security_group=sink_lambda_sg,
            environment={
                "AuroraEndpoint": aurora_db.db_instance_endpoint_address,
                "dbName": db_name,
                "dbPassword": db_user_passowrd,
                "dbUser": db_user_name
            })

        # Update Lambda Permissions To Use Stream
        strm.grant_read_write(self.lambda_sync)
        strm.grant_read(self.lambda_rds_sink)

        stream_consumer = kinesis.CfnStreamConsumer(
            self,
            'lambda-efo-consumer-id',
            consumer_name='lambda-efo-consumer',
            stream_arn=strm.stream_arn)

        e_s_mapping = _lambda.EventSourceMapping(
            self,
            'lambda-efo-consumer-event-source-mapping',
            target=self.lambda_rds_sink,
            # An EFO mapping must reference the *consumer* ARN, not the stream ARN.
            event_source_arn=stream_consumer.attr_consumer_arn,
            batch_size=1,
            starting_position=_lambda.StartingPosition.TRIM_HORIZON,
        )

        # self.lambda_rds_sink.add_event_source_mapping(e_s_mapping)

        # The commented-out CDK below creates the Lambda as a standard Kinesis consumer instead of EFO
        #
        # # Create New Kinesis Event Source
        # kinesis_stream_event_source = event_sources.KinesisEventSource(
        #     stream=strm,
        #     starting_position=_lambda.StartingPosition.TRIM_HORIZON,
        #     batch_size=1
        # )

        # # Attach New Event Source To Lambda
        # self.lambda_rds_sink.add_event_source(kinesis_stream_event_source)

        # Create dead letter queue and grant send permission to sync/sink lambda
        self._queue = sqs.Queue(
            self,
            "DeadLetterQueue",

            #Amazon SQS sets a visibility timeout, a period of time during which Amazon
            # SQS prevents other consumers from receiving and processing the message.
            # The default visibility timeout for a message is 30 seconds.
            # The minimum is 0 seconds. The maximum is 12 hours.
            visibility_timeout=core.Duration.seconds(300),
        )

        self._queue.grant_send_messages(self.lambda_sync)
        self._queue.grant_send_messages(self.lambda_rds_sink)
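        # NOTE: grant_send_messages alone does not make this queue a DLQ; to
        # actually route failures here, pass dead_letter_queue=self._queue when
        # defining the Functions, or attach an on_failure SqsDlq destination to
        # the event sources.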

        self.lambda_sync.add_environment("DLQ_NAME", self._queue.queue_name)
Example #9
  def __init__(self, scope: Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    vpc_name = self.node.try_get_context("vpc_name")
    vpc = aws_ec2.Vpc.from_lookup(self, "ExistingVPC",
      is_default=True,
      vpc_name=vpc_name)

    sg_use_mysql = aws_ec2.SecurityGroup(self, 'MySQLClientSG',
      vpc=vpc,
      allow_all_outbound=True,
      description='security group for mysql client',
      security_group_name='default-mysql-client-sg'
    )
    cdk.Tags.of(sg_use_mysql).add('Name', 'default-mysql-client-sg')

    sg_mysql_server = aws_ec2.SecurityGroup(self, 'MySQLServerSG',
      vpc=vpc,
      allow_all_outbound=True,
      description='security group for mysql',
      security_group_name='default-mysql-server-sg'
    )
    sg_mysql_server.add_ingress_rule(peer=sg_use_mysql, connection=aws_ec2.Port.tcp(3306),
      description='default-mysql-client-sg')
    sg_mysql_server.add_ingress_rule(peer=sg_mysql_server, connection=aws_ec2.Port.all_tcp(),
      description='default-mysql-server-sg')
    cdk.Tags.of(sg_mysql_server).add('Name', 'default-mysql-server-sg')

    rds_subnet_group = aws_rds.SubnetGroup(self, 'MySQLSubnetGroup',
      description='subnet group for mysql',
      subnet_group_name='aurora-mysql',
      vpc_subnets=aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT),
      vpc=vpc
    )

    rds_engine = aws_rds.DatabaseClusterEngine.aurora_mysql(version=aws_rds.AuroraMysqlEngineVersion.VER_3_01_0)

    #XXX: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Reference.html#AuroraMySQL.Reference.Parameters.Cluster
    rds_cluster_param_group = aws_rds.ParameterGroup(self, 'AuroraMySQLClusterParamGroup',
      engine=rds_engine,
      description='Custom cluster parameter group for aurora-mysql8.x',
      parameters={
        # For Aurora MySQL version 3, Aurora always uses the default value of 1.
        # 'innodb_flush_log_at_trx_commit': '2',
        'slow_query_log': '1',
        # Removed from Aurora MySQL version 3.
        # 'tx_isolation': 'READ-COMMITTED',
        'wait_timeout': '300',
        'character-set-client-handshake': '0',
        'character_set_server': 'utf8mb4',
        'collation_server': 'utf8mb4_unicode_ci',
        'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
      }
    )

    #XXX: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Reference.html#AuroraMySQL.Reference.Parameters.Instance
    rds_db_param_group = aws_rds.ParameterGroup(self, 'AuroraMySQLDBParamGroup',
      engine=rds_engine,
      description='Custom parameter group for aurora-mysql8.x',
      parameters={
        'slow_query_log': '1',
        # Removed from Aurora MySQL version 3.
        # 'tx_isolation': 'READ-COMMITTED',
        'wait_timeout': '300',
        'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
      }
    )

    db_cluster_name = self.node.try_get_context('db_cluster_name')
    #XXX: aws_rds.Credentials.from_username(username, ...) cannot be given a user-specific Secret name;
    # therefore, first create the Secret and then use it to create the database
    db_secret_name = self.node.try_get_context('db_secret_name')
    #XXX: arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
    db_secret_arn = 'arn:aws:secretsmanager:{region}:{account}:secret:{resource_name}'.format(
      region=cdk.Aws.REGION, account=cdk.Aws.ACCOUNT_ID, resource_name=db_secret_name)
    db_secret = aws_secretsmanager.Secret.from_secret_partial_arn(self, 'DBSecretFromArn', db_secret_arn)
    rds_credentials = aws_rds.Credentials.from_secret(db_secret)

    db_cluster = aws_rds.DatabaseCluster(self, 'Database',
      engine=rds_engine,
      credentials=rds_credentials, # username and password come from the existing Secret looked up above
      instance_props={
        'instance_type': aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3, aws_ec2.InstanceSize.MEDIUM),
        'parameter_group': rds_db_param_group,
        'vpc_subnets': {
          'subnet_type': aws_ec2.SubnetType.PRIVATE_WITH_NAT
        },
        'vpc': vpc,
        'auto_minor_version_upgrade': False,
        'security_groups': [sg_mysql_server]
      },
      instances=2,
      parameter_group=rds_cluster_param_group,
      cloudwatch_logs_retention=aws_logs.RetentionDays.THREE_DAYS,
      cluster_identifier=db_cluster_name,
      subnet_group=rds_subnet_group,
      backup=aws_rds.BackupProps(
        retention=cdk.Duration.days(3),
        preferred_window="03:00-04:00"
      )
    )

    cdk.CfnOutput(self, 'DBClusterEndpoint', value=db_cluster.cluster_endpoint.socket_address, export_name='DBClusterEndpoint')
    cdk.CfnOutput(self, 'DBClusterReadEndpoint', value=db_cluster.cluster_read_endpoint.socket_address, export_name='DBClusterReadEndpoint')
Example #10
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        vpc_name = self.node.try_get_context("vpc_name")
        vpc = aws_ec2.Vpc.from_lookup(self,
                                      "ExistingVPC",
                                      is_default=True,
                                      vpc_name=vpc_name)

        sg_use_mysql = aws_ec2.SecurityGroup(
            self,
            'MySQLClientSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for mysql client',
            security_group_name='use-default-mysql')
        core.Tags.of(sg_use_mysql).add('Name', 'use-default-mysql')

        sg_mysql_server = aws_ec2.SecurityGroup(
            self,
            'MySQLServerSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for mysql',
            security_group_name='default-mysql-server')
        sg_mysql_server.add_ingress_rule(peer=sg_use_mysql,
                                         connection=aws_ec2.Port.tcp(3306),
                                         description='use-default-mysql')
        core.Tags.of(sg_mysql_server).add('Name', 'mysql-server')

        rds_subnet_group = aws_rds.SubnetGroup(
            self,
            'RdsSubnetGroup',
            description='subnet group for mysql',
            subnet_group_name='aurora-mysql',
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PRIVATE),
            vpc=vpc)

        rds_engine = aws_rds.DatabaseClusterEngine.aurora_mysql(
            version=aws_rds.AuroraMysqlEngineVersion.VER_2_08_1)

        rds_cluster_param_group = aws_rds.ParameterGroup(
            self,
            'AuroraMySQLClusterParamGroup',
            engine=rds_engine,
            description='Custom cluster parameter group for aurora-mysql5.7',
            parameters={
                'innodb_flush_log_at_trx_commit': '2',
                'slow_query_log': '1',
                'tx_isolation': 'READ-COMMITTED',
                'wait_timeout': '300',
                'character-set-client-handshake': '0',
                'character_set_server': 'utf8mb4',
                'collation_server': 'utf8mb4_unicode_ci',
                'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
            })

        rds_db_param_group = aws_rds.ParameterGroup(
            self,
            'AuroraMySQLDBParamGroup',
            engine=rds_engine,
            description='Custom parameter group for aurora-mysql5.7',
            parameters={
                'slow_query_log': '1',
                'tx_isolation': 'READ-COMMITTED',
                'wait_timeout': '300',
                'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
            })

        db_cluster_name = self.node.try_get_context('db_cluster_name')
        #XXX: aws_rds.Credentials.from_username(username, ...) cannot be given a user-specific Secret name;
        #XXX: therefore, first create the Secret and then use it to create the database
        db_secret_name = self.node.try_get_context('db_secret_name')
        #XXX: arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
        db_secret_arn = 'arn:aws:secretsmanager:{region}:{account}:secret:{resource_name}'.format(
            region=core.Aws.REGION,
            account=core.Aws.ACCOUNT_ID,
            resource_name=db_secret_name)
        db_secret = aws_secretsmanager.Secret.from_secret_arn(
            self, 'DBSecretFromArn', db_secret_arn)
        rds_credentials = aws_rds.Credentials.from_secret(db_secret)

        db_cluster = aws_rds.DatabaseCluster(
            self,
            'Database',
            engine=rds_engine,
            credentials=rds_credentials,
            instance_props={
                'instance_type':
                aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3,
                                        aws_ec2.InstanceSize.MEDIUM),
                'parameter_group':
                rds_db_param_group,
                'vpc_subnets': {
                    'subnet_type': aws_ec2.SubnetType.PRIVATE
                },
                'vpc':
                vpc,
                'auto_minor_version_upgrade':
                False,
                'security_groups': [sg_mysql_server]
            },
            instances=2,
            parameter_group=rds_cluster_param_group,
            cloudwatch_logs_retention=aws_logs.RetentionDays.THREE_DAYS,
            cluster_identifier=db_cluster_name,
            subnet_group=rds_subnet_group,
            backup=aws_rds.BackupProps(retention=core.Duration.days(3),
                                       preferred_window="03:00-04:00"))

        sg_mysql_public_proxy = aws_ec2.SecurityGroup(
            self,
            'MySQLPublicProxySG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for mysql public proxy',
            security_group_name='default-mysql-public-proxy')
        sg_mysql_public_proxy.add_ingress_rule(
            peer=aws_ec2.Peer.any_ipv4(),
            connection=aws_ec2.Port.tcp(3306),
            description='mysql public proxy')
        core.Tags.of(sg_mysql_public_proxy).add('Name', 'mysql-public-proxy')

        #XXX: Database Proxy uses only the Secret ARN of the target database or database cluster
        #XXX: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbproxy-authformat.html
        #XXX: If a new Secret for a database user is created, the Resource list of the proxy IAM role must be updated to include it.
        #XXX: Otherwise, the new database user cannot connect to the database through RDS Proxy.
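        #XXX: Sketch of the implication above: every database user that should
        #XXX: connect through the proxy needs its Secret listed in `secrets`,
        #XXX: e.g. secrets=[db_secret, new_user_secret] (new_user_secret is
        #XXX: hypothetical); the construct then grants the proxy role read
        #XXX: access to each listed Secret.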
        db_proxy = aws_rds.DatabaseProxy(
            self,
            'DBProxy',
            proxy_target=aws_rds.ProxyTarget.from_cluster(db_cluster),
            secrets=[db_secret],
            vpc=vpc,
            db_proxy_name='{}-proxy'.format(db_cluster_name),
            idle_client_timeout=core.Duration.minutes(10),
            max_connections_percent=90,
            max_idle_connections_percent=10,
            security_groups=[sg_use_mysql, sg_mysql_public_proxy],
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC))
Example #11
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        vpc_name = self.node.try_get_context("vpc_name")
        vpc = aws_ec2.Vpc.from_lookup(self,
                                      "ExistingVPC",
                                      is_default=True,
                                      vpc_name=vpc_name)

        sg_postgresql_client = aws_ec2.SecurityGroup(
            self,
            'PostgreSQLClientSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for postgresql client',
            security_group_name='default-postgresql-client-sg')
        cdk.Tags.of(sg_postgresql_client).add('Name',
                                              'default-postgresql-client-sg')

        sg_postgresql_server = aws_ec2.SecurityGroup(
            self,
            'PostgreSQLServerSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for postgresql',
            security_group_name='default-postgresql-server-sg')
        sg_postgresql_server.add_ingress_rule(
            peer=sg_postgresql_client,
            connection=aws_ec2.Port.tcp(5432),
            description='default-postgresql-client-sg')
        sg_postgresql_server.add_ingress_rule(
            peer=sg_postgresql_server,
            connection=aws_ec2.Port.all_tcp(),
            description='default-postgresql-server-sg')
        cdk.Tags.of(sg_postgresql_server).add('Name',
                                              'default-postgresql-server-sg')

        rds_subnet_group = aws_rds.SubnetGroup(
            self,
            'PostgreSQLSubnetGroup',
            description='subnet group for postgresql',
            subnet_group_name='aurora-postgresql',
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT),
            vpc=vpc)

        db_cluster_name = self.node.try_get_context('db_cluster_name')
        #XXX: aws_rds.Credentials.from_username(username, ...) cannot be given a user-specific Secret name;
        # therefore, first create the Secret and then use it to create the database
        db_secret_name = self.node.try_get_context('db_secret_name')
        #XXX: arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
        db_secret_arn = 'arn:aws:secretsmanager:{region}:{account}:secret:{resource_name}'.format(
            region=cdk.Aws.REGION,
            account=cdk.Aws.ACCOUNT_ID,
            resource_name=db_secret_name)
        db_secret = aws_secretsmanager.Secret.from_secret_partial_arn(
            self, 'DBSecretFromArn', db_secret_arn)
        rds_credentials = aws_rds.Credentials.from_secret(db_secret)

        rds_engine = aws_rds.DatabaseClusterEngine.aurora_postgres(
            version=aws_rds.AuroraPostgresEngineVersion.VER_13_4)

        #XXX: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Reference.ParameterGroups.html#AuroraPostgreSQL.Reference.Parameters.Cluster
        rds_cluster_param_group = aws_rds.ParameterGroup(
            self,
            'AuroraPostgreSQLClusterParamGroup',
            engine=rds_engine,
            description='Custom cluster parameter group for aurora-postgresql13',
            parameters={
                'log_min_duration_statement': '15000',  # 15 sec
                'default_transaction_isolation': 'read committed',
                'client_encoding': 'UTF8'
            })

        #XXX: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Reference.ParameterGroups.html#AuroraPostgreSQL.Reference.Parameters.Instance
        rds_db_param_group = aws_rds.ParameterGroup(
            self,
            'AuroraPostgreSQLDBParamGroup',
            engine=rds_engine,
            description='Custom parameter group for aurora-postgresql13',
            parameters={
                'log_min_duration_statement': '15000',  # 15 sec
                'default_transaction_isolation': 'read committed'
            })

        db_cluster = aws_rds.DatabaseCluster(
            self,
            'Database',
            engine=rds_engine,
            credentials=rds_credentials,  # username and password come from the existing Secret looked up above
            instance_props={
                'instance_type':
                aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3,
                                        aws_ec2.InstanceSize.MEDIUM),
                'parameter_group':
                rds_db_param_group,
                'vpc_subnets': {
                    'subnet_type': aws_ec2.SubnetType.PRIVATE_WITH_NAT
                },
                'vpc':
                vpc,
                'auto_minor_version_upgrade':
                False,
                'security_groups': [sg_postgresql_server]
            },
            instances=2,
            parameter_group=rds_cluster_param_group,
            cloudwatch_logs_retention=aws_logs.RetentionDays.THREE_DAYS,
            cluster_identifier=db_cluster_name,
            subnet_group=rds_subnet_group,
            backup=aws_rds.BackupProps(retention=cdk.Duration.days(3),
                                       preferred_window="03:00-04:00"))

        cdk.CfnOutput(self,
                      'DBClusterEndpoint',
                      value=db_cluster.cluster_endpoint.socket_address,
                      export_name='DBClusterEndpoint')
        cdk.CfnOutput(self,
                      'DBClusterReadEndpoint',
                      value=db_cluster.cluster_read_endpoint.socket_address,
                      export_name='DBClusterReadEndpoint')
Example #12
    def __init__(self, scope: core.Construct, vpc: ec2.Vpc, **kwargs):
        self.deploy_env = active_environment
        self.custom_vpc = vpc
        super().__init__(scope,
                         id=f"{self.deploy_env.value}-rds-stack",
                         **kwargs)

        # Security Group
        self.sg_ecommerce_rds = ec2.SecurityGroup(
            self,
            id=f"sg-rds-ecommerce-{self.deploy_env.value}",
            vpc=self.custom_vpc,
            allow_all_outbound=True,
            security_group_name=f"rds-ecommerce-{self.deploy_env.value}-sg",
        )

        # Security Group Ingress Rules (Public)
        self.sg_ecommerce_rds.add_ingress_rule(peer=ec2.Peer.ipv4("0.0.0.0/0"),
                                               connection=ec2.Port.tcp(5432))

        for subnet in self.custom_vpc.private_subnets:
            self.sg_ecommerce_rds.add_ingress_rule(
                peer=ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                connection=ec2.Port.tcp(5432),
            )

        # Parameter group - parameters that allow DMS to read changes (CDC) from RDS
        self.ecommerce_rds_parameter_group = rds.ParameterGroup(
            self,
            id=f"ecommerce-{self.deploy_env.value}-rds-parameter-group",
            description="Parameter group to allow CDC from RDS using DMS.",
            engine=rds.DatabaseInstanceEngine.postgres(
                version=rds.PostgresEngineVersion.VER_12_4),
            parameters={
                "rds.logical_replication": "1",
                "wal_sender_timeout": "0"
            },
        )

        # Define credentials.
        # Definitely not best practice: we should use Secrets Manager,
        # but we want to avoid extra costs in this demonstration.
        self.rds_credentials = rds.Credentials.from_password(
            username=db_username,
            password=core.SecretValue.plain_text(db_password))
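        # A sketch of the Secrets Manager alternative mentioned above, which
        # generates and stores the password instead of inlining it:
        # self.rds_credentials = rds.Credentials.from_generated_secret(
        #     username=db_username)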

        # Postgres DataBase Instance
        self.ecommerce_rds = rds.DatabaseInstance(
            self,
            id=f"rds-ecommerce-{self.deploy_env.value}",
            database_name=db_name,
            engine=rds.DatabaseInstanceEngine.postgres(
                version=rds.PostgresEngineVersion.VER_12_4),
            credentials=self.rds_credentials,
            instance_type=ec2.InstanceType("t3.micro"),
            vpc=self.custom_vpc,
            instance_identifier=f"rds-{self.deploy_env.value}-ecommerce-db",
            port=5432,
            vpc_placement=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            subnet_group=rds.SubnetGroup(
                self,
                f"rds-{self.deploy_env.value}-subnet",
                description="place RDS on public subnet",
                vpc=self.custom_vpc,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PUBLIC),
            ),
            parameter_group=self.ecommerce_rds_parameter_group,
            security_groups=[self.sg_ecommerce_rds],
            removal_policy=core.RemovalPolicy.DESTROY,
            **kwargs,
        )

        self._rds_host = self.ecommerce_rds.db_instance_endpoint_address

        self._rds_port = self.ecommerce_rds.db_instance_endpoint_port

    # NOTE: properties must live at class level, not inside __init__; defined
    # there they would be local names and never reachable as stack attributes.
    @property
    def rds_endpoint_address(self):
        return self._rds_host

    @property
    def rds_endpoint_port(self):
        return self._rds_port
    def __init__(self, scope: core.Construct, environment: Environment, **kwargs) -> None:
        self.env = environment.value
        super().__init__(scope, id=f'{self.env}-common', **kwargs)

        self.custom_vpc = ec2.Vpc(
            self,
            f'vpc-{self.env}'
        )

        self.orders_rds_sg = ec2.SecurityGroup(
            self,
            f'orders-{self.env}-sg',
            vpc=self.custom_vpc,
            allow_all_outbound=True,
            security_group_name=f'orders-{self.env}-sg',
        )

        self.orders_rds_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4('37.156.75.55/32'),
            connection=ec2.Port.tcp(5432)
        )

        for subnet in self.custom_vpc.private_subnets:
            self.orders_rds_sg.add_ingress_rule(
                peer=ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                connection=ec2.Port.tcp(5432)
            )

        self.orders_rds_parameter_group = rds.ParameterGroup(
            self,
            f'orders-{self.env}-rds-parameter-group',
            description='Parameter group to allow CDC from RDS using DMS.',
            engine=rds.DatabaseInstanceEngine.postgres(version=rds.PostgresEngineVersion.VER_12_4),
            parameters={
                "rds.logical_replication": "1",
                "wal_sender_timeout": "0"
            }
        )

        self.orders_rds = rds.DatabaseInstance(
            self,
            f'orders-{self.env}-rds',
            engine=rds.DatabaseInstanceEngine.postgres(version=rds.PostgresEngineVersion.VER_12_4),
            database_name='orders',
            instance_type=ec2.InstanceType('t3.micro'),
            vpc=self.custom_vpc,
            instance_identifier=f'rds-{self.env}-orders-db',
            port=5432,
            vpc_placement=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            subnet_group=rds.SubnetGroup(
                self,
                f'rds-{self.env}-subnet',
                description='place RDS on public subnet',
                vpc=self.custom_vpc,
                vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)
            ),
            parameter_group=self.orders_rds_parameter_group,
            security_groups=[
                self.orders_rds_sg
            ],
            removal_policy=core.RemovalPolicy.DESTROY,
            **kwargs
        )
Example #14
    def __init__(
        self,
        scope: cdk.Construct,
        construct_id: str,
        stack_log_level: str,
        vpc,
        rds_instance_size: str,
        enable_multi_az: bool,
        enable_perf_insights: bool,
        **kwargs
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.db_name = "store_events"
        self.db_secret = _sm.Secret(
            self,
            "storeEventsDbSecret",
            secret_name=f"store-events-db-credentials-{construct_id}",
            description="Credentials for Store Events DB in RDS MySQL.",
            generate_secret_string=_sm.SecretStringGenerator(
                secret_string_template='{"username": "******"}',
                generate_string_key="password",
                password_length=16,
                exclude_characters='"@\\\/',
                exclude_punctuation=True,
            ),
            removal_policy=cdk.RemovalPolicy.DESTROY
        )
        store_events_db_credentials = _rds.Credentials.from_secret(
            self.db_secret)

        # Create Security Group for MySQL Server Instance
        self.my_sql_db_sg = _ec2.SecurityGroup(
            self,
            id="mySqlDbSecurityGroup",
            vpc=vpc,
            security_group_name=f"mysql_db_sg_{construct_id}",
            description="Security Group for MySQL"
        )
        cdk.Tags.of(self.my_sql_db_sg).add("name", "mysql_db_sg")

        self.my_sql_db_sg.add_ingress_rule(
            peer=_ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=_ec2.Port.tcp(3306),
            description="Allow Incoming DB Traffic from within VPC"
        )

        self.my_sql_db_sg.add_ingress_rule(
            peer=self.my_sql_db_sg,
            connection=_ec2.Port.all_tcp(),
            description="Allow ALL PORTS for TCP within SG for GLUE Connections"
        )

        # Create Security Group for PostgreSQL Server Instance
        self.pgsql_db_sg = _ec2.SecurityGroup(
            self,
            id="postgreSqlDbSecurityGroup",
            vpc=vpc,
            security_group_name=f"pgsql_db_sg_{construct_id}",
            description="Security Group for PostgreSQL"
        )
        cdk.Tags.of(self.pgsql_db_sg).add("name", "pgsql_db_sg")

        self.pgsql_db_sg.add_ingress_rule(
            peer=_ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=_ec2.Port.tcp(5432),
            description="Allow Incoming DB Traffic from within VPC"
        )

        # Param Group for Postgresql
        self.store_events_db_params_group = _rds.ParameterGroup(
            self,
            "ordersDbParamsGroup",
            description='Parameter group to allow CDC from RDS using DMS.',
            engine=_rds.DatabaseInstanceEngine.postgres(
                version=_rds.PostgresEngineVersion.VER_11_6),
            parameters={
                "rds.logical_replication": "1",
                "wal_sender_timeout": "0"
            }
        )

        # Create an RDS database instance
        self.store_events_db = _rds.DatabaseInstance(
            self,
            "storeEventsDb",
            credentials=store_events_db_credentials,
            database_name=f"{self.db_name}",
            # engine=_rds.DatabaseInstanceEngine.MYSQL,
            engine=_rds.DatabaseInstanceEngine.postgres(
                version=_rds.PostgresEngineVersion.VER_11_6),
            # engine=_rds.DatabaseInstanceEngine.mysql(
            #     version=_rds.MysqlEngineVersion.VER_5_7_31
            # ),
            vpc=vpc,
            port=5432,
            security_groups=[self.pgsql_db_sg],
            allocated_storage=50,
            multi_az=False,
            # cloudwatch_logs_exports=["error", "general", "slowquery"], # Only for MySQLs
            instance_type=_ec2.InstanceType.of(
                _ec2.InstanceClass.BURSTABLE2,
                _ec2.InstanceSize.MICRO
            ),
            # instance_type=_ec2.InstanceType(
            #     instance_type_identifier=rds_instance_size
            # ),
            parameter_group=self.store_events_db_params_group,
            removal_policy=cdk.RemovalPolicy.DESTROY,
            deletion_protection=False,
            delete_automated_backups=True,
            backup_retention=cdk.Duration.days(7)
            # , enable_performance_insights=True
        )

        # Let us configure performance insights
        if enable_perf_insights:
            add_pi = self.store_events_db.node.default_child
            add_pi.add_override(
                "Properties.EnablePerformanceInsights", True)
        # Let us configure multi-AZ high availability
        if enable_multi_az:
            add_ha = self.store_events_db.node.default_child
            add_ha.add_override(
                "Properties.MultiAZ", True)

        # store_events_db.connections.allow_from(
        #     other=_ec2.Peer.ipv4(vpc.vpc_cidr_block),
        #     port_range=_ec2.Port.tcp(3306),
        #     description="Allow Incoming DB Traffic from within VPC"
        # )

        self.store_events_db_endpoint = self.store_events_db.db_instance_endpoint_address

        ###########################################
        ################# OUTPUTS #################
        ###########################################

        output_0 = cdk.CfnOutput(
            self,
            "AutomationFrom",
            value=f"{GlobalArgs.SOURCE_INFO}",
            description="To know more about this automation stack, check out our github page."
        )

        output_1 = cdk.CfnOutput(
            self,
            "StoreEventsDatabase",
            value=f"https://console.aws.amazon.com/rds/home?region={cdk.Aws.REGION}#dbinstance:id={self.store_events_db.instance_identifier}",
            description="Store Events Database in RDS"
        )
        output_2 = cdk.CfnOutput(
            self,
            "DatabaseConnectionCommand",
            value=f"psql -h {self.store_events_db_endpoint} -P 5432 -u mystiquemaster -p",
            description="Connect to the database using this command"
        )
        output_3 = cdk.CfnOutput(
            self,
            "StoreEventsDatabaseSecretArn",
            value=f"{self.db_secret.secret_full_arn}",
            description="The credentials to connect to Store events database"
        )
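        # A consumer-side sketch (not part of this stack): the secret ARN and
        # endpoint emitted above could be used to open a connection, assuming
        # the secret template carries a "username" key and psycopg2 is
        # available. Placeholders in angle brackets are illustrative only.
        #
        #   import json, boto3, psycopg2
        #   sm = boto3.client("secretsmanager")
        #   sec = json.loads(sm.get_secret_value(
        #       SecretId="<StoreEventsDatabaseSecretArn>")["SecretString"])
        #   conn = psycopg2.connect(
        #       host="<store_events_db_endpoint>", port=5432,
        #       user=sec["username"], password=sec["password"],
        #       dbname="<db_name>")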
Example #15
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here

        # Look up the VPC
        # vpc = ec2.Vpc.from_lookup(self, 'default', is_default=True, vpc_name='default')
        vpc = ec2.Vpc.from_lookup(self,
                                  'dms-vpc',
                                  vpc_id='vpc-08b56fb6053ca2c75')

        # Create the RDS parameter group
        db_parameter = rds.ParameterGroup(
            self,
            'dms-param-mysql5.7',
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7),
            parameters={"binlog_format": "ROW"})

        # sourceDB = rds.DatabaseInstanceFromSnapshot(
        #     self,'dms-rds-source',
        #     snapshot_identifier= 'tickets-mysql57',
        #     engine=rds.DatabaseInstanceEngine.MYSQL,
        #     instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3,ec2.InstanceSize.MEDIUM),
        #     vpc=vpc,
        #     parameter_group=db_parameter
        #     )

        # sourceDB = rds.DatabaseInstance(
        #     self,'dms-rds-source',
        #     #instance_identifier='dms-rds-source',
        #     engine=rds.DatabaseInstanceEngine.mysql(
        #         version=rds.MysqlEngineVersion.VER_5_7
        #     ),
        #     instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3,ec2.InstanceSize.MEDIUM),
        #     vpc=vpc,
        #     parameter_group=db_parameter,
        #     #credentials=rdsPasswordSecret
        #     )

        # sourceDB.connections.allow_default_port_internally()

        dms_rep = dms.CfnReplicationInstance(
            self,
            'dms-replication',
            replication_instance_class='dms.c5.large',
            engine_version='3.4.0')

        stream = kinesis.Stream(self, 'dms-stream')

        streamWriteRole = iam.Role(
            self,
            'dms-stream-role',
            assumed_by=iam.ServicePrincipal('dms.amazonaws.com'))

        streamWriteRole.add_to_policy(
            iam.PolicyStatement(resources=[stream.stream_arn],
                                actions=[
                                    'kinesis:DescribeStream',
                                    'kinesis:PutRecord', 'kinesis:PutRecords'
                                ]))
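        # DescribeStream/PutRecord/PutRecords are the Kinesis permissions DMS
        # needs for a Kinesis target; the role is wired into the target
        # endpoint below as its service-access role.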

        source = dms.CfnEndpoint(
            self,
            'dms-source',
            endpoint_type='source',
            engine_name='mysql',
            username='******',
            password='******',
            server_name="dms-rdssource.c7iucbqgd2xo.us-east-1.rds.amazonaws.com",
            port=3306)

        target = dms.CfnEndpoint(self,
                                 'dms-target',
                                 endpoint_type='target',
                                 engine_name='kinesis',
                                 kinesis_settings={
                                     "messageFormat": "JSON",
                                     "streamArn": stream.stream_arn,
                                     "serviceAccessRoleArn": streamWriteRole.role_arn
                                 })

        dmsTableMappings = {
            "rules": [{
                "rule-type": "selection",
                "rule-id": "1",
                "rule-name": "1",
                "object-locator": {
                    "schema-name": "dms_sample",
                    "table-name": "t_log_levelup"
                },
                "rule-action": "include"
            }]
        }

        dms.CfnReplicationTask(self,
                               'dms-stream-repTask',
                               replication_instance_arn=dms_rep.ref,
                               migration_type='full-load-and-cdc',
                               source_endpoint_arn=source.ref,
                               target_endpoint_arn=target.ref,
                               table_mappings=json.dumps(dmsTableMappings))
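        # A CloudFormation-created replication task is provisioned but not
        # started; one way to kick it off afterwards (a sketch using boto3,
        # with the task ARN left as a placeholder):
        #
        #   import boto3
        #   boto3.client('dms').start_replication_task(
        #       ReplicationTaskArn='<task-arn>',
        #       StartReplicationTaskType='start-replication')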

        analyticsRole = iam.Role(
            self,
            'KinesisAnalyticsRole',
            assumed_by=iam.ServicePrincipal('kinesisanalytics.amazonaws.com'))

        kinesisanalytics.CfnApplicationV2(
            self,
            'KinesisAnalytics',
            application_name='dms-stream-analytics',
            service_execution_role=analyticsRole.role_arn,
            runtime_environment='SQL-1_0',
            application_configuration={
                'sqlApplicationConfiguration': {
                    'inputs': [{
                        'namePrefix': "exampleNamePrefix",
                        'inputSchema': {
                            'recordColumns': [{
                                'name': "example",
                                'sqlType': "VARCHAR(16)",
                                'mapping': "$.example"
                            }],
                            'recordFormat': {
                                'recordFormatType': "JSON",
                                'mappingParameters': {
                                    'jsonMappingParameters': {
                                        'recordRowPath': "$"
                                    }
                                }
                            }
                        },
                        'kinesisStreamsInput': {
                            'resourceArn': stream.stream_arn
                        }
                    }]
                },
                'applicationCodeConfiguration': {
                    'codeContent': {
                        'textContent': "Example Application Code"
                    },
                    'codeContentType': "PLAINTEXT"
                }
            })
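        # The "textContent" above is only a placeholder. A real SQL-1_0
        # application would declare an in-application stream and a pump; a
        # hypothetical sketch (stream and column names are illustrative,
        # "exampleNamePrefix_001" being the name Kinesis Analytics derives
        # from the namePrefix configured above):
        #
        #   CREATE OR REPLACE STREAM "DEST_STREAM" ("example" VARCHAR(16));
        #   CREATE OR REPLACE PUMP "STREAM_PUMP" AS
        #       INSERT INTO "DEST_STREAM"
        #       SELECT STREAM "example" FROM "exampleNamePrefix_001";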