def _create_rds(self):
        """
        Create the RDS MySQL instance (and its subnet group) used for training.
        """
        subnet_group = rds.SubnetGroup(
            self,
            "RdsSubnetGroup",
            description="for RDS",
            vpc=self.vpc,
            removal_policy=RemovalPolicy.DESTROY,
            subnet_group_name=f"{self.name_prefix}-sg-rds",
            vpc_subnets=self.subnet_selection)

        self.rds = rds.DatabaseInstance(
            self,
            f"{self.name_prefix}-database",
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_8_0_16),
            vpc=self.vpc,
            port=3306,
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.MEMORY4,
                                              ec2.InstanceSize.LARGE),
            removal_policy=RemovalPolicy.DESTROY,
            deletion_protection=False,
            security_groups=[self.security_group],
            subnet_group=subnet_group)
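        # A minimal sketch of the attributes this method assumes on the
        # enclosing stack (these names are assumptions, not shown here):
        #
        #   self.vpc = ec2.Vpc(self, "Vpc", max_azs=2)
        #   self.security_group = ec2.SecurityGroup(self, "RdsSg", vpc=self.vpc)
        #   self.subnet_selection = ec2.SubnetSelection(
        #       subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)
        #   self.name_prefix = "training"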
Example #2
 def _set_db_infra(self) -> None:
     self.db_username = "******"
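     # NOTE: despite the `ssm` alias, Secret and SecretStringGenerator live in
     # aws_cdk.aws_secretsmanager, so that module is assumed to be imported as
     # `ssm` here.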
     # fmt: off
     self.db_password_secret = ssm.Secret(
         self,
         "db-password-secret",
         secret_name="aws-data-wrangler/db_password",
         generate_secret_string=ssm.SecretStringGenerator(
             exclude_characters="/@\"\' \\"),
     ).secret_value
     # fmt: on
     self.db_password = self.db_password_secret.to_string()
     self.db_security_group = ec2.SecurityGroup(
         self,
         "aws-data-wrangler-database-sg",
         vpc=self.vpc,
         description=
         "AWS Data Wrangler Test Arena - Database security group",
     )
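     # Wide-open ingress (any IPv4, all traffic) is only tolerable because this
     # is a disposable test arena; scope this rule down for anything real.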
     self.db_security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                             ec2.Port.all_traffic())
     self.rds_subnet_group = rds.SubnetGroup(
         self,
         "aws-data-wrangler-rds-subnet-group",
         description="RDS Database Subnet Group",
         vpc=self.vpc,
         vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
     )
     self.rds_role = iam.Role(
         self,
         "aws-data-wrangler-rds-role",
         assumed_by=iam.ServicePrincipal("rds.amazonaws.com"),
         inline_policies={
             "S3":
             iam.PolicyDocument(statements=[
                 iam.PolicyStatement(
                     effect=iam.Effect.ALLOW,
                     actions=[
                         "s3:Get*",
                         "s3:List*",
                         "s3:Put*",
                         "s3:AbortMultipartUpload",
                     ],
                     resources=[
                         self.bucket.bucket_arn,
                         f"{self.bucket.bucket_arn}/*",
                     ],
                 )
             ]),
         },
     )
     cdk.CfnOutput(self, "DatabasesUsername", value=self.db_username)
     cdk.CfnOutput(
         self,
         "DatabaseSecurityGroupId",
         value=self.db_security_group.security_group_id,
     )
Example #3
    def __init__(self, scope: core.Construct, **kwargs) -> None:
        self.deploy_env = active_environment
        super().__init__(scope, id=f"{self.deploy_env.value}-common-stack", **kwargs)

        self.custom_vpc = ec2.Vpc(self, f"vpc-{self.deploy_env.value}")

        self.orders_rds_sg = ec2.SecurityGroup(
            self,
            f"orders-{self.deploy_env.value}-sg",
            vpc=self.custom_vpc,
            allow_all_outbound=True,
            security_group_name=f"orders-{self.deploy_env.value}-sg",
        )

        self.orders_rds_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4("0.0.0.0/0"), connection=ec2.Port.tcp(5432)
        )

        for subnet in self.custom_vpc.private_subnets:
            self.orders_rds_sg.add_ingress_rule(
                peer=ec2.Peer.ipv4(subnet.ipv4_cidr_block), connection=ec2.Port.tcp(5432)
            )

        self.orders_rds_parameter_group = rds.ParameterGroup(
            self,
            f"orders-{self.deploy_env.value}-rds-parameter-group",
            description="Parameter group to allow CDC from RDS using DMS.",
            engine=rds.DatabaseInstanceEngine.postgres(
                version=rds.PostgresEngineVersion.VER_12_4
            ),
            parameters={"rds.logical_replication": "1", "wal_sender_timeout": "0"},
        )
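        # These two parameters are what DMS change data capture needs:
        # rds.logical_replication=1 enables PostgreSQL logical decoding, and
        # wal_sender_timeout=0 keeps long-lived replication connections from
        # being dropped.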

        self.orders_rds = rds.DatabaseInstance(
            self,
            f"orders-{self.deploy_env.value}-rds",
            engine=rds.DatabaseInstanceEngine.postgres(
                version=rds.PostgresEngineVersion.VER_12_4
            ),
            database_name="orders",
            instance_type=ec2.InstanceType("t3.micro"),
            vpc=self.custom_vpc,
            instance_identifier=f"rds-{self.deploy_env.value}-orders-db",
            port=5432,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            subnet_group=rds.SubnetGroup(
                self,
                f"rds-{self.deploy_env.value}-subnet",
                description="place RDS on public subnet",
                vpc=self.custom_vpc,
                vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            ),
            parameter_group=self.orders_rds_parameter_group,
            security_groups=[self.orders_rds_sg],
            removal_policy=core.RemovalPolicy.DESTROY,
        )
Example #4
    def __init__(self, scope: core.Construct, environment: Environment, **kwargs) -> None:
        self.env = environment.value
        super().__init__(scope, id=f'{self.env}-common', **kwargs)

        self.custom_vpc = ec2.Vpc(
            self,
            f'vpc-{self.env}'
        )

        self.orders_rds_sg = ec2.SecurityGroup(
            self,
            f'orders-{self.env}-sg',
            vpc=self.custom_vpc,
            allow_all_outbound=True,
            security_group_name=f'orders-{self.env}-sg',
        )

        self.orders_rds_sg.add_ingress_rule(
            peer=ec2.Peer.ipv4('37.156.75.55/32'),
            connection=ec2.Port.tcp(5432)
        )

        for subnet in self.custom_vpc.private_subnets:
            self.orders_rds_sg.add_ingress_rule(
                peer=ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                connection=ec2.Port.tcp(5432)
            )

        self.orders_rds = rds.DatabaseInstance(
            self,
            f'orders-{self.env}-rds',
            engine=rds.DatabaseInstanceEngine.postgres(version=rds.PostgresEngineVersion.VER_11_2),
            database_name='orders',
            instance_type=ec2.InstanceType('t3.micro'),
            vpc=self.custom_vpc,
            instance_identifier=f'rds-{self.env}-orders-db',
            port=5432,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            subnet_group=rds.SubnetGroup(
                self,
                f'rds-{self.env}-subnet',
                description='place RDS on public subnet',
                vpc=self.custom_vpc,
                vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)
            ),
            security_groups=[
                self.orders_rds_sg
            ],
            removal_policy=core.RemovalPolicy.DESTROY,
        )
Example #5
    def __init__(self, scope: core.Construct, id: str, bmt_vpc: ec2.Vpc,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        db_subnet_group = rds.SubnetGroup(
            self,
            'Aurora',
            description='aurora subnet group',
            vpc=bmt_vpc,
            removal_policy=core.RemovalPolicy.DESTROY,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED))

        db_security_group = ec2.SecurityGroup(self, 'aurora-sg', vpc=bmt_vpc)

        db_security_group.add_ingress_rule(
            peer=ec2.Peer.ipv4('10.100.0.0/16'),
            connection=ec2.Port(
                protocol=ec2.Protocol.TCP,
                string_representation="allow MySQL (3306) from inside the VPC",
                from_port=3306,
                to_port=3306))

        param_group = rds.ParameterGroup(
            self,
            'bmt-aurora-param',
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL)
        param_group.add_parameter("performance_schema", "1")

        rds.DatabaseCluster(
            self,
            'bmt-aurora-cluster',
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_1),
            instance_props=rds.InstanceProps(
                vpc=bmt_vpc,
                instance_type=ec2.InstanceType.of(
                    instance_class=ec2.InstanceClass.BURSTABLE3,
                    instance_size=ec2.InstanceSize.MEDIUM),
                security_groups=[db_security_group]),
            instances=1,
            subnet_group=db_subnet_group,
            parameter_group=param_group,
            removal_policy=core.RemovalPolicy.DESTROY)
Example #6
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc: ec2.Vpc,
                 asg_sg,
                 stage={},
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prefix_name = f'{stage["vpc_prefix"]}-{stage["stage_name"]}-{self.node.try_get_context("customer")}'

        self._rds_subnet_group = rds.SubnetGroup(
            self,
            f'{prefix_name}-rds-subnet-group',
            description="Subnet group for the Aurora MySQL cluster",
            subnet_group_name=f'{prefix_name}-aurora-mysql',
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED),
            vpc=vpc)

        self._rds_cluster = rds.DatabaseCluster(
            self,
            f'{prefix_name}-rds-cluster',
            cluster_identifier=f'{prefix_name}-rds-cluster',
            credentials=rds.Credentials.from_generated_secret("admin"),
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                                  ec2.InstanceSize.SMALL),
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.ISOLATED)),
            port=3306,
            default_database_name=self.node.try_get_context("customer"),
            subnet_group=self._rds_subnet_group)

        for sg in asg_sg:
            self._rds_cluster.connections.allow_default_port_from(
                sg, "Allow EC2 ASG access to RDS MySQL")
Example #7
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # create s3 bucket
        s3_Bucket = s3.Bucket(self,
                              "AWS-Cookbook-Recipe-404",
                              removal_policy=RemovalPolicy.DESTROY,
                              auto_delete_objects=True)

        aws_s3_deployment.BucketDeployment(
            self,
            'S3Deployment',
            destination_bucket=s3_Bucket,
            sources=[aws_s3_deployment.Source.asset("./s3_content")],
            retain_on_delete=False)

        isolated_subnets = ec2.SubnetConfiguration(
            name="ISOLATED",
            subnet_type=ec2.SubnetType.PRIVATE_ISOLATED,
            cidr_mask=24)

        # create VPC
        vpc = ec2.Vpc(self,
                      'AWS-Cookbook-VPC',
                      cidr='10.10.0.0/23',
                      subnet_configuration=[isolated_subnets])

        vpc.add_interface_endpoint(
            'VPCSecretsManagerInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService(
                'secretsmanager'
            ),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False, subnet_type=ec2.SubnetType.PRIVATE_ISOLATED),
        )

        vpc.add_gateway_endpoint(
            's3GateWayEndPoint',
            service=ec2.GatewayVpcEndpointAwsService('s3'),
            subnets=[
                ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)
            ],
        )

        subnet_group = rds.SubnetGroup(
            self,
            'rds_subnet_group',
            description='VPC Subnet Group for RDS',
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                one_per_az=False, subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))

        rds_instance = rds.DatabaseInstance(
            self,
            'DBInstance',
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_8_0_23),
            instance_type=ec2.InstanceType("m5.large"),
            vpc=vpc,
            multi_az=False,
            database_name='AWSCookbookRecipe404',
            instance_identifier='awscookbook404db-orig',
            delete_automated_backups=True,
            deletion_protection=False,
            # iam_authentication=
            removal_policy=RemovalPolicy.DESTROY,
            allocated_storage=8,
            subnet_group=subnet_group)

        # mkdir -p lambda-layers/sqlparse/python
        # cd lambda-layers/sqlparse/python
        # pip install sqlparse --target="."
        # cd ../../../

        # create Lambda Layer
        sqlparse = aws_lambda.LayerVersion(
            self,
            "sqlparse",
            code=aws_lambda.AssetCode('lambda-layers/sqlparse'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="sqlparse",
            license=
            "https://github.com/andialbrecht/sqlparse/blob/master/LICENSE")

        pymysql = aws_lambda.LayerVersion(
            self,
            "pymysql",
            code=aws_lambda.AssetCode('lambda-layers/pymysql'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="pymysql",
            license="MIT")

        smartopen = aws_lambda.LayerVersion(
            self,
            "smartopen",
            code=aws_lambda.AssetCode('lambda-layers/smart_open'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="smartopen",
            license="MIT")

        lambda_function = aws_lambda.Function(
            self,
            'LambdaRDS',
            code=aws_lambda.AssetCode("./mysql-lambda/"),
            handler="lambda_function.lambda_handler",
            environment={
                "DB_SECRET_ARN": rds_instance.secret.secret_arn,
                "S3_BUCKET": s3_Bucket.bucket_name
            },
            layers=[sqlparse, pymysql, smartopen],
            memory_size=1024,
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            timeout=Duration.seconds(600),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))

        rds_instance.secret.grant_read(lambda_function)

        rds_instance.connections.allow_from(lambda_function.connections,
                                            ec2.Port.tcp(3306), "Ingress")

        s3_Bucket.grant_read(lambda_function)

        create_params = {
            "FunctionName": lambda_function.function_arn,
        }

        on_create = custom_resources.AwsSdkCall(
            action='invoke',
            service='Lambda',
            parameters=create_params,
            physical_resource_id=custom_resources.PhysicalResourceId.of(
                'LambdaRDS'))
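        # The AwsCustomResource below invokes the Lambda function once at stack
        # creation time, so the SQL content staged in S3 is loaded into the
        # fresh RDS instance as part of the deployment.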

        policy_statement = iam.PolicyStatement(
            actions=["lambda:InvokeFunction"],
            effect=iam.Effect.ALLOW,
            resources=[lambda_function.function_arn],
        )

        policy = custom_resources.AwsCustomResourcePolicy.from_statements(
            statements=[policy_statement])

        custom_resources.AwsCustomResource(
            self,
            'CustomResource',
            policy=policy,
            on_create=on_create,
            log_retention=logs.RetentionDays.TWO_WEEKS)

        # outputs

        CfnOutput(self, 'RdsSubnetGroup', value=subnet_group.subnet_group_name)

        CfnOutput(self,
                  'RdsDatabaseId',
                  value=rds_instance.instance_identifier)
Example #8
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        vpc_name = self.node.try_get_context("vpc_name")
        vpc = aws_ec2.Vpc.from_lookup(self,
                                      "ExistingVPC",
                                      is_default=True,
                                      vpc_name=vpc_name)

        sg_postgresql_client = aws_ec2.SecurityGroup(
            self,
            'PostgreSQLClientSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for postgresql client',
            security_group_name='default-postgresql-client-sg')
        cdk.Tags.of(sg_postgresql_client).add('Name',
                                              'default-postgresql-client-sg')

        sg_postgresql_server = aws_ec2.SecurityGroup(
            self,
            'PostgreSQLServerSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for postgresql',
            security_group_name='default-postgresql-server-sg')
        sg_postgresql_server.add_ingress_rule(
            peer=sg_postgresql_client,
            connection=aws_ec2.Port.tcp(5432),
            description='default-postgresql-client-sg')
        sg_postgresql_server.add_ingress_rule(
            peer=sg_postgresql_server,
            connection=aws_ec2.Port.all_tcp(),
            description='default-postgresql-server-sg')
        cdk.Tags.of(sg_postgresql_server).add('Name',
                                              'default-postgresql-server-sg')

        rds_subnet_group = aws_rds.SubnetGroup(
            self,
            'PostgreSQLSubnetGroup',
            description='subnet group for postgresql',
            subnet_group_name='aurora-postgresql',
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT),
            vpc=vpc)

        db_cluster_name = self.node.try_get_context('db_cluster_name')
        #XXX: aws_rds.Credentials.from_username(username, ...) cannot be given a user-specific Secret name;
        # therefore, create the Secret first and then use it to create the database
        db_secret_name = self.node.try_get_context('db_secret_name')
        #XXX: arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
        db_secret_arn = 'arn:aws:secretsmanager:{region}:{account}:secret:{resource_name}'.format(
            region=cdk.Aws.REGION,
            account=cdk.Aws.ACCOUNT_ID,
            resource_name=db_secret_name)
        db_secret = aws_secretsmanager.Secret.from_secret_partial_arn(
            self, 'DBSecretFromArn', db_secret_arn)
        rds_credentials = aws_rds.Credentials.from_secret(db_secret)
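        # from_secret_partial_arn() is used because the ARN assembled above
        # lacks the random 6-character suffix Secrets Manager appends to full
        # secret ARNs; CDK can still resolve the secret from the partial ARN.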

        rds_engine = aws_rds.DatabaseClusterEngine.aurora_postgres(
            version=aws_rds.AuroraPostgresEngineVersion.VER_13_4)

        #XXX: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Reference.ParameterGroups.html#AuroraPostgreSQL.Reference.Parameters.Cluster
        rds_cluster_param_group = aws_rds.ParameterGroup(
            self,
            'AuroraPostgreSQLClusterParamGroup',
            engine=rds_engine,
            description=
            'Custom cluster parameter group for aurora-postgresql13',
            parameters={
                'log_min_duration_statement': '15000',  # 15 sec
                'default_transaction_isolation': 'read committed',
                'client_encoding': 'UTF8'
            })

        #XXX: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Reference.ParameterGroups.html#AuroraPostgreSQL.Reference.Parameters.Instance
        rds_db_param_group = aws_rds.ParameterGroup(
            self,
            'AuroraPostgreSQLDBParamGroup',
            engine=rds_engine,
            description='Custom parameter group for aurora-postgresql13',
            parameters={
                'log_min_duration_statement': '15000',  # 15 sec
                'default_transaction_isolation': 'read committed'
            })

        db_cluster = aws_rds.DatabaseCluster(
            self,
            'Database',
            engine=rds_engine,
            credentials=rds_credentials,  # username and password are taken from the Secrets Manager secret above
            instance_props={
                'instance_type':
                aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3,
                                        aws_ec2.InstanceSize.MEDIUM),
                'parameter_group':
                rds_db_param_group,
                'vpc_subnets': {
                    'subnet_type': aws_ec2.SubnetType.PRIVATE_WITH_NAT
                },
                'vpc':
                vpc,
                'auto_minor_version_upgrade':
                False,
                'security_groups': [sg_postgresql_server]
            },
            instances=2,
            parameter_group=rds_cluster_param_group,
            cloudwatch_logs_retention=aws_logs.RetentionDays.THREE_DAYS,
            cluster_identifier=db_cluster_name,
            subnet_group=rds_subnet_group,
            backup=aws_rds.BackupProps(retention=cdk.Duration.days(3),
                                       preferred_window="03:00-04:00"))

        cdk.CfnOutput(self,
                      'DBClusterEndpoint',
                      value=db_cluster.cluster_endpoint.socket_address,
                      export_name='DBClusterEndpoint')
        cdk.CfnOutput(self,
                      'DBClusterReadEndpoint',
                      value=db_cluster.cluster_read_endpoint.socket_address,
                      export_name='DBClusterReadEndpoint')
Example #9
  def __init__(self, scope: Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    vpc_name = self.node.try_get_context("vpc_name")
    vpc = aws_ec2.Vpc.from_lookup(self, "ExistingVPC",
      is_default=True,
      vpc_name=vpc_name)

    sg_use_mysql = aws_ec2.SecurityGroup(self, 'MySQLClientSG',
      vpc=vpc,
      allow_all_outbound=True,
      description='security group for mysql client',
      security_group_name='default-mysql-client-sg'
    )
    cdk.Tags.of(sg_use_mysql).add('Name', 'default-mysql-client-sg')

    sg_mysql_server = aws_ec2.SecurityGroup(self, 'MySQLServerSG',
      vpc=vpc,
      allow_all_outbound=True,
      description='security group for mysql',
      security_group_name='default-mysql-server-sg'
    )
    sg_mysql_server.add_ingress_rule(peer=sg_use_mysql, connection=aws_ec2.Port.tcp(3306),
      description='default-mysql-client-sg')
    sg_mysql_server.add_ingress_rule(peer=sg_mysql_server, connection=aws_ec2.Port.all_tcp(),
      description='default-mysql-server-sg')
    cdk.Tags.of(sg_mysql_server).add('Name', 'default-mysql-server-sg')

    rds_subnet_group = aws_rds.SubnetGroup(self, 'MySQLSubnetGroup',
      description='subnet group for mysql',
      subnet_group_name='aurora-mysql',
      vpc_subnets=aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT),
      vpc=vpc
    )

    rds_engine = aws_rds.DatabaseClusterEngine.aurora_mysql(version=aws_rds.AuroraMysqlEngineVersion.VER_3_01_0)

    #XXX: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Reference.html#AuroraMySQL.Reference.Parameters.Cluster
    rds_cluster_param_group = aws_rds.ParameterGroup(self, 'AuroraMySQLClusterParamGroup',
      engine=rds_engine,
      description='Custom cluster parameter group for aurora-mysql8.x',
      parameters={
        # For Aurora MySQL version 3, Aurora always uses the default value of 1.
        # 'innodb_flush_log_at_trx_commit': '2',
        'slow_query_log': '1',
        # Removed from Aurora MySQL version 3.
        # 'tx_isolation': 'READ-COMMITTED',
        'wait_timeout': '300',
        'character-set-client-handshake': '0',
        'character_set_server': 'utf8mb4',
        'collation_server': 'utf8mb4_unicode_ci',
        'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
      }
    )

    #XXX: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Reference.html#AuroraMySQL.Reference.Parameters.Instance
    rds_db_param_group = aws_rds.ParameterGroup(self, 'AuroraMySQLDBParamGroup',
      engine=rds_engine,
      description='Custom parameter group for aurora-mysql8.x',
      parameters={
        'slow_query_log': '1',
        # Removed from Aurora MySQL version 3.
        # 'tx_isolation': 'READ-COMMITTED',
        'wait_timeout': '300',
        'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
      }
    )

    db_cluster_name = self.node.try_get_context('db_cluster_name')
    #XXX: aws_rds.Credentials.from_username(username, ...) cannot be given a user-specific Secret name;
    # therefore, create the Secret first and then use it to create the database
    db_secret_name = self.node.try_get_context('db_secret_name')
    #XXX: arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
    db_secret_arn = 'arn:aws:secretsmanager:{region}:{account}:secret:{resource_name}'.format(
      region=cdk.Aws.REGION, account=cdk.Aws.ACCOUNT_ID, resource_name=db_secret_name)
    db_secret = aws_secretsmanager.Secret.from_secret_partial_arn(self, 'DBSecretFromArn', db_secret_arn)
    rds_credentials = aws_rds.Credentials.from_secret(db_secret)

    db_cluster = aws_rds.DatabaseCluster(self, 'Database',
      engine=rds_engine,
      credentials=rds_credentials, # username and password are taken from the Secrets Manager secret above
      instance_props={
        'instance_type': aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3, aws_ec2.InstanceSize.MEDIUM),
        'parameter_group': rds_db_param_group,
        'vpc_subnets': {
          'subnet_type': aws_ec2.SubnetType.PRIVATE_WITH_NAT
        },
        'vpc': vpc,
        'auto_minor_version_upgrade': False,
        'security_groups': [sg_mysql_server]
      },
      instances=2,
      parameter_group=rds_cluster_param_group,
      cloudwatch_logs_retention=aws_logs.RetentionDays.THREE_DAYS,
      cluster_identifier=db_cluster_name,
      subnet_group=rds_subnet_group,
      backup=aws_rds.BackupProps(
        retention=cdk.Duration.days(3),
        preferred_window="03:00-04:00"
      )
    )

    cdk.CfnOutput(self, 'DBClusterEndpoint', value=db_cluster.cluster_endpoint.socket_address, export_name='DBClusterEndpoint')
    cdk.CfnOutput(self, 'DBClusterReadEndpoint', value=db_cluster.cluster_read_endpoint.socket_address, export_name='DBClusterReadEndpoint')
Example #10
    def __init__(self, scope: core.Construct, id: str, env, props, cluster=False, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #TEMP without ASG
        # security_groups = [ec2.SecurityGroup(
        #         self,
        #         id="ecs-sample-mysql",
        #         vpc=props['vpc'],
        #         security_group_name="ecs-sample-mysql"
        # )]


        vpc = props['vpc']
        security_groups=[props['sg_rds']]
        credential = rds.Credentials.from_username(username="******")
        private_subnet_selections = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)
        subnet_group = rds.SubnetGroup(self, "sample-rds-subnet-group",
                                       vpc=vpc,
                                       subnet_group_name="sample-rds-subnet-group",
                                       vpc_subnets=private_subnet_selections,
                                       description="sample-rds-subnet-group")
        self.output_props = props.copy()

        if not cluster:
            rds_instance = rds.DatabaseInstance(
                self, "RDS-instance",
                database_name="sample",
                engine=rds.DatabaseInstanceEngine.mysql(
                    version=rds.MysqlEngineVersion.VER_8_0_16
                ),
                credentials=credential,
                instance_identifier="ecs-sample-db",

                vpc=vpc,
                port=3306,
                instance_type=ec2.InstanceType.of(
                    ec2.InstanceClass.BURSTABLE3,
                    ec2.InstanceSize.MICRO,
                ),
                subnet_group=subnet_group,
                vpc_subnets=private_subnet_selections,
                removal_policy=core.RemovalPolicy.DESTROY,
                deletion_protection=False,
                security_groups=security_groups

            )
            core.CfnOutput(self, "RDS_instnace_endpoint", value=rds_instance.db_instance_endpoint_address)
            self.output_props['rds'] = rds_instance

        else:
            instance_props = rds.InstanceProps(
                vpc=vpc,
                security_groups=security_groups,
                vpc_subnets=private_subnet_selections
            )
            rds_cluster = rds.DatabaseCluster(
                self, "RDS-cluster",
                cluster_identifier="ecs-sample-db-cluster",
                instance_props=instance_props,
                engine=rds.DatabaseClusterEngine.aurora_mysql(
                    version=rds.AuroraMysqlEngineVersion.VER_2_07_1
                ),
                credentials=credential,
                default_database_name="sample",
                instances=1,
                subnet_group=subnet_group,
                removal_policy=core.RemovalPolicy.DESTROY,
                deletion_protection=False
            )
            core.CfnOutput(self, "RDS_cluster_endpoint", value=rds_cluster.cluster_endpoint.hostname)
            self.output_props['rds'] = rds_cluster
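        # Usage sketch (assumed): passing cluster=True selects the Aurora MySQL
        # branch above instead of the single MySQL instance, e.g.
        #   RdsStack(app, 'rds', env=env, props=props, cluster=True)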
Example #11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        demo_vpc = aws_ec2.Vpc(self,
                               "network",
                               cidr="10.10.0.0/16",
                               max_azs=2,
                               subnet_configuration=[])

        demo_subnets = []
        demo_subnets.append(
            aws_ec2.Subnet(self,
                           'sbn-demo-1',
                           availability_zone=demo_vpc.availability_zones[0],
                           vpc_id=demo_vpc.vpc_id,
                           cidr_block='10.10.0.0/25'))
        demo_subnets.append(
            aws_ec2.Subnet(self,
                           'sbn-demo-2',
                           availability_zone=demo_vpc.availability_zones[1],
                           vpc_id=demo_vpc.vpc_id,
                           cidr_block='10.10.0.128/25'))
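        # Subnets are created by hand (with subnet_configuration=[] above) so
        # the VPC gets no NAT or internet gateway; the workloads stay fully
        # private and reach Secrets Manager via the interface endpoint below.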

        demo_vpc.add_interface_endpoint(
            'secretmanager',
            service=aws_ec2.InterfaceVpcEndpointAwsService.SECRETS_MANAGER,
            subnets=aws_ec2.SubnetSelection(subnets=demo_subnets))

        db_subnet_group = aws_rds.SubnetGroup(
            self,
            'sbng-demo-rds',
            description='demo db subnet group',
            vpc=demo_vpc,
            removal_policy=core.RemovalPolicy.DESTROY,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=demo_subnets))

        db_security_group = aws_ec2.SecurityGroup(self,
                                                  'sg-demo-rds',
                                                  vpc=demo_vpc)

        db_security_group.add_ingress_rule(
            peer=aws_ec2.Peer.ipv4('10.10.0.0/16'),
            connection=aws_ec2.Port(
                protocol=aws_ec2.Protocol.TCP,
                string_representation="allow MySQL (3306) from inside the VPC",
                from_port=3306,
                to_port=3306))

        mysql_instance = aws_rds.DatabaseInstance(
            self,
            'mys-demo-rds',
            engine=aws_rds.DatabaseInstanceEngine.MYSQL,
            vpc=demo_vpc,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=demo_subnets),
            security_groups=[db_security_group],
            iam_authentication=True)
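        # iam_authentication=True lets database users authenticate with IAM
        # tokens instead of passwords; the rds-db:connect permission granted to
        # role_test_db below depends on this setting.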

        db_secret = mysql_instance.secret

        role_init_db = aws_iam.Role(
            self,
            'cmd_role_init_src_db',
            assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"))

        lambda_base_policy_statement = aws_iam.PolicyStatement(
            resources=['*'],
            actions=[
                'logs:CreateLogGroup',
                'logs:CreateLogStream',
                'logs:PutLogEvents',
                "ec2:CreateNetworkInterface",
                "ec2:DescribeNetworkInterfaces",
                "ec2:DeleteNetworkInterface",
            ])

        role_init_db.add_to_policy(lambda_base_policy_statement)

        role_init_db.add_to_policy(
            aws_iam.PolicyStatement(resources=[db_secret.secret_arn],
                                    actions=[
                                        "secretsmanager:GetResourcePolicy",
                                        "secretsmanager:GetSecretValue",
                                        "secretsmanager:DescribeSecret",
                                        "secretsmanager:ListSecretVersionIds"
                                    ]))

        db_user = '******'

        func_init_db = aws_lambda.Function(
            self,
            'func_init_db',
            function_name='demo-rds_func_init_db',
            handler='handler.init',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.asset('./app_stack/func_init_db'),
            role=role_init_db,
            timeout=core.Duration.seconds(10),
            allow_public_subnet=False,
            vpc=demo_vpc,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=demo_subnets),
            environment={
                'db_secret': db_secret.secret_name,
                'db_user': db_user
            })

        # When role_name is specified as a static value, a KMS-related exception can occur:
        # a re-created Lambda function whose role has a static name may refer to a role that no longer exists.
        # https://github.com/serverless/examples/issues/279
        role_test_db = aws_iam.Role(
            self,
            'demo_role_test_db',
            assumed_by=aws_iam.ServicePrincipal("lambda.amazonaws.com"),
            role_name=db_user)
        role_test_db.add_to_policy(lambda_base_policy_statement)
        role_test_db.add_to_policy(
            aws_iam.PolicyStatement(resources=[
                'arn:aws:rds-db:%s:%s:dbuser:*/*' % (REGION, ACCOUNT)
            ],
                                    actions=[
                                        "rds-db:connect",
                                    ]))
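        # REGION and ACCOUNT are assumed to be module-level constants holding
        # the target region and account id; core.Aws.REGION and
        # core.Aws.ACCOUNT_ID would be the token-based alternative.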

        role_test_db.add_to_policy(
            aws_iam.PolicyStatement(resources=[db_secret.secret_arn],
                                    actions=[
                                        "secretsmanager:GetResourcePolicy",
                                        "secretsmanager:GetSecretValue",
                                        "secretsmanager:DescribeSecret",
                                        "secretsmanager:ListSecretVersionIds"
                                    ]))

        func_test_db = aws_lambda.Function(
            self,
            'func_test_db',
            function_name='demo-rds_test_iam_auth',
            handler='handler.init',
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            code=aws_lambda.Code.asset('./app_stack/func_test_db'),
            role=role_test_db,
            timeout=core.Duration.seconds(10),
            allow_public_subnet=False,
            vpc=demo_vpc,
            vpc_subnets=aws_ec2.SubnetSelection(subnets=demo_subnets),
            environment={
                'db_secret': db_secret.secret_name,
                'db_user': db_user
            })
Example #12
    def __init__(self, scope: core.Construct, id: str, props,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
        vpc = ec2.Vpc(self,
                      "vpc",
                      cidr=props['vpc_CIDR'],
                      max_azs=3,
                      subnet_configuration=[{
                          'cidrMask': 28,
                          'name': 'public',
                          'subnetType': ec2.SubnetType.PUBLIC
                      }, {
                          'cidrMask': 28,
                          'name': 'private',
                          'subnetType': ec2.SubnetType.PRIVATE
                      }, {
                          'cidrMask': 28,
                          'name': 'db',
                          'subnetType': ec2.SubnetType.ISOLATED
                      }])

        rds_subnetGroup = rds.SubnetGroup(
            self,
            "rds_subnetGroup",
            description=
            f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED))

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
        ##TODO:ADD Aurora Serverless Option
        rds_instance = rds.DatabaseCluster(
            self,
            'wordpress-db',
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_2),
            instances=1,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                enable_performance_insights=props[
                    'rds_enable_performance_insights'],
                instance_type=ec2.InstanceType(
                    instance_type_identifier=props['rds_instance_type'])),
            subnet_group=rds_subnetGroup,
            storage_encrypted=props['rds_storage_encrypted'],
            backup=rds.BackupProps(retention=core.Duration.days(
                props['rds_automated_backup_retention_days'])))

        EcsToRdsSeurityGroup = ec2.SecurityGroup(
            self,
            "EcsToRdsSeurityGroup",
            vpc=vpc,
            description="Allow WordPress containers to talk to RDS")

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
        db_cred_generator = _lambda.Function(
            self,
            'db_creds_generator',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='db_creds_generator.handler',
            code=_lambda.Code.asset('lambda/db_creds_generator'),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.ISOLATED
            ),  #vpc.select_subnets(subnet_type = ec2.SubnetType("ISOLATED")).subnets ,
            environment={
                'SECRET_NAME': rds_instance.secret.secret_name,
            })

        #Set Permissions and Sec Groups
        rds_instance.connections.allow_from(
            EcsToRdsSeurityGroup,
            ec2.Port.tcp(3306))  #Open hole to RDS in RDS SG

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
        file_system = efs.FileSystem(
            self,
            "MyEfsFileSystem",
            vpc=vpc,
            encrypted=True,  # file system is not encrypted by default
            lifecycle_policy=props['efs_lifecycle_policy'],
            performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode=efs.ThroughputMode.BURSTING,
            removal_policy=core.RemovalPolicy(props['efs_removal_policy']),
            enable_automatic_backups=props['efs_automatic_backups'])

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html?highlight=ecs%20cluster#aws_cdk.aws_ecs.Cluster
        cluster = ecs.Cluster(
            self,
            "Cluster",
            vpc=vpc,
            container_insights=props['ecs_enable_container_insights'])

        if props['deploy_bastion_host']:
            #ToDo: Deploy bastion host with a key file
            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
            bastion_host = ec2.BastionHostLinux(self, 'bastion_host', vpc=vpc)
            rds_instance.connections.allow_from(bastion_host,
                                                ec2.Port.tcp(3306))

            #######################
            ### Developer Tools ###
            # SFTP into the EFS Shared File System

            NetToolsSecret = secretsmanager.Secret(
                self,
                "NetToolsSecret",
                generate_secret_string=secretsmanager.SecretStringGenerator(
                    secret_string_template=json.dumps({
                        "username": '******',
                        "ip": ''
                    }),
                    generate_string_key="password",
                    exclude_characters='/"'))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
            AccessPoint = file_system.add_access_point(
                "access-point",
                path="/",
                create_acl=efs.Acl(
                    owner_uid=
                    "100",  #https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
                    owner_gid="101",
                    permissions="0755"))

            EfsVolume = ecs.Volume(
                name="efs",
                efs_volume_configuration=ecs.EfsVolumeConfiguration(
                    file_system_id=file_system.file_system_id,
                    transit_encryption="ENABLED",
                    authorization_config=ecs.AuthorizationConfig(
                        access_point_id=AccessPoint.access_point_id)))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
            NetToolsTask = ecs.FargateTaskDefinition(self,
                                                     "TaskDefinition",
                                                     cpu=256,
                                                     memory_limit_mib=512,
                                                     volumes=[EfsVolume])

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
            NetToolsContainer = NetToolsTask.add_container(
                "NetTools",
                image=ecs.ContainerImage.from_registry('netresearch/sftp'),
                command=['test:test:100:101:efs'])
            NetToolsContainer.add_port_mappings(
                ecs.PortMapping(container_port=22, protocol=ecs.Protocol.TCP))

            NetToolsContainer.add_mount_points(
                ecs.MountPoint(
                    container_path=
                    "/home/test/efs",  #ToDo build path out with username from secret
                    read_only=False,
                    source_volume=EfsVolume.name,
                ))

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html?highlight=fargateservice#aws_cdk.aws_ecs.FargateService
            service = ecs.FargateService(
                self,
                "Service",
                cluster=cluster,
                task_definition=NetToolsTask,
                platform_version=ecs.FargatePlatformVersion.VERSION1_4,  # Required for EFS
            )
            #ToDo somehow store container's IP on deploy

            #Allow traffic to EFS Volume from Net Tools container
            service.connections.allow_to(file_system, ec2.Port.tcp(2049))
            #ToDo allow bastion host into container on port 22

            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
            bastion_ip_locator = _lambda.Function(
                self,
                'bastion_ip_locator',
                function_name=
                f"{props['environment']}-{props['application']}-{props['unit']}-SFTP-IP",
                runtime=_lambda.Runtime.PYTHON_3_8,
                handler='bastion_ip_locator.handler',
                code=_lambda.Code.asset('lambda/bastion_ip_locator'),
                environment={
                    'CLUSTER_NAME': cluster.cluster_arn,
                    'SERVICE_NAME': service.service_name
                })

            #Give needed perms to bastion_ip_locator for reading info from ECS
            bastion_ip_locator.add_to_role_policy(
                iam.PolicyStatement(
                    actions=["ecs:DescribeTasks"],
                    resources=[
                        #f"arn:aws:ecs:us-east-1:348757191778:service/{cluster.cluster_name}/{service.service_name}",
                        f"arn:aws:ecs:us-east-1:348757191778:task/{cluster.cluster_name}/*"
                    ]))
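            # NOTE: the account id and region are hard-coded in this ARN;
            # f"arn:aws:ecs:{core.Aws.REGION}:{core.Aws.ACCOUNT_ID}:task/..."
            # would make the stack portable across accounts and regions.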
            bastion_ip_locator.add_to_role_policy(
                iam.PolicyStatement(actions=[
                    "ecs:ListTasks",
                ],
                                    resources=["*"],
                                    conditions={
                                        'ArnEquals': {
                                            'ecs:cluster': cluster.cluster_arn
                                        }
                                    }))

        self.output_props = props.copy()
        self.output_props["vpc"] = vpc
        self.output_props["rds_instance"] = rds_instance
        self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
        self.output_props["file_system"] = file_system
        self.output_props["cluster"] = cluster
Example #13
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # create s3 bucket
        s3_Bucket = s3.Bucket(
            self,
            "AWS-Cookbook-Recipe-405",
            removal_policy=RemovalPolicy.DESTROY
        )

        aws_s3_deployment.BucketDeployment(
            self,
            'S3Deployment',
            destination_bucket=s3_Bucket,
            sources=[aws_s3_deployment.Source.asset("./s3_content")],
            retain_on_delete=False
        )

        isolated_subnets = ec2.SubnetConfiguration(
            name="ISOLATED",
            subnet_type=ec2.SubnetType.PRIVATE_ISOLATED,
            cidr_mask=24
        )

        # create VPC
        vpc = ec2.Vpc(
            self,
            'AWS-Cookbook-VPC',
            cidr='10.10.0.0/23',
            subnet_configuration=[isolated_subnets]
        )

        vpc.add_interface_endpoint(
            'VPCSecretsManagerInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('secretsmanager'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        vpc.add_gateway_endpoint(
            's3GateWayEndPoint',
            service=ec2.GatewayVpcEndpointAwsService('s3'),
            subnets=[ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)],
        )

        subnet_group = rds.SubnetGroup(
            self,
            'rds_subnet_group',
            description='VPC Subnet Group for RDS',
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            )
        )

        rds_security_group = ec2.SecurityGroup(
            self,
            'rds_security_group',
            description='Security Group for the RDS Instance',
            allow_all_outbound=True,
            vpc=vpc
        )

        db_name = 'AWSCookbookRecipe405'

        rds_instance = rds.DatabaseInstance(
            self,
            'DBInstance',
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_8_0_23
            ),
            instance_type=ec2.InstanceType("m5.large"),
            vpc=vpc,
            multi_az=False,
            database_name=db_name,
            instance_identifier='awscookbook405db',
            delete_automated_backups=True,
            deletion_protection=False,
            removal_policy=RemovalPolicy.DESTROY,
            allocated_storage=8,
            subnet_group=subnet_group,
            security_groups=[rds_security_group]
        )

        # mkdir -p lambda-layers/pymysql/python
        # cd lambda-layers/pymysql/python
        # pip install pymysql --target="."
        # cd ../../../

        # create Lambda Layer (pymysql) to use for the RDS secret rotation function
        pymysql = aws_lambda.LayerVersion(
            self,
            "pymysql",
            code=aws_lambda.AssetCode('lambda-layers/pymysql'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="pymysql",
            license="MIT"
        )

        # -------- Begin EC2 Helper ---------
        vpc.add_interface_endpoint(
            'VPCSSMInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ssm'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        vpc.add_interface_endpoint(
            'VPCEC2MessagesInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ec2messages'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )

        vpc.add_interface_endpoint(
            'VPCSSMMessagesInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ssmmessages'),  # Find names with - aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False,
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED
            ),
        )
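        # The ssm, ec2messages and ssmmessages interface endpoints above are
        # the trio SSM Session Manager needs to manage an instance in isolated
        # subnets with no internet access.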

        ami = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE
        )

        iam_role = iam.Role(self, "InstanceSSM", assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        iam_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonEC2RoleforSSM"))

        instance = ec2.Instance(
            self,
            "Instance",
            instance_type=ec2.InstanceType("t3.nano"),
            machine_image=ami,
            role=iam_role,
            vpc=vpc,
        )

        CfnOutput(
            self,
            'InstanceId',
            value=instance.instance_id
        )
        # -------- End EC2 Helper ---------

        iam_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("SecretsManagerReadWrite"))

        rds_instance.connections.allow_from(
            instance.connections, ec2.Port.tcp(3306), "Ingress")

        # outputs

        CfnOutput(
            self,
            'VpcId',
            value=vpc.vpc_id
        )

        CfnOutput(
            self,
            'PyMysqlLambdaLayerArn',
            value=pymysql.layer_version_arn
        )

        CfnOutput(
            self,
            'RdsDatabaseId',
            value=rds_instance.instance_identifier
        )

        CfnOutput(
            self,
            'RdsSecurityGroup',
            value=rds_security_group.security_group_id
        )

        CfnOutput(
            self,
            'RdsEndpoint',
            value=rds_instance.db_instance_endpoint_address
        )

        isolated_subnets = vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)

        CfnOutput(
            self,
            'IsolatedSubnets',
            value=', '.join(map(str, isolated_subnets.subnet_ids))
        )

        CfnOutput(
            self,
            'DbName',
            value=db_name
        )

        CfnOutput(
            self,
            'RdsSecretArn',
            value=rds_instance.secret.secret_full_arn
        )
Example #14
    def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
        vpc = ec2.Vpc(self, "vpc",
            cidr=props['vpc_CIDR'],
            max_azs=3,
            subnet_configuration=[
                {
                    'cidrMask': 28,
                    'name': 'public',
                    'subnetType': ec2.SubnetType.PUBLIC
                },
                {
                    'cidrMask': 28,
                    'name': 'private',
                    'subnetType': ec2.SubnetType.PRIVATE
                },
                {
                    'cidrMask': 28,
                    'name': 'db',
                    'subnetType': ec2.SubnetType.ISOLATED
                }
            ]
        )

        rds_subnetGroup = rds.SubnetGroup(self, "rds_subnetGroup",
            description = f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
            vpc = vpc,
            vpc_subnets = ec2.SubnetSelection(subnet_type= ec2.SubnetType.ISOLATED)
        )

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
        ##TODO:ADD Aurora Serverless Option
        rds_instance = rds.DatabaseCluster(self,'wordpress-db',
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_2
            ),
            instances=1,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                enable_performance_insights=props['rds_enable_performance_insights'],
                instance_type=ec2.InstanceType(instance_type_identifier=props['rds_instance_type'])
            ),
            subnet_group=rds_subnetGroup,
            storage_encrypted=props['rds_storage_encrypted'],
            backup=rds.BackupProps(
                retention=core.Duration.days(props['rds_automated_backup_retention_days'])
            )
        )

        EcsToRdsSeurityGroup= ec2.SecurityGroup(self, "EcsToRdsSeurityGroup",
            vpc = vpc,
            description = "Allow WordPress containers to talk to RDS"
        )

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
        db_cred_generator = _lambda.Function(
            self, 'db_creds_generator',
            runtime=_lambda.Runtime.PYTHON_3_8,
            handler='db_creds_generator.handler',
            code=_lambda.Code.asset('lambda'),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type= ec2.SubnetType.ISOLATED),        #vpc.select_subnets(subnet_type = ec2.SubnetType("ISOLATED")).subnets ,
            environment={
                'SECRET_NAME': rds_instance.secret.secret_name,
            }
        )

        #Set Permissions and Sec Groups
        rds_instance.connections.allow_from(EcsToRdsSeurityGroup, ec2.Port.tcp(3306))   #Open hole to RDS in RDS SG

        #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
        file_system = efs.FileSystem(self, "MyEfsFileSystem",
            vpc = vpc,
            encrypted=True, # file system is not encrypted by default
            lifecycle_policy = props['efs_lifecycle_policy'],
            performance_mode = efs.PerformanceMode.GENERAL_PURPOSE,
            throughput_mode = efs.ThroughputMode.BURSTING,
            removal_policy = core.RemovalPolicy(props['efs_removal_policy']),
            enable_automatic_backups = props['efs_automatic_backups']
        )

        if props['deploy_bastion_host']:
            #https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
            bastion_host = ec2.BastionHostLinux(self, 'bastion_host',
                vpc = vpc
            )
            rds_instance.connections.allow_from(bastion_host, ec2.Port.tcp(3306))

        self.output_props = props.copy()
        self.output_props["vpc"] = vpc
        self.output_props["rds_instance"] = rds_instance
        self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
        self.output_props["file_system"] = file_system
Example #15
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # The code that defines your stack goes here
        vpc_name = self.node.try_get_context("vpc_name")
        vpc = aws_ec2.Vpc.from_lookup(self,
                                      "ExistingVPC",
                                      is_default=True,
                                      vpc_name=vpc_name)

        sg_use_mysql = aws_ec2.SecurityGroup(
            self,
            'MySQLClientSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for mysql client',
            security_group_name='use-mysql-sg')
        core.Tags.of(sg_use_mysql).add('Name', 'mysql-client-sg')

        sg_mysql_server = aws_ec2.SecurityGroup(
            self,
            'MySQLServerSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for mysql',
            security_group_name='mysql-server-sg')
        sg_mysql_server.add_ingress_rule(peer=sg_use_mysql,
                                         connection=aws_ec2.Port.tcp(3306),
                                         description='use-mysql-sg')
        core.Tags.of(sg_mysql_server).add('Name', 'mysql-server-sg')

        rds_subnet_group = aws_rds.SubnetGroup(
            self,
            'RdsSubnetGroup',
            description='subnet group for mysql',
            subnet_group_name='aurora-mysql',
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PRIVATE),
            vpc=vpc)

        rds_engine = aws_rds.DatabaseClusterEngine.aurora_mysql(
            version=aws_rds.AuroraMysqlEngineVersion.VER_2_08_1)

        rds_cluster_param_group = aws_rds.ParameterGroup(
            self,
            'AuroraMySQLClusterParamGroup',
            engine=rds_engine,
            description='Custom cluster parameter group for aurora-mysql5.7',
            parameters={
                'innodb_flush_log_at_trx_commit': '2',
                'slow_query_log': '1',
                'tx_isolation': 'READ-COMMITTED',
                'wait_timeout': '300',
                'character-set-client-handshake': '0',
                'character_set_server': 'utf8mb4',
                'collation_server': 'utf8mb4_unicode_ci',
                'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
            })

        rds_db_param_group = aws_rds.ParameterGroup(
            self,
            'AuroraMySQLDBParamGroup',
            engine=rds_engine,
            description='Custom parameter group for aurora-mysql5.7',
            parameters={
                'slow_query_log': '1',
                'tx_isolation': 'READ-COMMITTED',
                'wait_timeout': '300',
                'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
            })

        db_cluster_name = self.node.try_get_context('db_cluster_name')
        #XXX: aws_rds.Credentials.from_username(username, ...) cannot be given a user-specific Secret name;
        #XXX: to control the name, first create the Secret and then use it to create the database:
        #  db_secret_name = self.node.try_get_context('db_secret_name')
        #  #XXX: arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
        #  db_secret_arn = 'arn:aws:secretsmanager:{region}:{account}:secret:{resource_name}'.format(
        #    region=core.Aws.REGION, account=core.Aws.ACCOUNT_ID, resource_name=db_secret_name)
        #  db_secret = aws_secretsmanager.Secret.from_secret_arn(self, 'DBSecretFromArn', db_secret_arn)
        #  rds_credentials = aws_rds.Credentials.from_secret(db_secret)
        rds_credentials = aws_rds.Credentials.from_generated_secret("admin")
        db_cluster = aws_rds.DatabaseCluster(
            self,
            'Database',
            engine=rds_engine,
            credentials=rds_credentials,
            instance_props={
                'instance_type':
                aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3,
                                        aws_ec2.InstanceSize.MEDIUM),
                'parameter_group':
                rds_db_param_group,
                'vpc_subnets': {
                    'subnet_type': aws_ec2.SubnetType.PRIVATE
                },
                'vpc':
                vpc,
                'auto_minor_version_upgrade':
                False,
                'security_groups': [sg_mysql_server]
            },
            instances=2,
            parameter_group=rds_cluster_param_group,
            cloudwatch_logs_retention=aws_logs.RetentionDays.THREE_DAYS,
            cluster_identifier=db_cluster_name,
            subnet_group=rds_subnet_group,
            backup=aws_rds.BackupProps(retention=core.Duration.days(3),
                                       preferred_window="03:00-04:00"))

        sagemaker_notebook_role_policy_doc = aws_iam.PolicyDocument()
        sagemaker_notebook_role_policy_doc.add_statements(
            aws_iam.PolicyStatement(
                **{
                    "effect": aws_iam.Effect.ALLOW,
                    "resources": [db_cluster.secret.secret_full_arn],
                    "actions": ["secretsmanager:GetSecretValue"]
                }))

        sagemaker_notebook_role = aws_iam.Role(
            self,
            'SageMakerNotebookRoleForRDS',
            role_name='AWSSageMakerNotebookRoleForRDS',
            assumed_by=aws_iam.ServicePrincipal('sagemaker.amazonaws.com'),
            inline_policies={
                'AuroraMySQLSecretPolicy': sagemaker_notebook_role_policy_doc
            })

        cf_readonly_access_policy = aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            'AWSCloudFormationReadOnlyAccess')
        sagemaker_notebook_role.add_managed_policy(cf_readonly_access_policy)

        #XXX: skip downloading rds-combined-ca-bundle.pem if not use SSL with a MySQL DB instance
        # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.SSLSupport
        rds_wb_lifecycle_content = '''#!/bin/bash
sudo -u ec2-user -i <<'EOF'
echo "export AWS_REGION={AWS_Region}" >> ~/.bashrc
source /home/ec2-user/anaconda3/bin/activate python3
pip install --upgrade ipython-sql
pip install --upgrade PyMySQL 
pip install --upgrade pretty_errors
source /home/ec2-user/anaconda3/bin/deactivate
cd /home/ec2-user/SageMaker
wget -N https://s3.amazonaws.com/rds-downloads/rds-combined-ca-bundle.pem
wget -N https://raw.githubusercontent.com/ksmin23/my-aws-cdk-examples/main/rds/sagemaker-aurora_mysql/ipython-sql.ipynb
EOF
'''.format(AWS_Region=core.Aws.REGION)
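        # For reference, a hedged sketch (not in the original) of how the
        # notebook can consume the generated secret at runtime:
        #
        #   import os, json, boto3
        #   sm = boto3.client('secretsmanager', region_name=os.environ['AWS_REGION'])
        #   secret = json.loads(sm.get_secret_value(SecretId='<DBSecret output>')['SecretString'])
        #   host, user, password = secret['host'], secret['username'], secret['password']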

        rds_wb_lifecycle_config_prop = aws_sagemaker.CfnNotebookInstanceLifecycleConfig.NotebookInstanceLifecycleHookProperty(
            content=core.Fn.base64(rds_wb_lifecycle_content))

        rds_wb_lifecycle_config = aws_sagemaker.CfnNotebookInstanceLifecycleConfig(
            self,
            'MySQLWorkbenchLifeCycleConfig',
            notebook_instance_lifecycle_config_name=
            'MySQLWorkbenchLifeCycleConfig',
            on_start=[rds_wb_lifecycle_config_prop])

        rds_workbench = aws_sagemaker.CfnNotebookInstance(
            self,
            'AuroraMySQLWorkbench',
            instance_type='ml.t3.xlarge',
            role_arn=sagemaker_notebook_role.role_arn,
            lifecycle_config_name=rds_wb_lifecycle_config.
            notebook_instance_lifecycle_config_name,
            notebook_instance_name='AuroraMySQLWorkbench',
            root_access='Disabled',
            security_group_ids=[sg_use_mysql.security_group_id],
            subnet_id=vpc.select_subnets(
                subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids[0])

        core.CfnOutput(self,
                       'StackName',
                       value=self.stack_name,
                       export_name='StackName')
        core.CfnOutput(self, 'VpcId', value=vpc.vpc_id, export_name='VpcId')

        core.CfnOutput(self,
                       'DBClusterName',
                       value=db_cluster.cluster_identifier,
                       export_name='DBClusterName')
        core.CfnOutput(self,
                       'DBCluster',
                       value=db_cluster.cluster_endpoint.socket_address,
                       export_name='DBCluster')
        #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_secretsmanager/README.html
        # secret_arn="arn:aws:secretsmanager:<region>:<account-id-number>:secret:<secret-name>-<random-6-characters>",
        core.CfnOutput(self,
                       'DBSecret',
                       value=db_cluster.secret.secret_name,
                       export_name='DBSecret')

        core.CfnOutput(self,
                       'SageMakerRole',
                       value=sagemaker_notebook_role.role_name,
                       export_name='SageMakerRole')
        core.CfnOutput(self,
                       'SageMakerNotebookInstance',
                       value=rds_workbench.notebook_instance_name,
                       export_name='SageMakerNotebookInstance')
        core.CfnOutput(self,
                       'SageMakerNotebookInstanceLifecycleConfig',
                       value=rds_workbench.lifecycle_config_name,
                       export_name='SageMakerNotebookInstanceLifecycleConfig')
Example #16
    def __init__(self, scope: core.Construct, vpc: ec2.Vpc, **kwargs):
        self.deploy_env = active_environment
        self.custom_vpc = vpc
        super().__init__(scope,
                         id=f"{self.deploy_env.value}-rds-stack",
                         **kwargs)

        # Security Group
        self.sg_ecommerce_rds = ec2.SecurityGroup(
            self,
            id=f"sg-rds-ecommerce-{self.deploy_env.value}",
            vpc=self.custom_vpc,
            allow_all_outbound=True,
            security_group_name=f"rds-ecommerce-{self.deploy_env.value}-sg",
        )

        # Security Group Ingress Rules (Public)
        self.sg_ecommerce_rds.add_ingress_rule(peer=ec2.Peer.ipv4("0.0.0.0/0"),
                                               connection=ec2.Port.tcp(5432))

        for subnet in self.custom_vpc.private_subnets:
            self.sg_ecommerce_rds.add_ingress_rule(
                peer=ec2.Peer.ipv4(subnet.ipv4_cidr_block),
                connection=ec2.Port.tcp(5432),
            )

        # Parameter Group - parameters to read rds with dms
        self.ecommerce_rds_parameter_group = rds.ParameterGroup(
            self,
            id=f"ecommerce-{self.deploy_env.value}-rds-parameter-group",
            description="Parameter group to allow CDC from RDS using DMS.",
            engine=rds.DatabaseInstanceEngine.postgres(
                version=rds.PostgresEngineVersion.VER_12_4),
            parameters={
                "rds.logical_replication": "1",
                "wal_sender_timeout": "0"
            },
        )
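        # The two parameters above turn on Postgres logical replication, which
        # DMS requires for CDC. A hedged sanity check once the instance is up
        # (not in the original):
        #   psql -h <endpoint> -U <user> -d <db> -c "SHOW wal_level;"  # expect: logical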

        # Define credentials.
        # Definitely not best practice: we should use Secrets Manager,
        # but we want to avoid extra costs in this demonstration.
        self.rds_credentials = rds.Credentials.from_password(
            username=db_username,
            password=core.SecretValue.plain_text(db_password))
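        # For reference, the Secrets Manager alternative would be (hedged
        # sketch, not used here because of the cost note above):
        # self.rds_credentials = rds.Credentials.from_generated_secret(db_username)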

        # Postgres DataBase Instance
        self.ecommerce_rds = rds.DatabaseInstance(
            self,
            id=f"rds-ecommerce-{self.deploy_env.value}",
            database_name=db_name,
            engine=rds.DatabaseInstanceEngine.postgres(
                version=rds.PostgresEngineVersion.VER_12_4),
            credentials=self.rds_credentials,
            instance_type=ec2.InstanceType("t3.micro"),
            vpc=self.custom_vpc,
            instance_identifier=f"rds-{self.deploy_env.value}-ecommerce-db",
            port=5432,
            subnet_group=rds.SubnetGroup(
                self,
                f"rds-{self.deploy_env.value}-subnet",
                description="place RDS on public subnet",
                vpc=self.custom_vpc,
                vpc_subnets=ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PUBLIC),
            ),
            parameter_group=self.ecommerce_rds_parameter_group,
            security_groups=[self.sg_ecommerce_rds],
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        self._rds_host = self.ecommerce_rds.db_instance_endpoint_address
        self._rds_port = self.ecommerce_rds.db_instance_endpoint_port

    # Properties must be defined on the class (not nested inside __init__)
    # to be reachable as instance attributes.
    @property
    def rds_endpoint_address(self):
        return self._rds_host

    @property
    def rds_endpoint_port(self):
        return self._rds_port
Example #17
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # get acct id for policies
        # acct_id=env['account']

        # creates a new vpc, subnets, 2 nat gateways, etc
        vpc = ec2.Vpc(self, "VPC")

        # mocking vpc from my environment
        #vpc = ec2.Vpc.from_lookup(self, "nonDefaultVpc", vpc_id="vpc-9931a0fc")

        self._rds_subnet_group = rds.SubnetGroup(
            self,
            'RdsSubnetGroup',
            description="aaa",
            subnet_group_name='aurora-mysql',
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            vpc=vpc)

        # create the RDS cluster
        self._rds_cluster = rds.DatabaseCluster(
            self,
            "RDS Cluster",
            cluster_identifier="rds-test",
            engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
            instance_props=rds.InstanceProps(
                vpc=vpc,
                instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                                  ec2.InstanceSize.SMALL),
            ),
            port=3306,
            default_database_name="test",
            subnet_group=self._rds_subnet_group)

        # enable autoscaling for rds
        # 3 servers maximum
        # scale on 1% cpu for testing, 50% normally

        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_applicationautoscaling/ScalableTarget.html
        self._scaling_target = autoscale.ScalableTarget(
            self,
            "Scaling Target",
            max_capacity=3,
            min_capacity=1,
            resource_id='cluster:' + self._rds_cluster.cluster_identifier,
            scalable_dimension='rds:cluster:ReadReplicaCount',
            service_namespace=autoscale.ServiceNamespace.RDS)

        self._scale_policy = autoscale.TargetTrackingScalingPolicy(
            self,
            "Tracking Scaling Policy",
            policy_name='thisisscalingpolicyname',
            target_value=1,
            predefined_metric=autoscale.PredefinedMetric.
            RDS_READER_AVERAGE_CPU_UTILIZATION,
            scaling_target=self._scaling_target,
            scale_in_cooldown=core.Duration.minutes(5),
            scale_out_cooldown=core.Duration.minutes(5),
        )
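        # Hedged sketch (not in the original): scheduled actions can be layered
        # on the same scalable target, e.g. to guarantee two readers during
        # business hours:
        # self._scaling_target.scale_on_schedule(
        #     'ScaleUpBusinessHours',
        #     schedule=autoscale.Schedule.cron(hour='8', minute='0'),
        #     min_capacity=2)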
Example #18
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        vpc_name = self.node.try_get_context("vpc_name")
        vpc = aws_ec2.Vpc.from_lookup(self,
                                      "ExistingVPC",
                                      is_default=True,
                                      vpc_name=vpc_name)

        sg_use_mysql = aws_ec2.SecurityGroup(
            self,
            'MySQLClientSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for mysql client',
            security_group_name='use-default-mysql')
        core.Tags.of(sg_use_mysql).add('Name', 'use-default-mysql')

        sg_mysql_server = aws_ec2.SecurityGroup(
            self,
            'MySQLServerSG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for mysql',
            security_group_name='default-mysql-server')
        sg_mysql_server.add_ingress_rule(peer=sg_use_mysql,
                                         connection=aws_ec2.Port.tcp(3306),
                                         description='use-default-mysql')
        core.Tags.of(sg_mysql_server).add('Name', 'mysql-server')

        rds_subnet_group = aws_rds.SubnetGroup(
            self,
            'RdsSubnetGroup',
            description='subnet group for mysql',
            subnet_group_name='aurora-mysql',
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PRIVATE),
            vpc=vpc)

        rds_engine = aws_rds.DatabaseClusterEngine.aurora_mysql(
            version=aws_rds.AuroraMysqlEngineVersion.VER_2_08_1)

        rds_cluster_param_group = aws_rds.ParameterGroup(
            self,
            'AuroraMySQLClusterParamGroup',
            engine=rds_engine,
            description='Custom cluster parameter group for aurora-mysql5.7',
            parameters={
                'innodb_flush_log_at_trx_commit': '2',
                'slow_query_log': '1',
                'tx_isolation': 'READ-COMMITTED',
                'wait_timeout': '300',
                'character-set-client-handshake': '0',
                'character_set_server': 'utf8mb4',
                'collation_server': 'utf8mb4_unicode_ci',
                'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
            })

        rds_db_param_group = aws_rds.ParameterGroup(
            self,
            'AuroraMySQLDBParamGroup',
            engine=rds_engine,
            description='Custom parameter group for aurora-mysql5.7',
            parameters={
                'slow_query_log': '1',
                'tx_isolation': 'READ-COMMITTED',
                'wait_timeout': '300',
                'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
            })

        db_cluster_name = self.node.try_get_context('db_cluster_name')
        #XXX: aws_rds.Credentials.from_username(username, ...) can not be given user specific Secret name
        #XXX: therefore, first create Secret and then use it to create database
        db_secret_name = self.node.try_get_context('db_secret_name')
        #XXX: arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
        db_secret_arn = 'arn:aws:secretsmanager:{region}:{account}:secret:{resource_name}'.format(
            region=core.Aws.REGION,
            account=core.Aws.ACCOUNT_ID,
            resource_name=db_secret_name)
        db_secret = aws_secretsmanager.Secret.from_secret_arn(
            self, 'DBSecretFromArn', db_secret_arn)
        rds_credentials = aws_rds.Credentials.from_secret(db_secret)

        db_cluster = aws_rds.DatabaseCluster(
            self,
            'Database',
            engine=rds_engine,
            credentials=rds_credentials,
            instance_props={
                'instance_type':
                aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3,
                                        aws_ec2.InstanceSize.MEDIUM),
                'parameter_group':
                rds_db_param_group,
                'vpc_subnets': {
                    'subnet_type': aws_ec2.SubnetType.PRIVATE
                },
                'vpc':
                vpc,
                'auto_minor_version_upgrade':
                False,
                'security_groups': [sg_mysql_server]
            },
            instances=2,
            parameter_group=rds_cluster_param_group,
            cloudwatch_logs_retention=aws_logs.RetentionDays.THREE_DAYS,
            cluster_identifier=db_cluster_name,
            subnet_group=rds_subnet_group,
            backup=aws_rds.BackupProps(retention=core.Duration.days(3),
                                       preferred_window="03:00-04:00"))

        sg_mysql_public_proxy = aws_ec2.SecurityGroup(
            self,
            'MySQLPublicProxySG',
            vpc=vpc,
            allow_all_outbound=True,
            description='security group for mysql public proxy',
            security_group_name='default-mysql-public-proxy')
        sg_mysql_public_proxy.add_ingress_rule(
            peer=aws_ec2.Peer.any_ipv4(),
            connection=aws_ec2.Port.tcp(3306),
            description='mysql public proxy')
        core.Tags.of(sg_mysql_public_proxy).add('Name', 'mysql-public-proxy')

        #XXX: Database Proxy uses only the Secret ARN of the target database or database cluster
        #XXX: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbproxy-authformat.html
        #XXX: If a new Secret for a database user is created, the Proxy IAM Role's Resource list must be updated to include the new Secret.
        #XXX: Otherwise, the new database user cannot connect to the database through RDS Proxy.
        db_proxy = aws_rds.DatabaseProxy(
            self,
            'DBProxy',
            proxy_target=aws_rds.ProxyTarget.from_cluster(db_cluster),
            secrets=[db_secret],
            vpc=vpc,
            db_proxy_name='{}-proxy'.format(db_cluster_name),
            idle_client_timeout=core.Duration.minutes(10),
            max_connections_percent=90,
            max_idle_connections_percent=10,
            security_groups=[sg_use_mysql, sg_mysql_public_proxy],
            vpc_subnets=aws_ec2.SubnetSelection(
                subnet_type=aws_ec2.SubnetType.PUBLIC))
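        # Hedged addition (not in the original): expose the proxy endpoint so
        # clients can discover it from the stack outputs:
        core.CfnOutput(self, 'DBProxyEndpoint', value=db_proxy.endpoint, export_name='DBProxyEndpoint')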
Example #19
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # create s3 bucket
        s3_Bucket = s3.Bucket(self,
                              "AWS-Cookbook-Recipe-407",
                              removal_policy=RemovalPolicy.DESTROY)

        aws_s3_deployment.BucketDeployment(
            self,
            'S3Deployment',
            destination_bucket=s3_Bucket,
            sources=[aws_s3_deployment.Source.asset("./s3_content")],
            retain_on_delete=False)

        isolated_subnets = ec2.SubnetConfiguration(
            name="ISOLATED",
            subnet_type=ec2.SubnetType.PRIVATE_ISOLATED,
            cidr_mask=24)

        # create VPC
        vpc = ec2.Vpc(self,
                      'AWS-Cookbook-VPC',
                      cidr='10.10.0.0/23',
                      subnet_configuration=[isolated_subnets])

        vpc.add_interface_endpoint(
            'VPCSSMInterfaceEndpoint',
            # Find service names with: aws ec2 describe-vpc-endpoint-services | jq '.ServiceNames'
            service=ec2.InterfaceVpcEndpointAwsService('ssm'),
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False, subnet_type=ec2.SubnetType.PRIVATE_ISOLATED),
        )

        vpc.add_interface_endpoint(
            'VPCEC2MessagesInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ec2messages'),
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False, subnet_type=ec2.SubnetType.PRIVATE_ISOLATED),
        )

        vpc.add_interface_endpoint(
            'VPCSSMMessagesInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('ssmmessages'),
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False, subnet_type=ec2.SubnetType.PRIVATE_ISOLATED),
        )

        vpc.add_interface_endpoint(
            'VPCSecretsManagerInterfaceEndpoint',
            service=ec2.InterfaceVpcEndpointAwsService('secretsmanager'),
            private_dns_enabled=True,
            subnets=ec2.SubnetSelection(
                one_per_az=False, subnet_type=ec2.SubnetType.PRIVATE_ISOLATED),
        )

        vpc.add_gateway_endpoint(
            's3GateWayEndPoint',
            service=ec2.GatewayVpcEndpointAwsService('s3'),
            subnets=[
                ec2.SubnetSelection(
                    subnet_type=ec2.SubnetType.PRIVATE_ISOLATED)
            ],
        )

        ami = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        iam_role = iam.Role(
            self,
            "InstanceSSM",
            assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

        iam_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonEC2RoleforSSM"))

        instance = ec2.Instance(
            self,
            "Instance",
            instance_type=ec2.InstanceType("t3.nano"),
            machine_image=ami,
            role=iam_role,
            vpc=vpc,
        )

        subnet_group = rds.SubnetGroup(
            self,
            'rds_subnet_group',
            description='VPC Subnet Group for RDS',
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                one_per_az=False, subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))

        source_rds_security_group = ec2.SecurityGroup(
            self,
            'source_rds_security_group',
            description='Security Group for the Source RDS Instance',
            allow_all_outbound=True,
            vpc=vpc)

        source_db_name = 'AWSCookbookRecipe407Source'

        source_rds_instance = rds.DatabaseInstance(
            self,
            'source_rds_instance',
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7_33),
            instance_type=ec2.InstanceType("m5.large"),
            vpc=vpc,
            multi_az=False,
            database_name=source_db_name,
            instance_identifier='awscookbook407SourceDb',
            delete_automated_backups=True,
            deletion_protection=False,
            removal_policy=RemovalPolicy.DESTROY,
            allocated_storage=8,
            subnet_group=subnet_group,
            security_groups=[source_rds_security_group])

        source_rds_instance.connections.allow_from(instance.connections,
                                                   ec2.Port.tcp(3306),
                                                   "Allow MySQL from the EC2 instance")

        target_rds_security_group = ec2.SecurityGroup(
            self,
            'target_rds_security_group',
            description='Security Group for the Target RDS Instance',
            allow_all_outbound=True,
            vpc=vpc)

        target_db_name = 'AWSCookbookRecipe407Target'

        target_rds_instance = rds.DatabaseInstance(
            self,
            'target_rds_instance',
            engine=rds.DatabaseInstanceEngine.mysql(
                version=rds.MysqlEngineVersion.VER_5_7_26),
            instance_type=ec2.InstanceType("m5.large"),
            vpc=vpc,
            multi_az=False,
            database_name=target_db_name,
            instance_identifier='awscookbook407TargetDb',
            delete_automated_backups=True,
            deletion_protection=False,
            removal_policy=RemovalPolicy.DESTROY,
            allocated_storage=8,
            subnet_group=subnet_group,
            security_groups=[target_rds_security_group])

        target_rds_instance.connections.allow_from(instance.connections,
                                                   ec2.Port.tcp(3306),
                                                   "Allow MySQL from the EC2 instance")

        # Build the sqlparse layer contents before deploying:
        # mkdir -p lambda-layers/sqlparse/python
        # cd lambda-layers/sqlparse/python
        # pip install sqlparse --target="."
        # cd ../../../
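        # The pymysql and smart_open layers referenced below are built the same
        # way (hedged note, not in the original), e.g.:
        # mkdir -p lambda-layers/pymysql/python && cd lambda-layers/pymysql/python
        # pip install pymysql --target="." && cd ../../../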

        # create Lambda Layer
        sqlparse = aws_lambda.LayerVersion(
            self,
            "sqlparse",
            code=aws_lambda.AssetCode('lambda-layers/sqlparse'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="sqlparse",
            license=
            "https://github.com/andialbrecht/sqlparse/blob/master/LICENSE")

        pymysql = aws_lambda.LayerVersion(
            self,
            "pymysql",
            code=aws_lambda.AssetCode('lambda-layers/pymysql'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="pymysql",
            license="MIT")

        smartopen = aws_lambda.LayerVersion(
            self,
            "smartopen",
            code=aws_lambda.AssetCode('lambda-layers/smart_open'),
            compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_8],
            description="smartopen",
            license="MIT")

        lambda_function = aws_lambda.Function(
            self,
            'LambdaRDS',
            code=aws_lambda.AssetCode("./mysql-lambda/"),
            handler="lambda_function.lambda_handler",
            environment={
                "DB_SECRET_ARN": source_rds_instance.secret.secret_arn,
                "S3_BUCKET": s3_Bucket.bucket_name
            },
            layers=[sqlparse, pymysql, smartopen],
            memory_size=1024,
            runtime=aws_lambda.Runtime.PYTHON_3_8,
            timeout=Duration.seconds(600),
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE_ISOLATED))

        source_rds_instance.secret.grant_read(lambda_function)

        source_rds_instance.connections.allow_from(lambda_function.connections,
                                                   ec2.Port.tcp(3306),
                                                   "Allow MySQL from the Lambda function")

        s3_Bucket.grant_read(lambda_function)

        # outputs

        CfnOutput(self, 'VpcId', value=vpc.vpc_id)

        CfnOutput(self, 'InstanceId', value=instance.instance_id)

        CfnOutput(self,
                  'SourceRdsDatabaseId',
                  value=source_rds_instance.instance_identifier)

        CfnOutput(self,
                  'SourceRdsSecurityGroup',
                  value=source_rds_security_group.security_group_id)

        CfnOutput(self, 'SourceDbName', value=source_db_name)

        CfnOutput(self,
                  'SourceRdsSecretArn',
                  value=source_rds_instance.secret.secret_full_arn)

        CfnOutput(self,
                  'SourceRdsEndpoint',
                  value=source_rds_instance.db_instance_endpoint_address)

        CfnOutput(self,
                  'TargetRdsDatabaseId',
                  value=target_rds_instance.instance_identifier)

        CfnOutput(self,
                  'TargetRdsSecurityGroup',
                  value=target_rds_security_group.security_group_id)

        CfnOutput(self, 'TargetDbName', value=target_db_name)

        CfnOutput(self,
                  'TargetRdsSecretArn',
                  value=target_rds_instance.secret.secret_full_arn)

        CfnOutput(self,
                  'TargetRdsEndpoint',
                  value=target_rds_instance.db_instance_endpoint_address)

        # avoid shadowing the isolated_subnets SubnetConfiguration defined above
        isolated_subnet_ids = vpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE_ISOLATED).subnet_ids

        CfnOutput(self,
                  'IsolatedSubnets',
                  value=', '.join(isolated_subnet_ids))

        CfnOutput(self, 'LambdaArn', value=lambda_function.function_arn)

        CfnOutput(self,
                  'RdsSourceSecretName',
                  value=source_rds_instance.secret.secret_name)

        CfnOutput(self,
                  'RdsTargetSecretName',
                  value=target_rds_instance.secret.secret_name)