def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, lambdasg: ec2.SecurityGroup,
             bastionsg: ec2.SecurityGroup, kmskey, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    prj_name = self.node.try_get_context("project_name")
    env_name = self.node.try_get_context("env")

    json_template = {'username': '******'}
    db_creds = sm.Secret(
        self, 'db-secret',
        secret_name=env_name + '/rds-secret',
        generate_secret_string=sm.SecretStringGenerator(
            include_space=False,
            password_length=12,
            generate_string_key='password',
            exclude_punctuation=True,
            secret_string_template=json.dumps(json_template)))

    db_mysql = rds.DatabaseCluster(
        self, 'mysql',
        default_database_name=prj_name + env_name,
        engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
        engine_version="5.7.12",
        master_user=rds.Login(
            username='******',
            password=db_creds.secret_value_from_json('password')),
        instance_props=rds.InstanceProps(
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
            instance_type=ec2.InstanceType(instance_type_identifier="t3.small")),
        instances=1,
        parameter_group=rds.ClusterParameterGroup.from_parameter_group_name(
            self, 'pg-dev', parameter_group_name='default.aurora-mysql5.7'),
        kms_key=kmskey,
        removal_policy=core.RemovalPolicy.DESTROY)

    db_mysql.connections.allow_default_port_from(lambdasg, "Access from Lambda functions")
    db_mysql.connections.allow_default_port_from(bastionsg, "Allow from bastion host")

    # SSM Parameters
    ssm.StringParameter(self, 'db-host',
                        parameter_name='/' + env_name + '/db-host',
                        string_value=db_mysql.cluster_endpoint.hostname)
    ssm.StringParameter(self, 'db-name',
                        parameter_name='/' + env_name + '/db-name',
                        string_value=prj_name + env_name)
def __init__(self, scope: core.Construct, id: str, user_pool_arn: str) -> None:
    super().__init__(scope, id)

    private_sn = aws_ec2.SubnetConfiguration(
        name=f"{get_stack_name()}private_sn",
        subnet_type=aws_ec2.SubnetType.PRIVATE,
        cidr_mask=24)
    public_sn = aws_ec2.SubnetConfiguration(
        name=f"{get_stack_name()}public_sn",
        subnet_type=aws_ec2.SubnetType.PUBLIC,
        cidr_mask=24)
    self.vpc: IVpc = aws_ec2.Vpc(
        self, id='vpc',
        cidr='10.1.0.0/16',
        subnet_configuration=[private_sn, public_sn],
        max_azs=2)

    db_access_sg: aws_ec2.ISecurityGroup = aws_ec2.SecurityGroup(
        self, id=f"{get_stack_name()}fromOffice",
        vpc=self.vpc,
        security_group_name='accessFromOffice',
        allow_all_outbound=True)
    database_port: int = 3306
    db_access_sg.connections.allow_from(
        aws_ec2.Peer.ipv4(f'{get_my_public_ip()}/32'),
        aws_ec2.Port.tcp(database_port),
        'access from office only')

    my_sql = aws_rds.DatabaseClusterEngine.aurora_mysql(
        version=aws_rds.AuroraMysqlEngineVersion.VER_5_7_12)
    cluster = aws_rds.DatabaseCluster(
        self, id=f"{get_stack_name()}db_cluster",
        engine=my_sql,
        credentials=aws_rds.Credentials.from_username('admin', secret_name=read_git_branch()),
        instances=1,
        port=database_port,
        instance_props={
            "instance_type": aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE2,
                                                     aws_ec2.InstanceSize.SMALL),
            "vpc_subnets": {"subnet_type": aws_ec2.SubnetType.PUBLIC},
            "vpc": self.vpc,
            "publicly_accessible": True,
            "security_groups": [db_access_sg],
        })
def create_rds(self, vpc):
    # Create DB
    rds_cluster = rds.DatabaseCluster(
        self, 'Database',
        engine=rds.DatabaseClusterEngine.AURORA,
        master_user=rds.Login(username='******'),
        instance_props=rds.InstanceProps(
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
            vpc=vpc))
    return rds_cluster
def __init__(self, scope: core.Construct, id: str, bmt_vpc: ec2.Vpc, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    db_subnet_group = rds.SubnetGroup(
        self, 'Aurora',
        description='aurora subnet group',
        vpc=bmt_vpc,
        removal_policy=core.RemovalPolicy.DESTROY,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED))

    db_security_group = ec2.SecurityGroup(self, 'aurora-sg', vpc=bmt_vpc)
    db_security_group.add_ingress_rule(
        peer=ec2.Peer.ipv4('10.100.0.0/16'),
        connection=ec2.Port(
            protocol=ec2.Protocol.TCP,
            string_representation="to allow from the vpc internal",
            from_port=3306,
            to_port=3306))

    param_group = rds.ParameterGroup(
        self, 'bmt-aurora-param',
        engine=rds.DatabaseClusterEngine.AURORA_MYSQL)
    param_group.add_parameter("performance_schema", "1")

    rds.DatabaseCluster(
        self, 'bmt-aurora-cluster',
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_07_1),
        instance_props=rds.InstanceProps(
            vpc=bmt_vpc,
            instance_type=ec2.InstanceType.of(
                instance_class=ec2.InstanceClass.BURSTABLE3,
                instance_size=ec2.InstanceSize.MEDIUM),
            security_groups=[db_security_group]),
        instances=1,
        subnet_group=db_subnet_group,
        parameter_group=param_group,
        removal_policy=core.RemovalPolicy.DESTROY)
def create_rds(self, vpc, eks):
    rds_cluster = rds.DatabaseCluster(
        self, "Database",
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_08_1),
        instance_props={
            "instance_type": ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            "vpc_subnets": {"subnet_type": ec2.SubnetType.PRIVATE},
            "vpc": vpc
        })
    eks.connections.allow_to(rds_cluster, ec2.Port.tcp(3306))
    return rds_cluster
def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    master_user = _rds.Login(
        username="******",
        password=core.SecretValue.plain_text("password"))

    rds = _rds.DatabaseCluster(
        self, "rds",
        engine=_rds.DatabaseClusterEngine.aurora_mysql(
            version=_rds.AuroraMysqlEngineVersion.VER_5_7_12),
        master_user=master_user,
        instance_props=_rds.InstanceProps(
            vpc=vpc,
            instance_type=_ec2.InstanceType("t3.medium"),
            security_groups=[vpc.sg],
            vpc_subnets=_ec2.SubnetSelection(subnets=vpc.isolated_subnets)),
        instances=2)
def rds_cluster(self, vpc, ec2sg, rds_type, rds_param=None):
    postgres = rds.DatabaseCluster(
        self, "adl-" + rds_type,
        default_database_name="adldb",
        engine=getattr(rds.DatabaseClusterEngine, rds_type),
        instance_props=rds.InstanceProps(
            vpc=vpc['vpc'],
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
            instance_type=ec2.InstanceType(instance_type_identifier="t3.medium")),
        master_user=rds.Login(username="******"),
        backup=rds.BackupProps(retention=core.Duration.days(7),
                               preferred_window='01:00-02:00'),
        parameter_group=rds_param,
        preferred_maintenance_window="Sun:23:45-Mon:00:15",
        removal_policy=core.RemovalPolicy.DESTROY,
        storage_encrypted=True)
    postgres.connections.allow_from(ec2sg['ec2-sg'], ec2.Port.all_tcp())
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, asg_sg, stage={}, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    prefix_name = f'{stage["vpc_prefix"]}-{stage["stage_name"]}-{self.node.try_get_context("customer")}'

    self._rds_subnet_group = rds.SubnetGroup(
        self, f'{prefix_name}-rds-subnet-gruop',
        description="aaa",
        subnet_group_name=f'{prefix_name}-aurora-mysql',
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
        vpc=vpc)

    self._rds_cluster = rds.DatabaseCluster(
        self, f'{prefix_name}-rds-cluster',
        cluster_identifier=f'{prefix_name}-rds-cluster',
        credentials=rds.Credentials.from_generated_secret("admin"),
        engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
        instance_props=rds.InstanceProps(
            vpc=vpc,
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED)),
        port=3306,
        default_database_name=self.node.try_get_context("customer"),
        subnet_group=self._rds_subnet_group)

    for sg in asg_sg:
        self._rds_cluster.connections.allow_default_port_from(
            sg, "Allow EC2 ASG access to RDS MySQL")
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, lambdasg: ec2.SecurityGroup,
             bastionsg: ec2.SecurityGroup, kmskey: kms.Key, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    prj_name = self.node.try_get_context('project_name')
    env_name = self.node.try_get_context('env')

    db_mysql = rds.DatabaseCluster(
        self, 'mysql',
        default_database_name=prj_name + env_name,
        engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
        instance_props=rds.InstanceProps(
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
            instance_type=ec2.InstanceType(instance_type_identifier="t3.small")),
        instances=1,
        storage_encrypted=True,
        storage_encryption_key=kmskey,
        removal_policy=core.RemovalPolicy.DESTROY)

    db_mysql.connections.allow_default_port_from(lambdasg, 'Allow from Lambda function')
    db_mysql.connections.allow_default_port_from(bastionsg, "Allow from bastion host")

    ssm.StringParameter(self, 'db-host',
                        parameter_name=f"/{env_name}/db-host",
                        string_value=db_mysql.cluster_endpoint.hostname)
    ssm.StringParameter(self, 'db-secret-name',
                        parameter_name=f"/{env_name}/db-secret-name",
                        string_value=db_mysql.secret.secret_name)
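# Companion sketch (not part of the stack above): a Lambda handler that resolves the
# SSM parameters and the Secrets Manager secret published by the stack and opens a MySQL
# connection. This is a minimal illustration only; the ENV_NAME environment variable and
# the use of pymysql are assumptions, not taken from the original example.
import json
import os

import boto3
import pymysql


def handler(event, context):
    env_name = os.environ['ENV_NAME']  # assumed to match the stack's "env" context value
    ssm_client = boto3.client('ssm')
    host = ssm_client.get_parameter(Name=f'/{env_name}/db-host')['Parameter']['Value']
    secret_name = ssm_client.get_parameter(Name=f'/{env_name}/db-secret-name')['Parameter']['Value']
    secret = json.loads(
        boto3.client('secretsmanager').get_secret_value(SecretId=secret_name)['SecretString'])
    connection = pymysql.connect(host=host,
                                 user=secret['username'],
                                 password=secret['password'],
                                 connect_timeout=5)
    with connection.cursor() as cursor:
        cursor.execute('SELECT 1')  # simple reachability check
    return {'statusCode': 200}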
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # The code that defines your stack goes here
    vpc_name = self.node.try_get_context("vpc_name")
    vpc = aws_ec2.Vpc.from_lookup(self, "ExistingVPC",
                                  is_default=True,
                                  vpc_name=vpc_name)

    sg_use_mysql = aws_ec2.SecurityGroup(
        self, 'MySQLClientSG',
        vpc=vpc,
        allow_all_outbound=True,
        description='security group for mysql client',
        security_group_name='use-mysql-sg')
    core.Tags.of(sg_use_mysql).add('Name', 'mysql-client-sg')

    sg_mysql_server = aws_ec2.SecurityGroup(
        self, 'MySQLServerSG',
        vpc=vpc,
        allow_all_outbound=True,
        description='security group for mysql',
        security_group_name='mysql-server-sg')
    sg_mysql_server.add_ingress_rule(peer=sg_use_mysql,
                                     connection=aws_ec2.Port.tcp(3306),
                                     description='use-mysql-sg')
    core.Tags.of(sg_mysql_server).add('Name', 'mysql-server-sg')

    rds_subnet_group = aws_rds.SubnetGroup(
        self, 'RdsSubnetGroup',
        description='subnet group for mysql',
        subnet_group_name='aurora-mysql',
        vpc_subnets=aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.PRIVATE),
        vpc=vpc)

    rds_engine = aws_rds.DatabaseClusterEngine.aurora_mysql(
        version=aws_rds.AuroraMysqlEngineVersion.VER_2_08_1)

    rds_cluster_param_group = aws_rds.ParameterGroup(
        self, 'AuroraMySQLClusterParamGroup',
        engine=rds_engine,
        description='Custom cluster parameter group for aurora-mysql5.7',
        parameters={
            'innodb_flush_log_at_trx_commit': '2',
            'slow_query_log': '1',
            'tx_isolation': 'READ-COMMITTED',
            'wait_timeout': '300',
            'character-set-client-handshake': '0',
            'character_set_server': 'utf8mb4',
            'collation_server': 'utf8mb4_unicode_ci',
            'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
        })

    rds_db_param_group = aws_rds.ParameterGroup(
        self, 'AuroraMySQLDBParamGroup',
        engine=rds_engine,
        description='Custom parameter group for aurora-mysql5.7',
        parameters={
            'slow_query_log': '1',
            'tx_isolation': 'READ-COMMITTED',
            'wait_timeout': '300',
            'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
        })

    db_cluster_name = self.node.try_get_context('db_cluster_name')

    # #XXX: aws_rds.Credentials.from_username(username, ...) can not be given a user-specific Secret name
    # #XXX: therefore, first create the Secret and then use it to create the database
    # db_secret_name = self.node.try_get_context('db_secret_name')
    # #XXX: arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
    # db_secret_arn = 'arn:aws:secretsmanager:{region}:{account}:secret:{resource_name}'.format(
    #     region=core.Aws.REGION, account=core.Aws.ACCOUNT_ID, resource_name=db_secret_name)
    # db_secret = aws_secretsmanager.Secret.from_secret_arn(self, 'DBSecretFromArn', db_secret_arn)
    # rds_credentials = aws_rds.Credentials.from_secret(db_secret)
    rds_credentials = aws_rds.Credentials.from_generated_secret("admin")

    db_cluster = aws_rds.DatabaseCluster(
        self, 'Database',
        engine=rds_engine,
        credentials=rds_credentials,
        instance_props={
            'instance_type': aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3,
                                                     aws_ec2.InstanceSize.MEDIUM),
            'parameter_group': rds_db_param_group,
            'vpc_subnets': {'subnet_type': aws_ec2.SubnetType.PRIVATE},
            'vpc': vpc,
            'auto_minor_version_upgrade': False,
            'security_groups': [sg_mysql_server]
        },
        instances=2,
        parameter_group=rds_cluster_param_group,
        cloudwatch_logs_retention=aws_logs.RetentionDays.THREE_DAYS,
        cluster_identifier=db_cluster_name,
        subnet_group=rds_subnet_group,
        backup=aws_rds.BackupProps(retention=core.Duration.days(3),
                                   preferred_window="03:00-04:00"))

    sagemaker_notebook_role_policy_doc = aws_iam.PolicyDocument()
    sagemaker_notebook_role_policy_doc.add_statements(
        aws_iam.PolicyStatement(**{
            "effect": aws_iam.Effect.ALLOW,
            "resources": [db_cluster.secret.secret_full_arn],
            "actions": ["secretsmanager:GetSecretValue"]
        }))

    sagemaker_notebook_role = aws_iam.Role(
        self, 'SageMakerNotebookRoleForRDS',
        role_name='AWSSageMakerNotebookRoleForRDS',
        assumed_by=aws_iam.ServicePrincipal('sagemaker.amazonaws.com'),
        inline_policies={
            'AuroraMySQLSecretPolicy': sagemaker_notebook_role_policy_doc
        })

    cf_readonly_access_policy = aws_iam.ManagedPolicy.from_aws_managed_policy_name(
        'AWSCloudFormationReadOnlyAccess')
    sagemaker_notebook_role.add_managed_policy(cf_readonly_access_policy)

    #XXX: skip downloading rds-combined-ca-bundle.pem if you do not use SSL with a MySQL DB instance
    # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_MySQL.html#MySQL.Concepts.SSLSupport
    rds_wb_lifecycle_content = '''#!/bin/bash
sudo -u ec2-user -i <<'EOF'
echo "export AWS_REGION={AWS_Region}" >> ~/.bashrc
source /home/ec2-user/anaconda3/bin/activate python3
pip install --upgrade ipython-sql
pip install --upgrade PyMySQL
pip install --upgrade pretty_errors
source /home/ec2-user/anaconda3/bin/deactivate
cd /home/ec2-user/SageMaker
wget -N https://s3.amazonaws.com/rds-downloads/rds-combined-ca-bundle.pem
wget -N https://raw.githubusercontent.com/ksmin23/my-aws-cdk-examples/main/rds/sagemaker-aurora_mysql/ipython-sql.ipynb
EOF
'''.format(AWS_Region=core.Aws.REGION)

    rds_wb_lifecycle_config_prop = aws_sagemaker.CfnNotebookInstanceLifecycleConfig.NotebookInstanceLifecycleHookProperty(
        content=core.Fn.base64(rds_wb_lifecycle_content))

    rds_wb_lifecycle_config = aws_sagemaker.CfnNotebookInstanceLifecycleConfig(
        self, 'MySQLWorkbenchLifeCycleConfig',
        notebook_instance_lifecycle_config_name='MySQLWorkbenchLifeCycleConfig',
        on_start=[rds_wb_lifecycle_config_prop])

    rds_workbench = aws_sagemaker.CfnNotebookInstance(
        self, 'AuroraMySQLWorkbench',
        instance_type='ml.t3.xlarge',
        role_arn=sagemaker_notebook_role.role_arn,
        lifecycle_config_name=rds_wb_lifecycle_config.notebook_instance_lifecycle_config_name,
        notebook_instance_name='AuroraMySQLWorkbench',
        root_access='Disabled',
        security_group_ids=[sg_use_mysql.security_group_name],
        subnet_id=vpc.select_subnets(subnet_type=aws_ec2.SubnetType.PRIVATE).subnet_ids[0])

    core.CfnOutput(self, 'StackName', value=self.stack_name, export_name='StackName')
    core.CfnOutput(self, 'VpcId', value=vpc.vpc_id, export_name='VpcId')
    core.CfnOutput(self, 'DBClusterName', value=db_cluster.cluster_identifier,
                   export_name='DBClusterName')
    core.CfnOutput(self, 'DBCluster', value=db_cluster.cluster_endpoint.socket_address,
                   export_name='DBCluster')
    #XXX: https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_secretsmanager/README.html
    # secret_arn="arn:aws:secretsmanager:<region>:<account-id-number>:secret:<secret-name>-<random-6-characters>",
    core.CfnOutput(self, 'DBSecret', value=db_cluster.secret.secret_name, export_name='DBSecret')
    core.CfnOutput(self, 'SageMakerRole', value=sagemaker_notebook_role.role_name,
                   export_name='SageMakerRole')
    core.CfnOutput(self, 'SageMakerNotebookInstance', value=rds_workbench.notebook_instance_name,
                   export_name='SageMakerNotebookInstance')
    core.CfnOutput(self, 'SageMakerNotebookInstanceLifecycleConfig',
                   value=rds_workbench.lifecycle_config_name,
                   export_name='SageMakerNotebookInstanceLifecycleConfig')
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
    vpc = ec2.Vpc(self, "vpc",
                  cidr=props['vpc_CIDR'],
                  max_azs=3,
                  subnet_configuration=[
                      {'cidrMask': 28, 'name': 'public', 'subnetType': ec2.SubnetType.PUBLIC},
                      {'cidrMask': 28, 'name': 'private', 'subnetType': ec2.SubnetType.PRIVATE},
                      {'cidrMask': 28, 'name': 'db', 'subnetType': ec2.SubnetType.ISOLATED}
                  ])

    rds_subnetGroup = rds.SubnetGroup(
        self, "rds_subnetGroup",
        description=f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
    ##TODO: ADD Aurora Serverless Option
    rds_instance = rds.DatabaseCluster(
        self, 'wordpress-db',
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_07_2),
        instances=1,
        instance_props=rds.InstanceProps(
            vpc=vpc,
            enable_performance_insights=props['rds_enable_performance_insights'],
            instance_type=ec2.InstanceType(instance_type_identifier=props['rds_instance_type'])),
        subnet_group=rds_subnetGroup,
        storage_encrypted=props['rds_storage_encrypted'],
        backup=rds.BackupProps(
            retention=core.Duration.days(props['rds_automated_backup_retention_days'])))

    EcsToRdsSeurityGroup = ec2.SecurityGroup(
        self, "EcsToRdsSeurityGroup",
        vpc=vpc,
        description="Allow WordPress containers to talk to RDS")

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
    db_cred_generator = _lambda.Function(
        self, 'db_creds_generator',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='db_creds_generator.handler',
        code=_lambda.Code.asset('lambda'),
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
        # vpc.select_subnets(subnet_type=ec2.SubnetType("ISOLATED")).subnets,
        environment={
            'SECRET_NAME': rds_instance.secret.secret_name,
        })

    # Set Permissions and Sec Groups
    rds_instance.connections.allow_from(EcsToRdsSeurityGroup, ec2.Port.tcp(3306))  # Open hole to RDS in RDS SG

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
    file_system = efs.FileSystem(
        self, "MyEfsFileSystem",
        vpc=vpc,
        encrypted=True,  # file system is not encrypted by default
        lifecycle_policy=props['efs_lifecycle_policy'],
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
        removal_policy=core.RemovalPolicy(props['efs_removal_policy']),
        enable_automatic_backups=props['efs_automatic_backups'])

    if props['deploy_bastion_host']:
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
        bastion_host = ec2.BastionHostLinux(self, 'bastion_host', vpc=vpc)
        rds_instance.connections.allow_from(bastion_host, ec2.Port.tcp(3306))

    self.output_props = props.copy()
    self.output_props["vpc"] = vpc
    self.output_props["rds_instance"] = rds_instance
    self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
    self.output_props["file_system"] = file_system
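# Companion sketch (not shown in the example above): the stack wires a 'db_creds_generator'
# Lambda with the cluster's generated secret name in SECRET_NAME, but its handler code lives
# outside the snippet. A minimal sketch of what such a handler could look like, assuming it
# only reads the generated credentials; the module layout and return shape are assumptions.
# Note: for this call to succeed, the stack would also need something like
# rds_instance.secret.grant_read(db_cred_generator).
import json
import os

import boto3


def handler(event, context):
    secret_name = os.environ['SECRET_NAME']
    response = boto3.client('secretsmanager').get_secret_value(SecretId=secret_name)
    creds = json.loads(response['SecretString'])
    # The RDS-generated secret contains keys such as 'username', 'password', 'host', 'port'.
    return {'host': creds.get('host'), 'username': creds.get('username')}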
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    self.current_dir = os.path.dirname(__file__)

    self.vpc = ec2.Vpc(
        self, "VPC",
        cidr="10.0.0.0/21",
        max_azs=2,
        subnet_configuration=[
            ec2.SubnetConfiguration(cidr_mask=28, name="Database", subnet_type=ec2.SubnetType.ISOLATED),
            ec2.SubnetConfiguration(cidr_mask=28, name="Private", subnet_type=ec2.SubnetType.PRIVATE),
            ec2.SubnetConfiguration(cidr_mask=28, name="Public", subnet_type=ec2.SubnetType.PUBLIC)
        ],
        nat_gateways=3)

    self.qs_security_group = ec2.SecurityGroup(
        self, "quicksight-sg",
        vpc=self.vpc,
        allow_all_outbound=True,
        description="QuickSight security group")

    self.bastion = ec2.BastionHostLinux(
        self, "BastionHost",
        vpc=self.vpc,
        subnet_selection=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))
    self.bastion.connections.allow_from_any_ipv4(ec2.Port.tcp(22), "Internet access SSH")

    self.vpc.add_interface_endpoint(
        "redshift_endpoint",
        service=ec2.InterfaceVpcEndpointAwsService("redshift"))
    self.vpc.add_interface_endpoint(
        "rds_endpoint",
        service=ec2.InterfaceVpcEndpointAwsService("rds"))

    self.redshift_secret = secrets.Secret(
        self, 'redshift-admin',
        secret_name='redshift-admin',
        description="This secret has generated admin secret password for Redshift cluster",
        generate_secret_string=secrets.SecretStringGenerator(
            secret_string_template='{"username": "******"}',
            generate_string_key='password',
            password_length=32,
            exclude_characters='"@\\\/',
            exclude_punctuation=True))

    self.rs_security_group = ec2.SecurityGroup(
        self, "redshift-sg",
        vpc=self.vpc,
        allow_all_outbound=True,
        description="Redshift SG")
    self.rs_security_group.add_ingress_rule(self.rs_security_group,
                                            ec2.Port.all_tcp(),
                                            'Redshift-basic')
    self.rs_security_group.add_ingress_rule(
        # https://docs.aws.amazon.com/quicksight/latest/user/regions.html
        ec2.Peer.ipv4('52.23.63.224/27'),
        ec2.Port.tcp(5439),
        'QuickSight-IP')
    self.rs_security_group.add_ingress_rule(self.qs_security_group,
                                            ec2.Port.tcp(5439),
                                            'QuickSight-sg')
    # self.rs_security_group.add_egress_rule(
    #     self.rs_security_group,
    #     ec2.Port.all_tcp(),
    #     'Allow outbound for QuickSight'
    # )

    self.redshift_cluster = redshift.Cluster(
        self, "datasource-redshift",
        master_user=redshift.Login(
            master_username="******",
            master_password=self.redshift_secret.secret_value_from_json('password')),
        vpc=self.vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
        security_groups=[self.rs_security_group])

    self.rds_secret = secrets.Secret(
        self, 'rds-admin',
        secret_name='rds-admin',
        description="This secret has generated admin secret password for RDS cluster",
        generate_secret_string=secrets.SecretStringGenerator(
            secret_string_template='{"username": "******"}',
            generate_string_key='password',
            password_length=32,
            exclude_characters='"@\\\/',
            exclude_punctuation=True))

    self.rds_cluster = rds.DatabaseCluster(
        self, "datasource-rds",
        engine=rds.DatabaseClusterEngine.aurora_postgres(
            version=rds.AuroraPostgresEngineVersion.VER_11_9),
        instance_props={
            "vpc_subnets": {"subnet_type": ec2.SubnetType.ISOLATED},
            "vpc": self.vpc
        },
        credentials=rds.Credentials.from_secret(self.rds_secret))

    self.rds_cluster.connections.allow_default_port_from(self.bastion, "EC2 Bastion access Aurora")
    self.rds_cluster.connections.allow_default_port_from(self.qs_security_group, "QuickSight-sg")
    self.rds_cluster.connections.allow_default_port_from(
        # https://docs.aws.amazon.com/quicksight/latest/user/regions.html
        ec2.Peer.ipv4('52.23.63.224/27'),
        "QuickSight-IP")

    self.qs_security_group.add_ingress_rule(self.rs_security_group, ec2.Port.all_tcp(), 'AllTCP')
    for rds_group in self.rds_cluster.connections.security_groups:
        self.qs_security_group.add_ingress_rule(rds_group, ec2.Port.all_tcp(), 'AllTCP')
    # self.qs_security_group.add_egress_rule(
    #     self.rs_security_group,
    #     ec2.Port.all_tcp(),
    #     'AllTCP'
    # )

    core.CfnOutput(self, "vpcId", value=self.vpc.vpc_id)
    core.CfnOutput(self, "redshiftUsername", value="admin")
    core.CfnOutput(self, "redshiftPassword", value="redshift-admin")
    core.CfnOutput(self, "redshiftClusterId", value=self.redshift_cluster.cluster_name)
    core.CfnOutput(self, "redshiftHost", value=self.redshift_cluster.cluster_endpoint.hostname)
    core.CfnOutput(self, "redshiftDB", value="dev")
    core.CfnOutput(self, "rdsUsername", value="administrator")
    core.CfnOutput(self, "rdsPassword", value="rds-admin")
    core.CfnOutput(self, "rdsClusterId", value=self.rds_cluster.cluster_identifier)
    core.CfnOutput(self, "namespace", value="default")
    core.CfnOutput(self, "version", value="1")
    core.CfnOutput(self, "quicksightSecurityGroupId", value=self.qs_security_group.security_group_id)
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
    vpc = ec2.Vpc(self, "vpc",
                  cidr=props['vpc_CIDR'],
                  max_azs=3,
                  subnet_configuration=[
                      {'cidrMask': 28, 'name': 'public', 'subnetType': ec2.SubnetType.PUBLIC},
                      {'cidrMask': 28, 'name': 'private', 'subnetType': ec2.SubnetType.PRIVATE},
                      {'cidrMask': 28, 'name': 'db', 'subnetType': ec2.SubnetType.ISOLATED}
                  ])

    rds_subnetGroup = rds.SubnetGroup(
        self, "rds_subnetGroup",
        description=f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
    ##TODO: ADD Aurora Serverless Option
    rds_instance = rds.DatabaseCluster(
        self, 'wordpress-db',
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_07_2),
        instances=1,
        instance_props=rds.InstanceProps(
            vpc=vpc,
            enable_performance_insights=props['rds_enable_performance_insights'],
            instance_type=ec2.InstanceType(instance_type_identifier=props['rds_instance_type'])),
        subnet_group=rds_subnetGroup,
        storage_encrypted=props['rds_storage_encrypted'],
        backup=rds.BackupProps(
            retention=core.Duration.days(props['rds_automated_backup_retention_days'])))

    EcsToRdsSeurityGroup = ec2.SecurityGroup(
        self, "EcsToRdsSeurityGroup",
        vpc=vpc,
        description="Allow WordPress containers to talk to RDS")

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
    db_cred_generator = _lambda.Function(
        self, 'db_creds_generator',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='db_creds_generator.handler',
        code=_lambda.Code.asset('lambda/db_creds_generator'),
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
        # vpc.select_subnets(subnet_type=ec2.SubnetType("ISOLATED")).subnets,
        environment={
            'SECRET_NAME': rds_instance.secret.secret_name,
        })

    # Set Permissions and Sec Groups
    rds_instance.connections.allow_from(EcsToRdsSeurityGroup, ec2.Port.tcp(3306))  # Open hole to RDS in RDS SG

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
    file_system = efs.FileSystem(
        self, "MyEfsFileSystem",
        vpc=vpc,
        encrypted=True,  # file system is not encrypted by default
        lifecycle_policy=props['efs_lifecycle_policy'],
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
        removal_policy=core.RemovalPolicy(props['efs_removal_policy']),
        enable_automatic_backups=props['efs_automatic_backups'])

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html?highlight=ecs%20cluster#aws_cdk.aws_ecs.Cluster
    cluster = ecs.Cluster(
        self, "Cluster",
        vpc=vpc,
        container_insights=props['ecs_enable_container_insights'])

    if props['deploy_bastion_host']:
        # ToDo: Deploy bastion host with a key file
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
        bastion_host = ec2.BastionHostLinux(self, 'bastion_host', vpc=vpc)
        rds_instance.connections.allow_from(bastion_host, ec2.Port.tcp(3306))

    #######################
    ### Developer Tools ###
    # SFTP into the EFS Shared File System
    NetToolsSecret = secretsmanager.Secret(
        self, "NetToolsSecret",
        generate_secret_string=secretsmanager.SecretStringGenerator(
            secret_string_template=json.dumps({"username": '******', "ip": ''}),
            generate_string_key="password",
            exclude_characters='/"'))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
    AccessPoint = file_system.add_access_point(
        "access-point",
        path="/",
        create_acl=efs.Acl(
            owner_uid="100",  # https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
            owner_gid="101",
            permissions="0755"))

    EfsVolume = ecs.Volume(
        name="efs",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=file_system.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                access_point_id=AccessPoint.access_point_id)))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
    NetToolsTask = ecs.FargateTaskDefinition(
        self, "TaskDefinition",
        cpu=256,
        memory_limit_mib=512,
        volumes=[EfsVolume])

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
    NetToolsContainer = NetToolsTask.add_container(
        "NetTools",
        image=ecs.ContainerImage.from_registry('netresearch/sftp'),
        command=['test:test:100:101:efs'])
    NetToolsContainer.add_port_mappings(
        ecs.PortMapping(container_port=22, protocol=ecs.Protocol.TCP))
    NetToolsContainer.add_mount_points(
        ecs.MountPoint(
            container_path="/home/test/efs",  # ToDo build path out with username from secret
            read_only=False,
            source_volume=EfsVolume.name,
        ))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html?highlight=fargateservice#aws_cdk.aws_ecs.FargateService
    service = ecs.FargateService(
        self, "Service",
        cluster=cluster,
        task_definition=NetToolsTask,
        platform_version=ecs.FargatePlatformVersion("VERSION1_4"),  # Required for EFS
    )
    # ToDo somehow store container's IP on deploy

    # Allow traffic to EFS Volume from Net Tools container
    service.connections.allow_to(file_system, ec2.Port.tcp(2049))
    # ToDo allow bastion host into container on port 22

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
    bastion_ip_locator = _lambda.Function(
        self, 'bastion_ip_locator',
        function_name=f"{props['environment']}-{props['application']}-{props['unit']}-SFTP-IP",
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='bastion_ip_locator.handler',
        code=_lambda.Code.asset('lambda/bastion_ip_locator'),
        environment={
            'CLUSTER_NAME': cluster.cluster_arn,
            'SERVICE_NAME': service.service_name
        })

    # Give needed perms to bastion_ip_locator for reading info from ECS
    bastion_ip_locator.add_to_role_policy(
        iam.PolicyStatement(
            actions=["ecs:DescribeTasks"],
            resources=[
                # f"arn:aws:ecs:us-east-1:348757191778:service/{cluster.cluster_name}/{service.service_name}",
                f"arn:aws:ecs:us-east-1:348757191778:task/{cluster.cluster_name}/*"
            ]))
    bastion_ip_locator.add_to_role_policy(
        iam.PolicyStatement(
            actions=["ecs:ListTasks"],
            resources=["*"],
            conditions={'ArnEquals': {'ecs:cluster': cluster.cluster_arn}}))

    self.output_props = props.copy()
    self.output_props["vpc"] = vpc
    self.output_props["rds_instance"] = rds_instance
    self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
    self.output_props["file_system"] = file_system
    self.output_props["cluster"] = cluster
def __init__(self, scope: core.Construct, id: str, datalake: DataLakeLayer, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    self.__datalake = datalake

    self.security_group = ec2.SecurityGroup(
        self, 'SecurityGroup',
        vpc=self.datalake.vpc,
        allow_all_outbound=True,
        description='SonarQube Security Group')
    self.security_group.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                                         connection=ec2.Port.all_traffic(),
                                         description='Allow any traffic')

    self.sonarqube_svr_ecr = ecr.DockerImageAsset(
        self, 'Repo',
        directory=os.path.join(root_dir, 'images/sonarqube-server'),
        repository_name='sonarqube')
    self.sonarqube_cli_ecr = ecr.DockerImageAsset(
        self, 'Cli',
        directory=os.path.join(root_dir, 'images/sonarqube-scanner'),
        repository_name='sonarqube-cli')

    self.database = rds.DatabaseCluster(
        self, 'Database',
        engine=rds.DatabaseClusterEngine.aurora_postgres(
            version=rds.AuroraPostgresEngineVersion.VER_11_9),
        default_database_name='sonarqube',
        removal_policy=core.RemovalPolicy.DESTROY,
        credentials=rds.Credentials.from_username(
            username='******',
            password=core.SecretValue(value='postgres')),
        instance_props=rds.InstanceProps(
            vpc=self.datalake.vpc,
            security_groups=[self.security_group],
            instance_type=ec2.InstanceType('r6g.xlarge')))

    # self.ecs_cluster = ecs.Cluster(self, 'SonarCluster',
    #     container_insights=True,
    #     vpc=self.datalake.vpc,
    #     capacity=ecs.AddCapacityOptions(
    #         machine_image_type=ecs.MachineImageType.AMAZON_LINUX_2,
    #         instance_type=ec2.InstanceType('m5.xlarge'),
    #         allow_all_outbound=True,
    #         associate_public_ip_address=False,
    #         vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
    #         desired_capacity=2))

    # self.service = ecsp.ApplicationLoadBalancedEc2Service(self, 'SonarEc2',
    #     cluster=self.ecs_cluster,
    #     desired_count=1,
    #     listener_port=80,
    #     memory_reservation_mib=4 * 1024,
    #     task_image_options=ecsp.ApplicationLoadBalancedTaskImageOptions(
    #         image=ecs.ContainerImage.from_docker_image_asset(asset=self.sonarqube_svr_ecr),
    #         container_name='sonarqube-svr',
    #         container_port=9000,
    #         enable_logging=True,
    #         environment={
    #             '_SONAR_JDBC_URL': 'jdbc:postgresql://{}/sonarqube'.format(
    #                 self.database.cluster_endpoint.hostname),
    #             '_SONAR_JDBC_USERNAME': '******',
    #             '_SONAR_JDBC_PASSWORD': '******'
    #         }))

    self.service = ecsp.ApplicationLoadBalancedFargateService(
        self, 'Server',
        assign_public_ip=True,
        vpc=self.datalake.vpc,
        desired_count=1,
        cpu=4096,
        memory_limit_mib=8 * 1024,
        listener_port=80,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        security_groups=[self.security_group, self.datalake.efs_sg],
        task_image_options=ecsp.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_docker_image_asset(asset=self.sonarqube_svr_ecr),
            container_name='sonarqube-svr',
            container_port=9000,
            enable_logging=True,
            environment={
                '_SONAR_JDBC_URL': 'jdbc:postgresql://{}/sonarqube'.format(
                    self.database.cluster_endpoint.hostname),
                '_SONAR_JDBC_USERNAME': '******',
                '_SONAR_JDBC_PASSWORD': '******'
            }))

    for name in ['AmazonElasticFileSystemClientFullAccess']:
        self.service.task_definition.task_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(name))

    # Override container specific settings
    container = self.service.task_definition.default_container

    # Required to start remote sql
    container.add_ulimits(
        ecs.Ulimit(name=ecs.UlimitName.NOFILE, soft_limit=262145, hard_limit=262145))

    for folder in ['data', 'logs']:
        efs_ap = self.datalake.efs.add_access_point(
            'sonarqube-' + folder,
            create_acl=efs.Acl(owner_gid="0", owner_uid="0", permissions="777"),
            path='/sonarqube/' + folder)

        self.service.task_definition.add_volume(
            name=folder,
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=self.datalake.efs.file_system_id,
                transit_encryption='ENABLED',
                authorization_config=ecs.AuthorizationConfig(
                    access_point_id=efs_ap.access_point_id,
                    iam='DISABLED')))

        container.add_mount_points(
            ecs.MountPoint(container_path='/opt/sonarqube/' + folder,
                           source_volume=folder,
                           read_only=False))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    self.current_dir = os.path.dirname(__file__)

    self.vpc = ec2.Vpc(
        self, "VPC",
        cidr="10.0.0.0/21",
        max_azs=3,
        subnet_configuration=[
            ec2.SubnetConfiguration(
                cidr_mask=28,
                name="Database",
                subnet_type=ec2.SubnetType.ISOLATED,
            )
        ])

    self.vpc.add_interface_endpoint(
        "redshift_endpoint",
        service=ec2.InterfaceVpcEndpointAwsService("redshift"))
    self.vpc.add_interface_endpoint(
        "rds_endpoint",
        service=ec2.InterfaceVpcEndpointAwsService("rds"))

    self.redshift_secret = secrets.Secret(
        self, 'redshift-admin',
        secret_name='redshift-admin',
        description="This secret has generated admin secret password for Redshift cluster",
        generate_secret_string=secrets.SecretStringGenerator(
            secret_string_template='{"username": "******"}',
            generate_string_key='password',
            password_length=32,
            exclude_characters='"@\\\/',
            exclude_punctuation=True))

    self.redshift_cluster = redshift.Cluster(
        self, "datasource-redshift",
        master_user=redshift.Login(
            master_username="******",
            master_password=self.redshift_secret.secret_value_from_json('password')),
        vpc=self.vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED))

    self.rds_secret = secrets.Secret(
        self, 'rds-admin',
        secret_name='rds-admin',
        description="This secret has generated admin secret password for RDS cluster",
        generate_secret_string=secrets.SecretStringGenerator(
            secret_string_template='{"username": "******"}',
            generate_string_key='password',
            password_length=32,
            exclude_characters='"@\\\/',
            exclude_punctuation=True))

    self.rds_cluster = rds.DatabaseCluster(
        self, "datasource-rds",
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_08_1),
        instance_props={
            "vpc_subnets": {"subnet_type": ec2.SubnetType.ISOLATED},
            "vpc": self.vpc
        },
        credentials=rds.Credentials.from_secret(self.rds_secret))

    core.CfnOutput(self, "vpcId", value=self.vpc.vpc_id)
    core.CfnOutput(self, "redshiftUsername", value="admin")
    core.CfnOutput(self, "redshiftPassword", value=self.redshift_secret.secret_name)
    core.CfnOutput(self, "redshiftClusterId", value=self.redshift_cluster.cluster_name)
    core.CfnOutput(self, "redshiftHost", value=self.redshift_cluster.cluster_endpoint.hostname)
    core.CfnOutput(self, "redshiftDB", value="dev")
    core.CfnOutput(self, "rdsUsername", value="admin")
    core.CfnOutput(self, "rdsPassword", value=self.rds_secret.secret_name)
    core.CfnOutput(self, "rdsClusterId", value=self.rds_cluster.cluster_identifier)
    core.CfnOutput(self, "namespace", value="default")
    core.CfnOutput(self, "version", value="1")
def __init__(self, scope: core.Construct, id: str, env, props, cluster=False, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # TEMP without ASG
    # security_groups = [ec2.SecurityGroup(
    #     self,
    #     id="ecs-sample-mysql",
    #     vpc=props['vpc'],
    #     security_group_name="ecs-sample-mysql"
    # )]
    vpc = props['vpc']
    security_groups = [props['sg_rds']]
    credential = rds.Credentials.from_username(username="******")
    private_subnet_selections = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE)
    subnet_group = rds.SubnetGroup(
        self, "sample-rds-subnet-group",
        vpc=vpc,
        subnet_group_name="sample-rds-subnet-group",
        vpc_subnets=private_subnet_selections,
        description="sample-rds-subnet-group")

    self.output_props = props.copy()

    if not cluster:
        rds_instance = rds.DatabaseInstance(
            self, "RDS-instance",
            database_name="sample",
            engine=rds.DatabaseInstanceEngine.mysql(version=rds.MysqlEngineVersion.VER_8_0_16),
            credentials=credential,
            instance_identifier="ecs-sample-db",
            vpc=vpc,
            port=3306,
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE3,
                ec2.InstanceSize.MICRO,
            ),
            subnet_group=subnet_group,
            vpc_subnets=private_subnet_selections,
            removal_policy=core.RemovalPolicy.DESTROY,
            deletion_protection=False,
            security_groups=security_groups)
        core.CfnOutput(self, "RDS_instance_endpoint",
                       value=rds_instance.db_instance_endpoint_address)
        self.output_props['rds'] = rds_instance
    else:
        instance_props = rds.InstanceProps(
            vpc=vpc,
            security_groups=security_groups,
            vpc_subnets=private_subnet_selections)
        rds_cluster = rds.DatabaseCluster(
            self, "RDS-cluster",
            cluster_identifier="ecs-sample-db-cluster",
            instance_props=instance_props,
            engine=rds.DatabaseClusterEngine.aurora_mysql(
                version=rds.AuroraMysqlEngineVersion.VER_2_07_1),
            credentials=credential,
            default_database_name="sample",
            instances=1,
            subnet_group=subnet_group,
            removal_policy=core.RemovalPolicy.DESTROY,
            deletion_protection=False)
        core.CfnOutput(self, "RDS_cluster_endpoint",
                       value=rds_cluster.cluster_endpoint.hostname)
        self.output_props['rds'] = rds_cluster
def __init__(self, scope: Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    vpc_name = self.node.try_get_context("vpc_name")
    vpc = aws_ec2.Vpc.from_lookup(self, "ExistingVPC",
                                  is_default=True,
                                  vpc_name=vpc_name)

    sg_use_mysql = aws_ec2.SecurityGroup(
        self, 'MySQLClientSG',
        vpc=vpc,
        allow_all_outbound=True,
        description='security group for mysql client',
        security_group_name='default-mysql-client-sg')
    cdk.Tags.of(sg_use_mysql).add('Name', 'default-mysql-client-sg')

    sg_mysql_server = aws_ec2.SecurityGroup(
        self, 'MySQLServerSG',
        vpc=vpc,
        allow_all_outbound=True,
        description='security group for mysql',
        security_group_name='default-mysql-server-sg')
    sg_mysql_server.add_ingress_rule(peer=sg_use_mysql,
                                     connection=aws_ec2.Port.tcp(3306),
                                     description='default-mysql-client-sg')
    sg_mysql_server.add_ingress_rule(peer=sg_mysql_server,
                                     connection=aws_ec2.Port.all_tcp(),
                                     description='default-mysql-server-sg')
    cdk.Tags.of(sg_mysql_server).add('Name', 'default-mysql-server-sg')

    rds_subnet_group = aws_rds.SubnetGroup(
        self, 'MySQLSubnetGroup',
        description='subnet group for mysql',
        subnet_group_name='aurora-mysql',
        vpc_subnets=aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT),
        vpc=vpc)

    rds_engine = aws_rds.DatabaseClusterEngine.aurora_mysql(
        version=aws_rds.AuroraMysqlEngineVersion.VER_3_01_0)

    #XXX: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Reference.html#AuroraMySQL.Reference.Parameters.Cluster
    rds_cluster_param_group = aws_rds.ParameterGroup(
        self, 'AuroraMySQLClusterParamGroup',
        engine=rds_engine,
        description='Custom cluster parameter group for aurora-mysql8.x',
        parameters={
            # For Aurora MySQL version 3, Aurora always uses the default value of 1.
            # 'innodb_flush_log_at_trx_commit': '2',
            'slow_query_log': '1',
            # Removed from Aurora MySQL version 3.
            # 'tx_isolation': 'READ-COMMITTED',
            'wait_timeout': '300',
            'character-set-client-handshake': '0',
            'character_set_server': 'utf8mb4',
            'collation_server': 'utf8mb4_unicode_ci',
            'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
        })

    #XXX: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Reference.html#AuroraMySQL.Reference.Parameters.Instance
    rds_db_param_group = aws_rds.ParameterGroup(
        self, 'AuroraMySQLDBParamGroup',
        engine=rds_engine,
        description='Custom parameter group for aurora-mysql8.x',
        parameters={
            'slow_query_log': '1',
            # Removed from Aurora MySQL version 3.
            # 'tx_isolation': 'READ-COMMITTED',
            'wait_timeout': '300',
            'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
        })

    db_cluster_name = self.node.try_get_context('db_cluster_name')

    #XXX: aws_rds.Credentials.from_username(username, ...) can not be given a user-specific Secret name,
    # therefore, first create the Secret and then use it to create the database
    db_secret_name = self.node.try_get_context('db_secret_name')
    #XXX: arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
    db_secret_arn = 'arn:aws:secretsmanager:{region}:{account}:secret:{resource_name}'.format(
        region=cdk.Aws.REGION, account=cdk.Aws.ACCOUNT_ID, resource_name=db_secret_name)
    db_secret = aws_secretsmanager.Secret.from_secret_partial_arn(self, 'DBSecretFromArn', db_secret_arn)
    rds_credentials = aws_rds.Credentials.from_secret(db_secret)

    db_cluster = aws_rds.DatabaseCluster(
        self, 'Database',
        engine=rds_engine,
        # A username of 'admin' (or 'postgres' for PostgreSQL) and a SecretsManager-generated password
        credentials=rds_credentials,
        instance_props={
            'instance_type': aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3,
                                                     aws_ec2.InstanceSize.MEDIUM),
            'parameter_group': rds_db_param_group,
            'vpc_subnets': {'subnet_type': aws_ec2.SubnetType.PRIVATE_WITH_NAT},
            'vpc': vpc,
            'auto_minor_version_upgrade': False,
            'security_groups': [sg_mysql_server]
        },
        instances=2,
        parameter_group=rds_cluster_param_group,
        cloudwatch_logs_retention=aws_logs.RetentionDays.THREE_DAYS,
        cluster_identifier=db_cluster_name,
        subnet_group=rds_subnet_group,
        backup=aws_rds.BackupProps(
            retention=cdk.Duration.days(3),
            preferred_window="03:00-04:00"))

    cdk.CfnOutput(self, 'DBClusterEndpoint',
                  value=db_cluster.cluster_endpoint.socket_address,
                  export_name='DBClusterEndpoint')
    cdk.CfnOutput(self, 'DBClusterReadEndpoint',
                  value=db_cluster.cluster_read_endpoint.socket_address,
                  export_name='DBClusterReadEndpoint')
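# Companion sketch (not part of the example above): the snippet imports an existing Secrets
# Manager secret named by the 'db_secret_name' context key, per the comment "first create the
# Secret and then use it to create the database". A minimal sketch of pre-creating such a secret,
# assuming the username/password JSON shape that aws_rds.Credentials.from_secret() expects; the
# construct id and secret name below are placeholders, not values from the original code.
db_secret = aws_secretsmanager.Secret(
    self, 'DatabaseSecret',
    secret_name='dev/rds/admin',  # placeholder; must match the 'db_secret_name' context value
    generate_secret_string=aws_secretsmanager.SecretStringGenerator(
        secret_string_template=json.dumps({'username': 'admin'}),
        generate_string_key='password',
        exclude_punctuation=True))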
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    self.current_dir = os.path.dirname(__file__)

    self.quicksight_migration_source_assume_role = iam.Role(
        self, 'quicksight-migration-source-assume-role',
        description='Role for the Quicksight dashboard migration Lambdas to assume',
        role_name='quicksight-migration-source-assume-role',
        max_session_duration=core.Duration.seconds(3600),
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        inline_policies={
            'AllowAccess': iam.PolicyDocument(
                statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["quicksight:*"],
                        resources=["*"]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["ssm:GetParameter"],
                        resources=["arn:aws:ssm:*:*:parameter/infra/config"])
                ])
        })
    self.quicksight_migration_source_assume_role.assume_role_policy.add_statements(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=['sts:AssumeRole'],
            principals=[iam.AccountPrincipal("499080683179")]))

    self.quicksight_migration_target_assume_role = iam.Role(
        self, 'quicksight-migration-target-assume-role',
        description='Role for the Quicksight dashboard migration Lambdas to assume',
        role_name='quicksight-migration-target-assume-role',
        max_session_duration=core.Duration.seconds(3600),
        assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
        inline_policies={
            'AllowAccess': iam.PolicyDocument(
                statements=[
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["quicksight:*"],
                        resources=["*"]),
                    iam.PolicyStatement(
                        effect=iam.Effect.ALLOW,
                        actions=["ssm:GetParameter"],
                        resources=["arn:aws:ssm:*:*:parameter/infra/config"])
                ])
        })
    self.quicksight_migration_target_assume_role.assume_role_policy.add_statements(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=['sts:AssumeRole'],
            principals=[iam.AccountPrincipal("499080683179")]))

    self.vpc = ec2.Vpc(
        self, "VPC",
        cidr="10.0.0.0/21",
        max_azs=3,
        subnet_configuration=[
            ec2.SubnetConfiguration(
                cidr_mask=28,
                name="Database",
                subnet_type=ec2.SubnetType.ISOLATED,
            )
        ])

    self.vpc.add_interface_endpoint(
        "redshift_endpoint",
        service=ec2.InterfaceVpcEndpointAwsService("redshift"))
    self.vpc.add_interface_endpoint(
        "rds_endpoint",
        service=ec2.InterfaceVpcEndpointAwsService("rds"))

    self.redshift_secret = secrets.Secret(
        self, 'redshift-admin',
        secret_name='redshift-admin',
        description="This secret has generated admin secret password for Redshift cluster",
        generate_secret_string=secrets.SecretStringGenerator(
            secret_string_template='{"username": "******"}',
            generate_string_key='password',
            password_length=32,
            exclude_characters='"@\\\/',
            exclude_punctuation=True))

    self.redshift_cluster = redshift.Cluster(
        self, "datasource-redshift",
        master_user=redshift.Login(
            master_username="******",
            master_password=self.redshift_secret.secret_value_from_json('password')),
        vpc=self.vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED))

    self.rds_secret = secrets.Secret(
        self, 'rds-admin',
        secret_name='rds-admin',
        description="This secret has generated admin secret password for RDS cluster",
        generate_secret_string=secrets.SecretStringGenerator(
            secret_string_template='{"username": "******"}',
            generate_string_key='password',
            password_length=32,
            exclude_characters='"@\\\/',
            exclude_punctuation=True))

    self.rds_cluster = rds.DatabaseCluster(
        self, "datasource-rds",
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_08_1),
        instance_props={
            "vpc_subnets": {"subnet_type": ec2.SubnetType.ISOLATED},
            "vpc": self.vpc
        },
        credentials=rds.Credentials.from_secret(self.rds_secret))

    ssm.StringParameter(self, 'InfraConfigParam',
                        parameter_name='/infra/config',
                        string_value=json.dumps(self.to_dict()))
def _setup_postgresql(self) -> None:
    port = 3306
    database = "postgres"
    schema = "public"

    pg = rds.ParameterGroup(
        self, "aws-data-wrangler-postgresql-params",
        engine=rds.DatabaseClusterEngine.aurora_postgres(
            version=rds.AuroraPostgresEngineVersion.VER_11_13,
        ),
        parameters={
            "apg_plan_mgmt.capture_plan_baselines": "off",
        },
    )
    aurora_pg = rds.DatabaseCluster(
        self, "aws-data-wrangler-aurora-cluster-postgresql",
        removal_policy=RemovalPolicy.DESTROY,
        engine=rds.DatabaseClusterEngine.aurora_postgres(
            version=rds.AuroraPostgresEngineVersion.VER_11_13,
        ),
        cluster_identifier="postgresql-cluster-wrangler",
        instances=1,
        credentials=rds.Credentials.from_password(
            username=self.db_username,
            password=self.db_password_secret,
        ),
        port=port,
        backup=rds.BackupProps(retention=Duration.days(1)),
        parameter_group=pg,
        s3_import_buckets=[self.bucket],
        s3_export_buckets=[self.bucket],
        instance_props=rds.InstanceProps(
            vpc=self.vpc,
            security_groups=[self.db_security_group],
            publicly_accessible=True,
        ),
        subnet_group=self.rds_subnet_group,
    )
    glue.Connection(
        self, "aws-data-wrangler-postgresql-glue-connection",
        description="Connect to Aurora (PostgreSQL).",
        type=glue.ConnectionType.JDBC,
        connection_name="aws-data-wrangler-postgresql",
        properties={
            "JDBC_CONNECTION_URL": f"jdbc:postgresql://{aurora_pg.cluster_endpoint.hostname}:{port}/{database}",
            "USERNAME": self.db_username,
            "PASSWORD": self.db_password,
        },
        subnet=self.vpc.private_subnets[0],
        security_groups=[self.db_security_group],
    )
    secrets.Secret(
        self, "aws-data-wrangler-postgresql-secret",
        secret_name="aws-data-wrangler/postgresql",
        description="Postgresql credentials",
        generate_secret_string=secrets.SecretStringGenerator(
            generate_string_key="dummy",
            secret_string_template=json.dumps(
                {
                    "username": self.db_username,
                    "password": self.db_password,
                    "engine": "postgresql",
                    "host": aurora_pg.cluster_endpoint.hostname,
                    "port": port,
                    "dbClusterIdentifier": aurora_pg.cluster_identifier,
                    "dbname": database,
                }
            ),
        ),
    )
    CfnOutput(self, "PostgresqlAddress", value=aurora_pg.cluster_endpoint.hostname)
    CfnOutput(self, "PostgresqlPort", value=str(port))
    CfnOutput(self, "PostgresqlDatabase", value=database)
    CfnOutput(self, "PostgresqlSchema", value=schema)
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # get acct id for policies
    # acct_id = env['account']

    # creates a new vpc, subnets, 2 nat gateways, etc
    vpc = ec2.Vpc(self, "VPC")
    # mocking vpc from my environment
    # vpc = ec2.Vpc.from_lookup(self, "nonDefaultVpc", vpc_id="vpc-9931a0fc")

    self._rds_subnet_group = rds.SubnetGroup(
        self, 'RdsSubnetGroup',
        description="aaa",
        subnet_group_name='aurora-mysql',
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
        vpc=vpc)

    # create the RDS cluster
    self._rds_cluster = rds.DatabaseCluster(
        self, "RDS Cluster",
        cluster_identifier="rds-test",
        engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
        instance_props=rds.InstanceProps(
            vpc=vpc,
            instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.SMALL),
        ),
        port=3306,
        default_database_name="test",
        subnet_group=self._rds_subnet_group)

    # enable autoscaling for rds
    # 3 servers maximum
    # scale on 1% cpu for testing, 50% normally
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_applicationautoscaling/ScalableTarget.html
    self._scaling_target = autoscale.ScalableTarget(
        self, "Scaling Target",
        max_capacity=3,
        min_capacity=1,
        resource_id='cluster:' + self._rds_cluster.cluster_identifier,
        scalable_dimension='rds:cluster:ReadReplicaCount',
        service_namespace=autoscale.ServiceNamespace.RDS)

    self._scale_policy = autoscale.TargetTrackingScalingPolicy(
        self, "Tracking Scaling Policy",
        policy_name='thisisscalingpolicyname',
        target_value=1,
        predefined_metric=autoscale.PredefinedMetric.RDS_READER_AVERAGE_CPU_UTILIZATION,
        scaling_target=self._scaling_target,
        scale_in_cooldown=core.Duration.minutes(5),
        scale_out_cooldown=core.Duration.minutes(5),
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # The code that defines your stack goes here
    vpc_name = self.node.try_get_context("vpc_name")
    vpc = aws_ec2.Vpc.from_lookup(self, "ExistingVPC",
                                  is_default=True,
                                  vpc_name=vpc_name)

    sg_use_mysql = aws_ec2.SecurityGroup(
        self, 'MySQLClientSG',
        vpc=vpc,
        allow_all_outbound=True,
        description='security group for mysql client',
        security_group_name='use-default-mysql')
    core.Tags.of(sg_use_mysql).add('Name', 'use-default-mysql')

    sg_mysql_server = aws_ec2.SecurityGroup(
        self, 'MySQLServerSG',
        vpc=vpc,
        allow_all_outbound=True,
        description='security group for mysql',
        security_group_name='default-mysql-server')
    sg_mysql_server.add_ingress_rule(peer=sg_use_mysql,
                                     connection=aws_ec2.Port.tcp(3306),
                                     description='use-default-mysql')
    core.Tags.of(sg_mysql_server).add('Name', 'mysql-server')

    rds_subnet_group = aws_rds.SubnetGroup(
        self, 'RdsSubnetGroup',
        description='subnet group for mysql',
        subnet_group_name='aurora-mysql',
        vpc_subnets=aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.PRIVATE),
        vpc=vpc)

    rds_engine = aws_rds.DatabaseClusterEngine.aurora_mysql(
        version=aws_rds.AuroraMysqlEngineVersion.VER_2_08_1)

    rds_cluster_param_group = aws_rds.ParameterGroup(
        self, 'AuroraMySQLClusterParamGroup',
        engine=rds_engine,
        description='Custom cluster parameter group for aurora-mysql5.7',
        parameters={
            'innodb_flush_log_at_trx_commit': '2',
            'slow_query_log': '1',
            'tx_isolation': 'READ-COMMITTED',
            'wait_timeout': '300',
            'character-set-client-handshake': '0',
            'character_set_server': 'utf8mb4',
            'collation_server': 'utf8mb4_unicode_ci',
            'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
        })

    rds_db_param_group = aws_rds.ParameterGroup(
        self, 'AuroraMySQLDBParamGroup',
        engine=rds_engine,
        description='Custom parameter group for aurora-mysql5.7',
        parameters={
            'slow_query_log': '1',
            'tx_isolation': 'READ-COMMITTED',
            'wait_timeout': '300',
            'init_connect': 'SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci'
        })

    db_cluster_name = self.node.try_get_context('db_cluster_name')
    #XXX: aws_rds.Credentials.from_username(username, ...) can not be given a user-specific Secret name
    #XXX: therefore, first create the Secret and then use it to create the database
    db_secret_name = self.node.try_get_context('db_secret_name')
    #XXX: arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name}
    db_secret_arn = 'arn:aws:secretsmanager:{region}:{account}:secret:{resource_name}'.format(
        region=core.Aws.REGION, account=core.Aws.ACCOUNT_ID, resource_name=db_secret_name)
    db_secret = aws_secretsmanager.Secret.from_secret_arn(self, 'DBSecretFromArn', db_secret_arn)
    rds_credentials = aws_rds.Credentials.from_secret(db_secret)

    db_cluster = aws_rds.DatabaseCluster(
        self, 'Database',
        engine=rds_engine,
        credentials=rds_credentials,
        instance_props={
            'instance_type': aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3,
                                                     aws_ec2.InstanceSize.MEDIUM),
            'parameter_group': rds_db_param_group,
            'vpc_subnets': {'subnet_type': aws_ec2.SubnetType.PRIVATE},
            'vpc': vpc,
            'auto_minor_version_upgrade': False,
            'security_groups': [sg_mysql_server]
        },
        instances=2,
        parameter_group=rds_cluster_param_group,
        cloudwatch_logs_retention=aws_logs.RetentionDays.THREE_DAYS,
        cluster_identifier=db_cluster_name,
        subnet_group=rds_subnet_group,
        backup=aws_rds.BackupProps(retention=core.Duration.days(3),
                                   preferred_window="03:00-04:00"))

    sg_mysql_public_proxy = aws_ec2.SecurityGroup(
        self, 'MySQLPublicProxySG',
        vpc=vpc,
        allow_all_outbound=True,
        description='security group for mysql public proxy',
        security_group_name='default-mysql-public-proxy')
    sg_mysql_public_proxy.add_ingress_rule(
        peer=aws_ec2.Peer.any_ipv4(),
        connection=aws_ec2.Port.tcp(3306),
        description='mysql public proxy')
    core.Tags.of(sg_mysql_public_proxy).add('Name', 'mysql-public-proxy')

    #XXX: Database Proxy only uses the Secret Arn of the target database or database cluster
    #XXX: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbproxy-authformat.html
    #XXX: If a new Secret for a database user is created, it is necessary to update the Resource of the Proxy IAM Role to access the new Secret.
    #XXX: Otherwise, the new database user can not connect to the database through the RDS Proxy.
    db_proxy = aws_rds.DatabaseProxy(
        self, 'DBProxy',
        proxy_target=aws_rds.ProxyTarget.from_cluster(db_cluster),
        secrets=[db_secret],
        vpc=vpc,
        db_proxy_name='{}-proxy'.format(db_cluster_name),
        idle_client_timeout=core.Duration.minutes(10),
        max_connections_percent=90,
        max_idle_connections_percent=10,
        security_groups=[sg_use_mysql, sg_mysql_public_proxy],
        vpc_subnets=aws_ec2.SubnetSelection(subnet_type=aws_ec2.SubnetType.PUBLIC))
def __init__(
    self, scope: core.Construct, id: str,
    keycloak_domain: str,
    vpc: ec2.IVpc = None,
    cluster: ecs.ICluster = None,
    load_balancer: elbv2.IApplicationLoadBalancer = None,
    log_group: logs.ILogGroup = None,
    keycloak_database_name: str = 'keycloak',
    keycloak_database_user: str = 'admin',
    **kwargs
) -> None:
    super().__init__(scope, id, **kwargs)

    # The code that defines your stack goes here
    keycloak_task_role = iam.Role(
        self, 'KeycloakTaskRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))

    keycloak_database_secret = secretsmanager.Secret(
        self, 'KeycloakDatabaseSecret',
        description='Keycloak Database Password',
        generate_secret_string=secretsmanager.SecretStringGenerator(exclude_punctuation=True))

    keycloak_database_cluster = rds.DatabaseCluster(
        self, 'KeycloakDatabaseCluster',
        engine=rds.DatabaseClusterEngine.AURORA,
        instance_props=rds.InstanceProps(
            instance_type=ec2.InstanceType.of(
                instance_class=ec2.InstanceClass.BURSTABLE3,
                instance_size=ec2.InstanceSize.SMALL),
            vpc=vpc,
        ),
        master_user=rds.Login(
            username=keycloak_database_user,
            password=keycloak_database_secret.secret_value,
        ),
        instances=1,
        default_database_name=keycloak_database_name,
        removal_policy=core.RemovalPolicy.DESTROY,
    )

    keycloak_hosted_zone = route53.HostedZone.from_lookup(
        self, 'KeycloakHostedZone',
        domain_name=keycloak_domain)

    keycloak_certificate = acm.DnsValidatedCertificate(
        self, 'KeycloakCertificate',
        hosted_zone=keycloak_hosted_zone,
        domain_name='keycloak.' + keycloak_domain)

    keycloak_service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self, 'KeycloakLoadBalancedFargateService',
        load_balancer=load_balancer,
        cluster=cluster,
        task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_asset('keycloak'),
            container_port=8080,
            enable_logging=True,
            task_role=keycloak_task_role,
            log_driver=ecs.AwsLogDriver(
                stream_prefix='keycloak',
                log_group=log_group,
            ),
            secrets={
                'DB_PASSWORD': ecs.Secret.from_secrets_manager(keycloak_database_secret),
            },
            environment={
                'DB_VENDOR': 'mysql',
                'DB_USER': keycloak_database_user,
                'DB_ADDR': keycloak_database_cluster.cluster_endpoint.hostname,
                'DB_DATABASE': keycloak_database_name,
                # 'KEYCLOAK_LOGLEVEL': 'DEBUG',
                'PROXY_ADDRESS_FORWARDING': 'true',
            },
        ),
        memory_limit_mib=512,
        cpu=256,
        desired_count=1,
        public_load_balancer=True,
        domain_name='keycloak.' + keycloak_domain,
        domain_zone=keycloak_hosted_zone,
        protocol=elbv2.ApplicationProtocol.HTTPS,
    )

    keycloak_service.target_group.enable_cookie_stickiness(core.Duration.seconds(24 * 60 * 60))
    keycloak_service.target_group.configure_health_check(
        port='8080',
        path='/auth/realms/master/.well-known/openid-configuration',
        timeout=core.Duration.seconds(20),
        healthy_threshold_count=2,
        unhealthy_threshold_count=10,
        interval=core.Duration.seconds(30),
    )
    keycloak_service.listener.add_certificates(
        'KeycloakListenerCertificate',
        certificates=[keycloak_certificate])

    keycloak_database_cluster.connections.allow_default_port_from(
        keycloak_service.service, 'From Keycloak Fargate Service')
def __init__(self, scope: core.Construct, config: dict, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # create the security group that allows connecting to this instance # I am lazy and create only 1 SG that allows TCP 5432 from itself # database clients (lambda functions) will have TCP 5432 authorized for themselves too, # which is not necessary but harmless self.db_security_group = ec2.SecurityGroup(self, "Database Security Group", vpc=config['vpc']) self.db_security_group.add_ingress_rule(self.db_security_group, ec2.Port.tcp(5432)) self.cluster = rds.DatabaseCluster( self, config['rds']['name'], engine=rds.DatabaseClusterEngine.aurora_postgres( version=rds.AuroraPostgresEngineVersion.VER_11_7), default_database_name=config['rds']['databaseName'], master_user=rds.Login(username=config['rds']['masterUsername']), instance_props=rds.InstanceProps( vpc=config['vpc'], security_groups=[self.db_security_group])) # Add Secrets Manager Password rotation self.cluster.add_rotation_single_user() # aurora serverless is not yet supported by CDK, https://github.com/aws/aws-cdk/issues/929 # escape hatch https://docs.aws.amazon.com/cdk/latest/guide/cfn_layer.html#cfn_layer_raw # cfn_aurora_cluster = cluster.node.default_child # cfn_aurora_cluster.add_override("Properties.EngineMode", "serverless") # cfn_aurora_cluster.add_override("Properties.EnableHttpEndpoint",True) # Enable Data API # cfn_aurora_cluster.add_override("Properties.ScalingConfiguration", { # 'AutoPause': True, # 'MaxCapacity': 4, # 'MinCapacity': 1, # 'SecondsUntilAutoPause': 600 # }) # cluster.node.try_remove_child('Instance1') # Remove 'Server' instance that isn't required for serverless Aurora # create a custom resource to initialize the data schema function = _lambda.Function( self, config['custom resource lambda']['name'], runtime=_lambda.Runtime.PYTHON_3_8, code=_lambda.Code.asset('./custom_resources'), handler='app.on_event', vpc=config['vpc'], environment={ 'DB_SECRET_ARN': self.get_secret_arn(), 'PYTHON_LOGLEVEL': 'DEBUG' }, security_groups=[self.db_security_group]) # add permission to access the secret function.add_to_role_policy( iam.PolicyStatement(resources=[self.get_secret_arn()], actions=["secretsmanager:GetSecretValue"])) custom_resource_provider = cr.Provider(self, 'Custom Resource Provider', on_event_handler=function) custom_resource = core.CustomResource( self, 'Custom Resource', service_token=custom_resource_provider.service_token) # Tell CFN to wait for the database to be ready before it creates the custom resource custom_resource.node.add_dependency(self.cluster)
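# A minimal sketch, under assumptions, of the custom resource handler referenced above
# (handler 'app.on_event' in ./custom_resources). It reads the cluster secret passed in
# DB_SECRET_ARN and creates an initial schema on stack creation. The DDL and the choice of
# the pure-Python pg8000 driver are illustrative; the real handler in the repository may differ.
import json
import os

import boto3
import pg8000.native


def on_event(event, context):
    if event['RequestType'] != 'Create':
        # Nothing to do on Update/Delete in this sketch.
        return {'PhysicalResourceId': 'db-schema-init'}

    secret = boto3.client('secretsmanager').get_secret_value(
        SecretId=os.environ['DB_SECRET_ARN'])
    creds = json.loads(secret['SecretString'])

    # Secrets generated by the RDS construct contain host/port/dbname alongside the credentials.
    conn = pg8000.native.Connection(
        user=creds['username'],
        password=creds['password'],
        host=creds['host'],
        port=int(creds.get('port', 5432)),
        database=creds.get('dbname'))
    conn.run('CREATE TABLE IF NOT EXISTS items (id SERIAL PRIMARY KEY, name TEXT)')
    conn.close()
    return {'PhysicalResourceId': 'db-schema-init'}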
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) #create VPC self.vpc = ec2.Vpc( self, 'SonarVPC', max_azs=3 ) #DB Security Group with required ingress rules self.sg= ec2.SecurityGroup( self, "SonarQubeSG", vpc=self.vpc, allow_all_outbound=True, description="Aurora Security Group" ) self.sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5432), "SonarDBAurora") pgroup = rds.ParameterGroup.from_parameter_group_name( self, "SonarDBParamGroup", parameter_group_name='default.aurora-postgresql11' ) #create RDS Cluster self.db= rds.DatabaseCluster(self, 'SonarDBCluster', engine= rds.DatabaseClusterEngine.aurora_postgres(version=rds.AuroraPostgresEngineVersion.VER_11_6), default_database_name= 'sonarqube', parameter_group= pgroup, master_user=rds.Login(username= "******"), instance_props= rds.InstanceProps( instance_type= ec2.InstanceType.of( ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.MEDIUM ), security_groups= [self.sg], vpc= self.vpc ) ) #create Cluster self.cluster= ecs.Cluster(self, "SonarCluster", capacity= ecs.AddCapacityOptions( instance_type= ec2.InstanceType('m5.large')), vpc= self.vpc ) asg= self.cluster.autoscaling_group user_data= asg.user_data user_data.add_commands('sysctl -qw vm.max_map_count=262144') user_data.add_commands('sysctl -w fs.file-max=65536') user_data.add_commands('ulimit -n 65536') user_data.add_commands('ulimit -u 4096') #Create iam Role for Task self.task_role = iam.Role( self, id= "SonarTaskRole", role_name= "SonarTaskRole", assumed_by= iam.ServicePrincipal(service= "ecs-tasks.amazonaws.com"), managed_policies= [ iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonECSTaskExecutionRolePolicy") ] ) #Grant permission for Task to read secret from SecretsManager self.db.secret.grant_read(self.task_role) url = 'jdbc:postgresql://{}/sonarqube'.format(self.db.cluster_endpoint.socket_address) #create task task= ecs_patterns.ApplicationLoadBalancedEc2Service(self, "SonarService", # if a cluster is provided use the same vpc cluster= self.cluster, cpu=512, desired_count=1, task_image_options= ecs_patterns.ApplicationLoadBalancedTaskImageOptions( image=ecs.ContainerImage.from_registry("sonarqube:8.2-community"), container_port=9000, secrets={ "sonar.jdbc.username": ecs.Secret.from_secrets_manager(self.db.secret, field="username"), "sonar.jdbc.password": ecs.Secret.from_secrets_manager(self.db.secret, field="password") }, environment={ 'sonar.jdbc.url': url }, task_role= self.task_role ), memory_limit_mib=2048, public_load_balancer=True ) container = task.task_definition.default_container container.add_ulimits( ecs.Ulimit( name=ecs.UlimitName.NOFILE, soft_limit=65536, hard_limit=65536 ) )
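# A small, hedged addition to the SonarQube stack above: export the public URL of the
# load-balanced service so it is easy to find after deployment. The output id is an
# illustrative choice, not something defined in the stack above.
core.CfnOutput(
    self, 'SonarQubeURL',
    value='http://{}'.format(task.load_balancer.load_balancer_dns_name),
    description='Public endpoint of the SonarQube service')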
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) vpc_name = self.node.try_get_context("vpc_name") vpc = aws_ec2.Vpc.from_lookup(self, "ExistingVPC", is_default=True, vpc_name=vpc_name) sg_postgresql_client = aws_ec2.SecurityGroup( self, 'PostgreSQLClientSG', vpc=vpc, allow_all_outbound=True, description='security group for postgresql client', security_group_name='default-postgresql-client-sg') cdk.Tags.of(sg_postgresql_client).add('Name', 'default-postgresql-client-sg') sg_postgresql_server = aws_ec2.SecurityGroup( self, 'PostgreSQLServerSG', vpc=vpc, allow_all_outbound=True, description='security group for postgresql', security_group_name='default-postgresql-server-sg') sg_postgresql_server.add_ingress_rule( peer=sg_postgresql_client, connection=aws_ec2.Port.tcp(5432), description='default-postgresql-client-sg') sg_postgresql_server.add_ingress_rule( peer=sg_postgresql_server, connection=aws_ec2.Port.all_tcp(), description='default-postgresql-server-sg') cdk.Tags.of(sg_postgresql_server).add('Name', 'default-postgresql-server-sg') rds_subnet_group = aws_rds.SubnetGroup( self, 'PostgreSQLSubnetGroup', description='subnet group for postgresql', subnet_group_name='aurora-postgresql', vpc_subnets=aws_ec2.SubnetSelection( subnet_type=aws_ec2.SubnetType.PRIVATE_WITH_NAT), vpc=vpc) db_cluster_name = self.node.try_get_context('db_cluster_name') #XXX: aws_rds.Credentials.from_username(username, ...) can not be given a user-specific Secret name # therefore, first create the Secret and then use it to create the database db_secret_name = self.node.try_get_context('db_secret_name') #XXX: arn:{partition}:{service}:{region}:{account}:{resource}{sep}{resource-name} db_secret_arn = 'arn:aws:secretsmanager:{region}:{account}:secret:{resource_name}'.format( region=cdk.Aws.REGION, account=cdk.Aws.ACCOUNT_ID, resource_name=db_secret_name) db_secret = aws_secretsmanager.Secret.from_secret_partial_arn( self, 'DBSecretFromArn', db_secret_arn) rds_credentials = aws_rds.Credentials.from_secret(db_secret) rds_engine = aws_rds.DatabaseClusterEngine.aurora_postgres( version=aws_rds.AuroraPostgresEngineVersion.VER_13_4) #XXX: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Reference.ParameterGroups.html#AuroraPostgreSQL.Reference.Parameters.Cluster rds_cluster_param_group = aws_rds.ParameterGroup( self, 'AuroraPostgreSQLClusterParamGroup', engine=rds_engine, description='Custom cluster parameter group for aurora-postgresql13', parameters={ 'log_min_duration_statement': '15000', # 15 sec 'default_transaction_isolation': 'read committed', 'client_encoding': 'UTF8' }) #XXX: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Reference.ParameterGroups.html#AuroraPostgreSQL.Reference.Parameters.Instance rds_db_param_group = aws_rds.ParameterGroup( self, 'AuroraPostgreSQLDBParamGroup', engine=rds_engine, description='Custom parameter group for aurora-postgresql13', parameters={ 'log_min_duration_statement': '15000', # 15 sec 'default_transaction_isolation': 'read committed' }) db_cluster = aws_rds.DatabaseCluster( self, 'Database', engine=rds_engine, credentials=rds_credentials, # A username of 'admin' (or 'postgres' for PostgreSQL) and SecretsManager-generated password instance_props={ 'instance_type': aws_ec2.InstanceType.of(aws_ec2.InstanceClass.BURSTABLE3, aws_ec2.InstanceSize.MEDIUM), 'parameter_group': rds_db_param_group, 'vpc_subnets': { 'subnet_type': aws_ec2.SubnetType.PRIVATE_WITH_NAT },
'vpc': vpc, 'auto_minor_version_upgrade': False, 'security_groups': [sg_postgresql_server] }, instances=2, parameter_group=rds_cluster_param_group, cloudwatch_logs_retention=aws_logs.RetentionDays.THREE_DAYS, cluster_identifier=db_cluster_name, subnet_group=rds_subnet_group, backup=aws_rds.BackupProps(retention=cdk.Duration.days(3), preferred_window="03:00-04:00")) cdk.CfnOutput(self, 'DBClusterEndpoint', value=db_cluster.cluster_endpoint.socket_address, export_name='DBClusterEndpoint') cdk.CfnOutput(self, 'DBClusterReadEndpoint', value=db_cluster.cluster_read_endpoint.socket_address, export_name='DBClusterReadEndpoint')
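# The stack above only looks up an existing Secret by name, so that Secret has to be created
# beforehand (typically in a separate bootstrap stack; it is shown inline here for brevity).
# A minimal sketch of such a bootstrap; the construct id, the 'postgres' username and the
# password length are illustrative assumptions.
db_bootstrap_secret = aws_secretsmanager.Secret(
    self, 'AuroraPostgresSecret',
    secret_name=self.node.try_get_context('db_secret_name'),
    generate_secret_string=aws_secretsmanager.SecretStringGenerator(
        secret_string_template=json.dumps({'username': 'postgres'}),
        generate_string_key='password',
        exclude_punctuation=True,
        password_length=16))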
def _setup_mysql(self) -> None: port = 3306 database = "test" schema = "test" aurora_mysql = rds.DatabaseCluster( self, "aws-data-wrangler-aurora-cluster-mysql", removal_policy=RemovalPolicy.DESTROY, engine=rds.DatabaseClusterEngine.aurora_mysql( version=rds.AuroraMysqlEngineVersion.VER_5_7_12, ), cluster_identifier="mysql-cluster-wrangler", instances=1, default_database_name=database, credentials=rds.Credentials.from_password( username=self.db_username, password=self.db_password_secret, ), port=port, backup=rds.BackupProps(retention=Duration.days(1)), instance_props=rds.InstanceProps( vpc=self.vpc, security_groups=[self.db_security_group], publicly_accessible=True, ), subnet_group=self.rds_subnet_group, s3_import_buckets=[self.bucket], s3_export_buckets=[self.bucket], ) glue.Connection( self, "aws-data-wrangler-mysql-glue-connection", description="Connect to Aurora (MySQL).", type=glue.ConnectionType.JDBC, connection_name="aws-data-wrangler-mysql", properties={ "JDBC_CONNECTION_URL": f"jdbc:mysql://{aurora_mysql.cluster_endpoint.hostname}:{port}/{database}", "USERNAME": self.db_username, "PASSWORD": self.db_password, }, subnet=self.vpc.private_subnets[0], security_groups=[self.db_security_group], ) glue.Connection( self, "aws-data-wrangler-mysql-glue-connection-ssl", description="Connect to Aurora (MySQL) with SSL.", type=glue.ConnectionType.JDBC, connection_name="aws-data-wrangler-mysql-ssl", properties={ "JDBC_CONNECTION_URL": f"jdbc:mysql://{aurora_mysql.cluster_endpoint.hostname}:{port}/{database}", "USERNAME": self.db_username, "PASSWORD": self.db_password, "JDBC_ENFORCE_SSL": "true", "CUSTOM_JDBC_CERT": "s3://rds-downloads/rds-combined-ca-bundle.pem", }, subnet=self.vpc.private_subnets[0], security_groups=[self.db_security_group], ) secrets.Secret( self, "aws-data-wrangler-mysql-secret", secret_name="aws-data-wrangler/mysql", description="MySQL credentials", generate_secret_string=secrets.SecretStringGenerator( generate_string_key="dummy", secret_string_template=json.dumps( { "username": self.db_username, "password": self.db_password, "engine": "mysql", "host": aurora_mysql.cluster_endpoint.hostname, "port": port, "dbClusterIdentifier": aurora_mysql.cluster_identifier, "dbname": database, } ), ), ) CfnOutput(self, "MysqlAddress", value=aurora_mysql.cluster_endpoint.hostname) CfnOutput(self, "MysqlPort", value=str(port)) CfnOutput(self, "MysqlDatabase", value=database) CfnOutput(self, "MysqlSchema", value=schema)
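# A hedged usage sketch: once the stack above is deployed, the Glue connection created there
# can be consumed from awswrangler, which appears to be the library this test infrastructure
# belongs to. The query is illustrative; the connection name matches the Glue connection above.
import awswrangler as wr

con = wr.mysql.connect("aws-data-wrangler-mysql")
df = wr.mysql.read_sql_query("SELECT 1 AS health_check", con=con)
con.close()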
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # The code that defines your stack goes here # --- iam --- instance_role = iam.Role( self, 'web_server_instance_role', assumed_by=iam.ServicePrincipal(service="ec2.amazonaws.com"), ) instance_role.add_managed_policy( iam.ManagedPolicy.from_aws_managed_policy_name( 'AmazonSSMManagedInstanceCore')) # --- vpc --- main_vpc = ec2.Vpc( self, 'main_vpc', cidr='10.0.0.0/16', ) # --- security groups --- rds_sg = ec2.SecurityGroup(self, 'rds_sg', vpc=main_vpc, allow_all_outbound=True, security_group_name='rds_sg') rds_sg.add_ingress_rule(peer=rds_sg, connection=ec2.Port.tcp(3306)) # --- ec2 instance --- userdata_script = Path('assets/bastion.sh').read_text() bastion = ec2.Instance( self, 'bastion', instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3_AMD, ec2.InstanceSize.MEDIUM), machine_image=ec2.AmazonLinuxImage( generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2), vpc=main_vpc, role=instance_role, allow_all_outbound=True, vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC), security_group=rds_sg) bastion.add_user_data(userdata_script) # --- rds database --- aurora_db = rds.DatabaseCluster( self, 'aurora_rds', engine=rds.DatabaseClusterEngine.AURORA_MYSQL, master_user={"username": "******"}, instance_props={ "instance_type": ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.MEDIUM), "vpc_subnets": { "subnet_type": ec2.SubnetType.PRIVATE }, "vpc": main_vpc, "security_groups": [rds_sg] }, ) core.CfnOutput(self, 'instance-id', value=bastion.instance_id)
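# A small, hedged addition to the stack above: export the Aurora endpoint and the name of the
# generated master-user secret so they are easy to look up from the bastion host (for example
# with the AWS CLI). The output ids are illustrative choices.
core.CfnOutput(self, 'aurora-endpoint',
               value=aurora_db.cluster_endpoint.socket_address)
if aurora_db.secret is not None:
    core.CfnOutput(self, 'aurora-secret-name',
                   value=aurora_db.secret.secret_name)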