def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    pearson_vpn_connection = ec2.Peer.ipv4('159.182.0.0/16')

    # Props setup
    stage = scope.node.try_get_context('stage')
    my_service_name = scope.node.try_get_context('serviceName')
    api_health_path = props['apiHealthPath']
    tca_health_path = props['tcaHealthPath']

    # Setup IAM role for VPC flow logs
    vpc_flow_role = iam.Role(
        self, 'FlowLog',
        assumed_by=iam.ServicePrincipal('vpc-flow-logs.amazonaws.com'))
    vpc_flow_role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'iam:PassRole',
                'logs:CreateLogGroup',
                'logs:DescribeLogGroups',
                'logs:CreateLogStream',
                'logs:PutLogEvents'
            ],
            resources=["*"]))

    # Create CloudWatch log group
    log_group = logs.LogGroup(
        self, 'LogGroup',
        log_group_name="{0}-{1}".format(my_service_name, stage),
        retention=logs.RetentionDays.ONE_YEAR,
        removal_policy=core.RemovalPolicy.DESTROY)

    # Setup VPC resource
    vpc = ec2.Vpc(self, '{0}-{1}-vpc'.format(my_service_name, stage),
                  cidr=props['cidr'],
                  max_azs=props['vpcAzCount'])

    # Setup VPC flow logs, delivered to the log group created above
    # (the log group name already includes the stage)
    vpc_log = ec2.CfnFlowLog(
        self, 'FlowLogs',
        resource_id=vpc.vpc_id,
        resource_type='VPC',
        traffic_type='ALL',
        deliver_logs_permission_arn=vpc_flow_role.role_arn,
        log_destination_type='cloud-watch-logs',
        log_group_name=log_group.log_group_name)

    # Setup security group in VPC, allowing SSH from the Pearson VPN only
    vpc_sg = ec2.SecurityGroup(
        self, 'EcSSG',
        vpc=vpc,
        description="Security Group for Oculus vpc",
        security_group_name="{0}-{1}-vpc-sg".format(my_service_name, stage))
    vpc_sg.add_ingress_rule(peer=pearson_vpn_connection,
                            connection=ec2.Port.tcp(22))

    # ALB security group, allowing HTTPS from the Pearson VPN only
    alb_sg = ec2.SecurityGroup(
        self, 'AlbSG',
        vpc=vpc,
        description="Security group for oculus ALB",
        security_group_name="{0}-{1}-alb-sg".format(my_service_name, stage))
    alb_sg.add_ingress_rule(peer=pearson_vpn_connection,
                            connection=ec2.Port.tcp(443))

    # Setup ALB
    alb = elbv2.ApplicationLoadBalancer(self, 'ALB',
                                        vpc=vpc,
                                        internet_facing=True,
                                        security_group=alb_sg)

    # Target groups for the API, web, and TCA services
    # (note: the listener below creates its own target groups via
    # add_targets, so these three are never attached to anything)
    api_tg = elbv2.ApplicationTargetGroup(
        self, 'ApiTargetGroup',
        port=8080,
        protocol=elbv2.ApplicationProtocol.HTTP,
        vpc=vpc)
    web_tg = elbv2.ApplicationTargetGroup(
        self, 'WebTargetGroup',
        port=3030,
        protocol=elbv2.ApplicationProtocol.HTTP,
        vpc=vpc)
    tca_tg = elbv2.ApplicationTargetGroup(
        self, 'TcaTargetGroup',
        port=8080,
        protocol=elbv2.ApplicationProtocol.HTTP,
        vpc=vpc)

    # Setup ECS cluster
    ecs_cluster = ecs.Cluster(
        self, 'ECSCluster',
        vpc=vpc,
        cluster_name="{0}-{1}".format(my_service_name, stage))

    # ECS execution role - grants the ECS agent permission to call AWS APIs
    ecs_execution_role = iam.Role(
        self, 'ECSExecutionRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        role_name="{0}-{1}-execution-role".format(my_service_name, stage))

    # Setup role permissions
    ecs_execution_role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'elasticloadbalancing:DeregisterInstancesFromLoadBalancer',
                'elasticloadbalancing:DeregisterTargets',
                'elasticloadbalancing:Describe*',
                'elasticloadbalancing:RegisterInstancesWithLoadBalancer',
                'elasticloadbalancing:RegisterTargets',
                'ec2:Describe*',
                'ec2:AuthorizeSecurityGroupIngress',
                'sts:AssumeRole',
                'ssm:GetParameters',
                'secretsmanager:GetSecretValue',
                'ecr:GetAuthorizationToken',
                'ecr:BatchCheckLayerAvailability',
                'ecr:GetDownloadUrlForLayer',
                'ecr:BatchGetImage',
                'logs:CreateLogStream',
                'logs:PutLogEvents',
                'application-autoscaling:*',
                'cloudwatch:DescribeAlarms',
                'cloudwatch:PutMetricAlarm'
            ],
            resources=["*"]))

    # ECS task role - grants containers in the task permission to call AWS APIs
    ecs_task_role = iam.Role(
        self, 'ECSTaskRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        role_name="{0}-{1}-task-role".format(my_service_name, stage))

    # Setup role permissions
    ecs_task_role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'logs:CreateLogStream',
                'logs:PutLogEvents',
                'dynamodb:Query',
                'dynamodb:ListTables',
                'secretsmanager:GetSecretValue',
                'kms:Decrypt'
            ],
            resources=["*"]))

    # Setup API task definition
    api_taskdef = ecs.FargateTaskDefinition(
        self, 'APIFargateTask',
        memory_limit_mib=512,
        cpu=256,
        execution_role=ecs_execution_role,
        task_role=ecs_task_role,
        family="{0}-{1}-api".format(my_service_name, stage))

    # Setup web task definition
    web_taskdef = ecs.FargateTaskDefinition(
        self, 'WebFargateTask',
        memory_limit_mib=512,
        cpu=256,
        execution_role=ecs_execution_role,
        task_role=ecs_task_role,
        family="{0}-{1}-web".format(my_service_name, stage))

    # Setup TCA task definition
    tca_taskdef = ecs.FargateTaskDefinition(
        self, 'TcaFargateTask',
        memory_limit_mib=512,
        cpu=256,
        execution_role=ecs_execution_role,
        task_role=ecs_task_role,
        family="{0}-{1}-tca".format(my_service_name, stage))

    # Existing ECR repositories holding the service images
    api_repo = ecr.Repository.from_repository_arn(
        self, 'ApiImage',
        repository_arn="arn:aws:ecr:us-east-1:346147488134:repository/oculus-api")
    web_repo = ecr.Repository.from_repository_arn(
        self, 'WebImage',
        repository_arn="arn:aws:ecr:us-east-1:346147488134:repository/oculus-web")
    tca_repo = ecr.Repository.from_repository_arn(
        self, 'TcaImage',
        repository_arn="arn:aws:ecr:us-east-1:346147488134:repository/oculus-tca-api")

    # Add API container to task
    api_container = api_taskdef.add_container(
        "oculus-cdk-{}-api".format(stage),
        image=ecs.EcrImage(repository=api_repo, tag="devqaurl"),
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="{0}-{1}-api".format(my_service_name, stage),
            log_group=log_group))

    # Add web container to task
    web_container = web_taskdef.add_container(
        "oculus-cdk-{}-web".format(stage),
        image=ecs.EcrImage(repository=web_repo, tag="removeMetaMockup"),
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="{0}-{1}-web".format(my_service_name, stage),
            log_group=log_group))

    # Add TCA container to task
    tca_container = tca_taskdef.add_container(
        "oculus-cdk-{}-tca".format(stage),
        image=ecs.EcrImage(repository=tca_repo, tag="ocu-1109"),
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="{0}-{1}-tca".format(my_service_name, stage),
            log_group=log_group))

    # Setup API port mappings
    api_container.add_port_mappings(
        ecs.PortMapping(container_port=8080, host_port=8080,
                        protocol=ecs.Protocol.TCP))

    # Setup web port mappings
    web_container.add_port_mappings(
        ecs.PortMapping(container_port=3030, host_port=3030,
                        protocol=ecs.Protocol.TCP))

    # Setup TCA port mappings
    tca_container.add_port_mappings(
        ecs.PortMapping(container_port=8080, host_port=8080,
                        protocol=ecs.Protocol.TCP))

    # Setup API Fargate service with CPU-based auto scaling
    api_service = ecs.FargateService(
        self, "FargateServiceAPI",
        task_definition=api_taskdef,
        cluster=ecs_cluster,
        desired_count=1,
        service_name="{0}-{1}-api".format(my_service_name, stage))
    api_scaling = api_service.auto_scale_task_count(max_capacity=5)
    api_scaling.scale_on_cpu_utilization('ApiCpuScaling',
                                         target_utilization_percent=50)

    # Setup web Fargate service with CPU-based auto scaling
    web_service = ecs.FargateService(
        self, "FargateServiceWeb",
        task_definition=web_taskdef,
        cluster=ecs_cluster,
        desired_count=1,
        service_name="{0}-{1}-web".format(my_service_name, stage))
    web_scaling = web_service.auto_scale_task_count(max_capacity=5)
    web_scaling.scale_on_cpu_utilization('WebCpuScaling',
                                         target_utilization_percent=50)

    # Setup TCA Fargate service with CPU-based auto scaling
    tca_service = ecs.FargateService(
        self, "FargateServiceTCA",
        task_definition=tca_taskdef,
        cluster=ecs_cluster,
        desired_count=1,
        service_name="{0}-{1}-tca".format(my_service_name, stage))
    tca_scaling = tca_service.auto_scale_task_count(max_capacity=5)
    tca_scaling.scale_on_cpu_utilization('TcaCpuScaling',
                                         target_utilization_percent=50)

    # Setup HTTPS listener on the ALB
    alb_listener = alb.add_listener(
        'Listener',
        certificate_arns=[
            "arn:aws:acm:us-east-1:829809672214:certificate/a84bb369-03ce-4e5e-9d32-8c84609cad1e"
        ],
        port=443,
        open=False,
        protocol=elbv2.ApplicationProtocol.HTTPS)

    # Attach ALB to ECS API service: route /oculus-api/* at priority 1
    api_target = alb_listener.add_targets(
        'ECSAPI',
        port=8080,
        priority=1,
        targets=[api_service],
        health_check=elbv2.HealthCheck(path=api_health_path),
        path_pattern='/oculus-api/*')

    # Attach ALB to ECS TCA service: route /tca/* at priority 2
    tca_target = alb_listener.add_targets(
        'ECSTCA',
        port=8080,
        priority=2,
        targets=[tca_service],
        health_check=elbv2.HealthCheck(path=tca_health_path),
        path_pattern='/tca/*')

    # Attach ALB to ECS web service: default action for unmatched paths
    web_target = alb_listener.add_targets(
        'ECSWeb',
        port=3030,
        protocol=elbv2.ApplicationProtocol.HTTP,
        targets=[web_service],
        health_check=elbv2.HealthCheck(path='/'))

    core.CfnOutput(self, 'LoadBalancerDNS',
                   value=alb.load_balancer_dns_name)

    # Alias record pointing the site domain at the ALB
    zone = route53.HostedZone.from_lookup(self, 'MyHostedZone',
                                          domain_name=props['zoneDomain'])
    route53.ARecord(
        self, 'ServiceAliasRecord',
        record_name=props['siteDomain'],
        target=route53.RecordTarget(
            alias_target=aws_route53_targets.LoadBalancerTarget(
                load_balancer=alb)),
        zone=zone)
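# Usage sketch: a minimal app wiring for the stack above. The class name
# `OculusStack` and all prop values are assumptions for illustration; `stage`
# and `serviceName` are read from CDK context, and HostedZone.from_lookup
# requires a concrete account/region in `env`.
#
#   app = core.App(context={'stage': 'dev', 'serviceName': 'oculus'})
#   OculusStack(app, 'oculus-dev',
#               props={'cidr': '10.0.0.0/16',
#                      'vpcAzCount': 2,
#                      'apiHealthPath': '/oculus-api/health',
#                      'tcaHealthPath': '/tca/health',
#                      'zoneDomain': 'example.com',
#                      'siteDomain': 'oculus.example.com'},
#               env=core.Environment(account='111111111111',
#                                    region='us-east-1'))
#   app.synth()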
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    ###########################################################################
    # AWS LAMBDA FUNCTIONS
    ###########################################################################
    parse_image_list_file = aws_lambda.Function(
        self, 'parse_image_list_file',
        handler='parse_image_list_file.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('parse_image_list_file'),
        memory_size=10240,
        timeout=core.Duration.seconds(300),
        log_retention=aws_logs.RetentionDays.ONE_DAY)

    list_objects = aws_lambda.Function(
        self, 'list_objects',
        handler='list_objects.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('list_objects'),
        memory_size=4096,
        timeout=core.Duration.seconds(300),
        log_retention=aws_logs.RetentionDays.ONE_DAY)

    get_size_and_store = aws_lambda.Function(
        self, 'get_size_and_store',
        handler='get_size_and_store.lambda_handler',
        runtime=aws_lambda.Runtime.PYTHON_3_7,
        code=aws_lambda.Code.asset('get_size_and_store'),
        memory_size=4096,
        timeout=core.Duration.seconds(300),
        log_retention=aws_logs.RetentionDays.ONE_DAY)

    ###########################################################################
    # AMAZON S3 BUCKETS
    ###########################################################################
    images_bucket = aws_s3.Bucket(self, "images_bucket")

    ###########################################################################
    # LAMBDA SUPPLEMENTAL POLICIES
    ###########################################################################
    lambda_supplemental_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=["s3:Get*", "s3:Head*", "s3:List*", "sqs:*", "es:*"],
        resources=["*"])

    parse_image_list_file.add_to_role_policy(
        lambda_supplemental_policy_statement)
    list_objects.add_to_role_policy(lambda_supplemental_policy_statement)
    get_size_and_store.add_to_role_policy(
        lambda_supplemental_policy_statement)

    ###########################################################################
    # AWS SNS TOPICS
    ###########################################################################
    # notification_topic = aws_sns.Topic(self, "notification_topic")

    ###########################################################################
    # ADD AMAZON S3 BUCKET NOTIFICATIONS
    ###########################################################################
    images_bucket.add_event_notification(
        aws_s3.EventType.OBJECT_CREATED,
        aws_s3_notifications.LambdaDestination(parse_image_list_file))

    ###########################################################################
    # AWS SQS QUEUES
    ###########################################################################
    # Each work queue gets a dead-letter queue; the 301-second visibility
    # timeout is just over the consuming Lambda's 300-second timeout, so an
    # in-flight message cannot become visible again mid-invocation.
    comprehend_queue_iqueue = aws_sqs.Queue(self, "comprehend_queue_iqueue")
    comprehend_queue_iqueue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10, queue=comprehend_queue_iqueue)
    comprehend_queue = aws_sqs.Queue(
        self, "comprehend_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=comprehend_queue_iqueue_dlq)

    rekognition_queue_iqueue = aws_sqs.Queue(self, "rekognition_queue_iqueue")
    rekognition_queue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10, queue=rekognition_queue_iqueue)
    rekognition_queue = aws_sqs.Queue(
        self, "rekognition_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=rekognition_queue_dlq)

    object_queue_iqueue = aws_sqs.Queue(self, "object_queue_iqueue")
    object_queue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10, queue=object_queue_iqueue)
    object_queue = aws_sqs.Queue(
        self, "object_queue",
        visibility_timeout=core.Duration.seconds(301),
        dead_letter_queue=object_queue_dlq)

    ###########################################################################
    # AWS LAMBDA SQS EVENT SOURCE
    ###########################################################################
    get_size_and_store.add_event_source(
        SqsEventSource(object_queue, batch_size=10))

    ###########################################################################
    # AWS ELASTICSEARCH DOMAIN
    ###########################################################################
    s3workflow_domain = aws_elasticsearch.Domain(
        self, "s3workflow_domain",
        version=aws_elasticsearch.ElasticsearchVersion.V7_1,
        capacity={"master_nodes": 3, "data_nodes": 4},
        ebs={"volume_size": 100},
        zone_awareness={"availability_zone_count": 2},
        logging={
            "slow_search_log_enabled": True,
            "app_log_enabled": True,
            "slow_index_log_enabled": True
        })

    ###########################################################################
    # AMAZON COGNITO USER POOL
    ###########################################################################
    # All other settings are left at their defaults.
    s3workflow_pool = aws_cognito.UserPool(
        self, "s3workflow-pool",
        sign_in_aliases=aws_cognito.SignInAliases(email=True,
                                                  username=True))

    ###########################################################################
    # AMAZON VPC
    ###########################################################################
    vpc = aws_ec2.Vpc(self, "s3workflowVPC",
                      max_azs=3)  # default is all AZs in region

    ###########################################################################
    # AMAZON ECS CLUSTER
    ###########################################################################
    cluster = aws_ecs.Cluster(self, "s3", vpc=vpc)

    ###########################################################################
    # AMAZON ECR Repositories
    ###########################################################################
    rekognition_repository = aws_ecr.Repository(
        self, "rekognition_repository",
        image_scan_on_push=True,
        removal_policy=core.RemovalPolicy.DESTROY)
    comprehend_repository = aws_ecr.Repository(
        self, "comprehend_repository",
        image_scan_on_push=True,
        removal_policy=core.RemovalPolicy.DESTROY)

    ###########################################################################
    # AMAZON ECS Roles and Policies
    ###########################################################################
    task_execution_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=[
            "logs:*", "ecs:*", "ec2:*", "elasticloadbalancing:*", "ecr:*"
        ],
        resources=["*"])
    task_execution_policy_document = aws_iam.PolicyDocument()
    task_execution_policy_document.add_statements(
        task_execution_policy_statement)
    task_execution_policy = aws_iam.Policy(
        self, "task_execution_policy",
        document=task_execution_policy_document)
    task_execution_role = aws_iam.Role(
        self, "task_execution_role",
        assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
    task_execution_role.attach_inline_policy(task_execution_policy)

    task_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=[
            "logs:*", "xray:*", "sqs:*", "s3:*", "rekognition:*",
            "comprehend:*", "es:*"
        ],
        resources=["*"])
    task_policy_document = aws_iam.PolicyDocument()
    task_policy_document.add_statements(task_policy_statement)
    task_policy = aws_iam.Policy(self, "task_policy",
                                 document=task_policy_document)
    task_role = aws_iam.Role(
        self, "task_role",
        assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
    task_role.attach_inline_policy(task_policy)

    ###########################################################################
    # AMAZON ECS Task definitions
    ###########################################################################
    rekognition_task_definition = aws_ecs.TaskDefinition(
        self, "rekognition_task_definition",
        compatibility=aws_ecs.Compatibility.FARGATE,
        cpu="1024",
        # ipc_mode=None,
        memory_mib="2048",
        network_mode=aws_ecs.NetworkMode.AWS_VPC,
        # pid_mode=None,  # not supported in Fargate and Windows containers
        # placement_constraints=None,
        execution_role=task_execution_role,
        # family=None,
        # proxy_configuration=None,
        task_role=task_role
        # volumes=None
    )

    comprehend_task_definition = aws_ecs.TaskDefinition(
        self, "comprehend_task_definition",
        compatibility=aws_ecs.Compatibility.FARGATE,
        cpu="1024",
        # ipc_mode=None,
        memory_mib="2048",
        network_mode=aws_ecs.NetworkMode.AWS_VPC,
        # pid_mode=None,  # not supported in Fargate and Windows containers
        # placement_constraints=None,
        execution_role=task_execution_role,
        # family=None,
        # proxy_configuration=None,
        task_role=task_role
        # volumes=None
    )

    ###########################################################################
    # AMAZON ECS Images
    ###########################################################################
    rekognition_ecr_image = aws_ecs.EcrImage(
        repository=rekognition_repository, tag="latest")
    comprehend_ecr_image = aws_ecs.EcrImage(
        repository=comprehend_repository, tag="latest")

    ###########################################################################
    # ENVIRONMENT VARIABLES
    ###########################################################################
    # Shared by both containers; the Lambdas get their own copies below.
    environment_variables = {
        "COMPREHEND_QUEUE": comprehend_queue.queue_url,
        "REKOGNITION_QUEUE": rekognition_queue.queue_url,
        "IMAGES_BUCKET": images_bucket.bucket_name,
        "ELASTICSEARCH_HOST": s3workflow_domain.domain_endpoint,
    }

    parse_image_list_file.add_environment("ELASTICSEARCH_HOST",
                                          s3workflow_domain.domain_endpoint)
    parse_image_list_file.add_environment("QUEUEURL",
                                          rekognition_queue.queue_url)
    parse_image_list_file.add_environment("DEBUG", "False")
    parse_image_list_file.add_environment("BUCKET", "-")
    parse_image_list_file.add_environment("KEY", "-")

    list_objects.add_environment("QUEUEURL", object_queue.queue_url)
    list_objects.add_environment("ELASTICSEARCH_HOST",
                                 s3workflow_domain.domain_endpoint)
    list_objects.add_environment("S3_BUCKET_NAME", images_bucket.bucket_name)
    list_objects.add_environment("S3_BUCKET_PREFIX", "images/")
    list_objects.add_environment("S3_BUCKET_SUFFIX", "")
    list_objects.add_environment("LOGGING_LEVEL", "INFO")

    get_size_and_store.add_environment("QUEUEURL", object_queue.queue_url)
    get_size_and_store.add_environment("ELASTICSEARCH_HOST",
                                       s3workflow_domain.domain_endpoint)
    get_size_and_store.add_environment("S3_BUCKET_NAME",
                                       images_bucket.bucket_name)
    get_size_and_store.add_environment("S3_BUCKET_PREFIX", "images/")
    get_size_and_store.add_environment("S3_BUCKET_SUFFIX", "")
    get_size_and_store.add_environment("LOGGING_LEVEL", "INFO")

    ###########################################################################
    # ECS Log Drivers
    ###########################################################################
    rekognition_task_log_driver = aws_ecs.LogDriver.aws_logs(
        stream_prefix="s3workflow",
        log_retention=aws_logs.RetentionDays.ONE_DAY)
    comprehend_task_log_driver = aws_ecs.LogDriver.aws_logs(
        stream_prefix="s3workflow",
        log_retention=aws_logs.RetentionDays.ONE_DAY)

    ###########################################################################
    # ECS Containers
    ###########################################################################
    rekognition_task_definition.add_container(
        "rekognition_task_definition",
        image=rekognition_ecr_image,
        memory_reservation_mib=1024,
        environment=environment_variables,
        logging=rekognition_task_log_driver)

    comprehend_task_definition.add_container(
        "comprehend_task_definition",
        image=comprehend_ecr_image,
        memory_reservation_mib=1024,
        environment=environment_variables,
        logging=comprehend_task_log_driver)

    ###########################################################################
    # AWS ROUTE53 HOSTED ZONE
    ###########################################################################
    hosted_zone = aws_route53.HostedZone(
        self, "hosted_zone",
        zone_name="s3workflow.com",
        comment="private hosted zone for s3workflow system")
    hosted_zone.add_vpc(vpc)
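# The rekognition/comprehend container code is not part of this stack. As a
# rough illustration only, a worker of this shape would poll its queue and call
# the matching AI service; the message fields and the indexing step below are
# assumptions, not the original implementation.

import json
import os

import boto3


def poll_rekognition_queue():
    """Long-poll REKOGNITION_QUEUE and label each referenced S3 image."""
    sqs = boto3.client('sqs')
    rekognition = boto3.client('rekognition')
    queue_url = os.environ['REKOGNITION_QUEUE']
    bucket = os.environ['IMAGES_BUCKET']

    while True:
        resp = sqs.receive_message(QueueUrl=queue_url,
                                   MaxNumberOfMessages=10,
                                   WaitTimeSeconds=20)
        for msg in resp.get('Messages', []):
            body = json.loads(msg['Body'])  # assumed shape: {"key": "images/..."}
            labels = rekognition.detect_labels(
                Image={'S3Object': {'Bucket': bucket, 'Name': body['key']}})
            # ...index `labels` into the Elasticsearch domain here...
            sqs.delete_message(QueueUrl=queue_url,
                               ReceiptHandle=msg['ReceiptHandle'])


if __name__ == '__main__':
    poll_rekognition_queue()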
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
    vpc = ec2.Vpc(
        self, "vpc",
        cidr=props['vpc_CIDR'],
        max_azs=3,
        subnet_configuration=[
            {'cidrMask': 28, 'name': 'public', 'subnetType': ec2.SubnetType.PUBLIC},
            {'cidrMask': 28, 'name': 'private', 'subnetType': ec2.SubnetType.PRIVATE},
            {'cidrMask': 28, 'name': 'db', 'subnetType': ec2.SubnetType.ISOLATED}
        ])

    rds_subnetGroup = rds.SubnetGroup(
        self, "rds_subnetGroup",
        description=f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
    # TODO: add an Aurora Serverless option
    rds_instance = rds.DatabaseCluster(
        self, 'wordpress-db',
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_07_2),
        instances=1,
        instance_props=rds.InstanceProps(
            vpc=vpc,
            enable_performance_insights=props['rds_enable_performance_insights'],
            instance_type=ec2.InstanceType(
                instance_type_identifier=props['rds_instance_type'])),
        subnet_group=rds_subnetGroup,
        storage_encrypted=props['rds_storage_encrypted'],
        backup=rds.BackupProps(retention=core.Duration.days(
            props['rds_automated_backup_retention_days'])))

    EcsToRdsSeurityGroup = ec2.SecurityGroup(
        self, "EcsToRdsSeurityGroup",
        vpc=vpc,
        description="Allow WordPress containers to talk to RDS")

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
    db_cred_generator = _lambda.Function(
        self, 'db_creds_generator',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='db_creds_generator.handler',
        code=_lambda.Code.asset('lambda'),
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
        # vpc.select_subnets(subnet_type=ec2.SubnetType("ISOLATED")).subnets,
        environment={
            'SECRET_NAME': rds_instance.secret.secret_name,
        })

    # Set permissions and security groups
    rds_instance.connections.allow_from(
        EcsToRdsSeurityGroup, ec2.Port.tcp(3306))  # open hole to RDS in RDS SG

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
    file_system = efs.FileSystem(
        self, "MyEfsFileSystem",
        vpc=vpc,
        encrypted=True,  # file system is not encrypted by default
        lifecycle_policy=props['efs_lifecycle_policy'],
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
        removal_policy=core.RemovalPolicy(props['efs_removal_policy']),
        enable_automatic_backups=props['efs_automatic_backups'])

    if props['deploy_bastion_host']:
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
        bastion_host = ec2.BastionHostLinux(self, 'bastion_host', vpc=vpc)
        rds_instance.connections.allow_from(bastion_host, ec2.Port.tcp(3306))

    # Expose created resources to downstream stacks
    self.output_props = props.copy()
    self.output_props["vpc"] = vpc
    self.output_props["rds_instance"] = rds_instance
    self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
    self.output_props["file_system"] = file_system
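# Usage sketch: the props this stack expects and how its outputs feed a later
# stack. The class name `WordpressBaseStack` and every value below are
# assumptions for illustration, not part of the original code.
#
#   base = WordpressBaseStack(app, 'wp-base', props={
#       'vpc_CIDR': '10.1.0.0/16',
#       'environment': 'dev', 'application': 'wordpress', 'unit': 'web',
#       'rds_instance_type': 't3.medium',
#       'rds_enable_performance_insights': False,
#       'rds_storage_encrypted': True,
#       'rds_automated_backup_retention_days': 7,
#       'efs_lifecycle_policy': efs.LifecyclePolicy.AFTER_7_DAYS,
#       'efs_removal_policy': 'DESTROY',
#       'efs_automatic_backups': True,
#       'deploy_bastion_host': True,
#   })
#   # Downstream stacks receive the VPC, RDS cluster, security group, and
#   # file system through base.output_props.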
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
    vpc = ec2.Vpc(
        self, "vpc",
        cidr=props['vpc_CIDR'],
        max_azs=3,
        subnet_configuration=[
            {'cidrMask': 28, 'name': 'public', 'subnetType': ec2.SubnetType.PUBLIC},
            {'cidrMask': 28, 'name': 'private', 'subnetType': ec2.SubnetType.PRIVATE},
            {'cidrMask': 28, 'name': 'db', 'subnetType': ec2.SubnetType.ISOLATED}
        ])

    rds_subnetGroup = rds.SubnetGroup(
        self, "rds_subnetGroup",
        description=f"Group for {props['environment']}-{props['application']}-{props['unit']} DB",
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_rds/DatabaseCluster.html
    # TODO: add an Aurora Serverless option
    rds_instance = rds.DatabaseCluster(
        self, 'wordpress-db',
        engine=rds.DatabaseClusterEngine.aurora_mysql(
            version=rds.AuroraMysqlEngineVersion.VER_2_07_2),
        instances=1,
        instance_props=rds.InstanceProps(
            vpc=vpc,
            enable_performance_insights=props['rds_enable_performance_insights'],
            instance_type=ec2.InstanceType(
                instance_type_identifier=props['rds_instance_type'])),
        subnet_group=rds_subnetGroup,
        storage_encrypted=props['rds_storage_encrypted'],
        backup=rds.BackupProps(retention=core.Duration.days(
            props['rds_automated_backup_retention_days'])))

    EcsToRdsSeurityGroup = ec2.SecurityGroup(
        self, "EcsToRdsSeurityGroup",
        vpc=vpc,
        description="Allow WordPress containers to talk to RDS")

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
    db_cred_generator = _lambda.Function(
        self, 'db_creds_generator',
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='db_creds_generator.handler',
        code=_lambda.Code.asset('lambda/db_creds_generator'),
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
        # vpc.select_subnets(subnet_type=ec2.SubnetType("ISOLATED")).subnets,
        environment={
            'SECRET_NAME': rds_instance.secret.secret_name,
        })

    # Set permissions and security groups
    rds_instance.connections.allow_from(
        EcsToRdsSeurityGroup, ec2.Port.tcp(3306))  # open hole to RDS in RDS SG

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html
    file_system = efs.FileSystem(
        self, "MyEfsFileSystem",
        vpc=vpc,
        encrypted=True,  # file system is not encrypted by default
        lifecycle_policy=props['efs_lifecycle_policy'],
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
        removal_policy=core.RemovalPolicy(props['efs_removal_policy']),
        enable_automatic_backups=props['efs_automatic_backups'])

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html
    cluster = ecs.Cluster(
        self, "Cluster",
        vpc=vpc,
        container_insights=props['ecs_enable_container_insights'])

    if props['deploy_bastion_host']:
        # TODO: deploy the bastion host with a key file
        # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/BastionHostLinux.html
        bastion_host = ec2.BastionHostLinux(self, 'bastion_host', vpc=vpc)
        rds_instance.connections.allow_from(bastion_host, ec2.Port.tcp(3306))

    #######################
    ### Developer Tools ###
    #######################

    # SFTP into the EFS shared file system
    NetToolsSecret = secretsmanager.Secret(
        self, "NetToolsSecret",
        generate_secret_string=secretsmanager.SecretStringGenerator(
            secret_string_template=json.dumps({"username": '******', "ip": ''}),
            generate_string_key="password",
            exclude_characters='/"'))
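    # Note: SecretStringGenerator keeps the keys given in secret_string_template
    # and generates a value only for generate_string_key, so the stored secret
    # resolves to JSON of the shape:
    #   {"username": "******", "ip": "", "password": "<generated>"}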
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_efs/FileSystem.html#aws_cdk.aws_efs.FileSystem.add_access_point
    AccessPoint = file_system.add_access_point(
        "access-point",
        path="/",
        create_acl=efs.Acl(
            # https://aws.amazon.com/blogs/containers/developers-guide-to-using-amazon-efs-with-amazon-ecs-and-aws-fargate-part-2/
            owner_uid="100",
            owner_gid="101",
            permissions="0755"))

    EfsVolume = ecs.Volume(
        name="efs",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=file_system.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=ecs.AuthorizationConfig(
                access_point_id=AccessPoint.access_point_id)))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
    NetToolsTask = ecs.FargateTaskDefinition(
        self, "TaskDefinition",
        cpu=256,
        memory_limit_mib=512,
        volumes=[EfsVolume])

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html#aws_cdk.aws_ecs.FargateTaskDefinition.add_container
    NetToolsContainer = NetToolsTask.add_container(
        "NetTools",
        image=ecs.ContainerImage.from_registry('netresearch/sftp'),
        command=['test:test:100:101:efs'])
    NetToolsContainer.add_port_mappings(
        ecs.PortMapping(container_port=22, protocol=ecs.Protocol.TCP))
    NetToolsContainer.add_mount_points(
        ecs.MountPoint(
            container_path="/home/test/efs",  # TODO: build path out with username from secret
            read_only=False,
            source_volume=EfsVolume.name,
        ))

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateService.html
    service = ecs.FargateService(
        self, "Service",
        cluster=cluster,
        task_definition=NetToolsTask,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,  # required for EFS
    )
    # TODO: somehow store the container's IP on deploy

    # Allow traffic to the EFS volume from the NetTools container
    service.connections.allow_to(file_system, ec2.Port.tcp(2049))

    # TODO: allow the bastion host into the container on port 22

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_lambda/Function.html
    bastion_ip_locator = _lambda.Function(
        self, 'bastion_ip_locator',
        function_name=f"{props['environment']}-{props['application']}-{props['unit']}-SFTP-IP",
        runtime=_lambda.Runtime.PYTHON_3_8,
        handler='bastion_ip_locator.handler',
        code=_lambda.Code.asset('lambda/bastion_ip_locator'),
        environment={
            'CLUSTER_NAME': cluster.cluster_arn,
            'SERVICE_NAME': service.service_name
        })

    # Give bastion_ip_locator the permissions it needs to read task info from ECS
    bastion_ip_locator.add_to_role_policy(
        iam.PolicyStatement(
            actions=["ecs:DescribeTasks"],
            resources=[
                # f"arn:aws:ecs:us-east-1:348757191778:service/{cluster.cluster_name}/{service.service_name}",
                f"arn:aws:ecs:us-east-1:348757191778:task/{cluster.cluster_name}/*"
            ]))
    bastion_ip_locator.add_to_role_policy(
        iam.PolicyStatement(
            actions=["ecs:ListTasks"],
            resources=["*"],
            conditions={'ArnEquals': {'ecs:cluster': cluster.cluster_arn}}))

    # Expose created resources to downstream stacks
    self.output_props = props.copy()
    self.output_props["vpc"] = vpc
    self.output_props["rds_instance"] = rds_instance
    self.output_props["EcsToRdsSeurityGroup"] = EcsToRdsSeurityGroup
    self.output_props["file_system"] = file_system
    self.output_props["cluster"] = cluster
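# The source under lambda/bastion_ip_locator is not shown. A minimal sketch of
# a handler the permissions above would support (an assumption for
# illustration, not the original code): list the service's tasks, describe the
# first one, and return the private IP recorded on its ENI attachment.

import os

import boto3


def handler(event, context):
    ecs_client = boto3.client('ecs')
    cluster = os.environ['CLUSTER_NAME']  # the cluster ARN is accepted here
    task_arns = ecs_client.list_tasks(
        cluster=cluster,
        serviceName=os.environ['SERVICE_NAME'])['taskArns']
    task = ecs_client.describe_tasks(cluster=cluster,
                                     tasks=task_arns)['tasks'][0]
    details = task['attachments'][0]['details']
    return next(d['value'] for d in details
                if d['name'] == 'privateIPv4Address')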
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    ###########################################################################
    # AMAZON VPC
    ###########################################################################
    vpc = ec2.Vpc(self, "LoadTestVPC",
                  max_azs=3)  # default is all AZs in region

    ###########################################################################
    # AMAZON ECR Repositories
    ###########################################################################
    # get_repository = aws_ecs.IRepository(self, "get_repository", image_scan_on_push=True, removal_policy=aws_cdk.core.RemovalPolicy("DESTROY"))
    # put_repository = aws_ecs.IRepository(self, "put_repository", image_scan_on_push=True, removal_policy=aws_cdk.core.RemovalPolicy("DESTROY"))
    get_repository = aws_ecr.Repository(
        self, "get_repository",
        image_scan_on_push=True,
        removal_policy=core.RemovalPolicy.DESTROY)
    put_repository = aws_ecr.Repository(
        self, "put_repository",
        image_scan_on_push=True,
        removal_policy=core.RemovalPolicy.DESTROY)
    xray_repository = aws_ecr.Repository(
        self, "xray_repository",
        image_scan_on_push=True,
        removal_policy=core.RemovalPolicy.DESTROY)

    ###########################################################################
    # AMAZON ECS Roles and Policies
    ###########################################################################
    task_execution_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=[
            "logs:*", "ecs:*", "ec2:*", "elasticloadbalancing:*", "ecr:*"
        ],
        resources=["*"])
    task_execution_policy_document = aws_iam.PolicyDocument()
    task_execution_policy_document.add_statements(
        task_execution_policy_statement)
    task_execution_policy = aws_iam.Policy(
        self, "task_execution_policy",
        document=task_execution_policy_document)
    task_execution_role = aws_iam.Role(
        self, "task_execution_role",
        assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
    task_execution_role.attach_inline_policy(task_execution_policy)

    task_policy_statement = aws_iam.PolicyStatement(
        effect=aws_iam.Effect.ALLOW,
        actions=["logs:*", "xray:*", "sqs:*", "s3:*"],
        resources=["*"])
    task_policy_document = aws_iam.PolicyDocument()
    task_policy_document.add_statements(task_policy_statement)
    task_policy = aws_iam.Policy(self, "task_policy",
                                 document=task_policy_document)
    task_role = aws_iam.Role(
        self, "task_role",
        assumed_by=aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
    task_role.attach_inline_policy(task_policy)

    ###########################################################################
    # AMAZON ECS Task definitions
    ###########################################################################
    get_task_definition = aws_ecs.TaskDefinition(
        self, "gettaskdefinition",
        compatibility=aws_ecs.Compatibility.FARGATE,
        cpu="1024",
        # ipc_mode=None,
        memory_mib="2048",
        network_mode=aws_ecs.NetworkMode.AWS_VPC,
        # pid_mode=None,  # not supported in Fargate and Windows containers
        # placement_constraints=None,
        execution_role=task_execution_role,
        # family=None,
        # proxy_configuration=None,
        task_role=task_role
        # volumes=None
    )

    put_task_definition = aws_ecs.TaskDefinition(
        self, "puttaskdefinition",
        compatibility=aws_ecs.Compatibility.FARGATE,
        cpu="1024",
        # ipc_mode=None,
        memory_mib="2048",
        network_mode=aws_ecs.NetworkMode.AWS_VPC,
        # pid_mode=None,  # not supported in Fargate and Windows containers
        # placement_constraints=None,
        execution_role=task_execution_role,
        # family=None,
        # proxy_configuration=None,
        task_role=task_role
        # volumes=None
    )
    ###########################################################################
    # AMAZON S3 BUCKETS
    ###########################################################################
    storage_bucket = aws_s3.Bucket(self, "storage_bucket")

    ###########################################################################
    # AWS SQS QUEUES
    ###########################################################################
    ecs_task_queue_iqueue = aws_sqs.Queue(self, "ecs_task_queue_iqueue")
    ecs_task_queue_queue_dlq = aws_sqs.DeadLetterQueue(
        max_receive_count=10, queue=ecs_task_queue_iqueue)
    ecs_task_queue_queue = aws_sqs.Queue(
        self, "ecs_task_queue_queue",
        visibility_timeout=core.Duration.seconds(300),
        dead_letter_queue=ecs_task_queue_queue_dlq)

    ###########################################################################
    # AMAZON ECS Images
    ###########################################################################
    get_repository_ecr_image = aws_ecs.EcrImage(repository=get_repository,
                                                tag="latest")
    put_repository_ecr_image = aws_ecs.EcrImage(repository=put_repository,
                                                tag="latest")
    xray_repository_ecr_image = aws_ecs.EcrImage(repository=xray_repository,
                                                 tag="latest")

    environment_variables = {
        "SQS_QUEUE": ecs_task_queue_queue.queue_url,
        "S3_BUCKET": storage_bucket.bucket_name,
    }

    get_task_log_driver = aws_ecs.LogDriver.aws_logs(
        stream_prefix="S3LoadTest",
        log_retention=aws_logs.RetentionDays.ONE_WEEK)
    put_task_log_driver = aws_ecs.LogDriver.aws_logs(
        stream_prefix="S3LoadTest",
        log_retention=aws_logs.RetentionDays.ONE_WEEK)
    xray_task_log_driver = aws_ecs.LogDriver.aws_logs(
        stream_prefix="S3LoadTest",
        log_retention=aws_logs.RetentionDays.ONE_WEEK)

    # Each task definition pairs a worker container with an X-Ray sidecar
    get_task_definition.add_container(
        "get_task_definition_get",
        image=get_repository_ecr_image,
        memory_reservation_mib=1024,
        environment=environment_variables,
        logging=get_task_log_driver)
    get_task_definition.add_container(
        "get_task_definition_xray",
        image=xray_repository_ecr_image,
        memory_reservation_mib=1024,
        environment=environment_variables,
        logging=xray_task_log_driver)

    put_task_definition.add_container(
        "put_task_definition_put",
        image=put_repository_ecr_image,
        memory_reservation_mib=1024,
        environment=environment_variables,
        logging=put_task_log_driver)
    put_task_definition.add_container(
        "put_task_definition_xray",
        image=xray_repository_ecr_image,
        memory_reservation_mib=1024,
        environment=environment_variables,
        logging=xray_task_log_driver)

    ###########################################################################
    # AMAZON ECS CLUSTER
    ###########################################################################
    cluster = aws_ecs.Cluster(self, "LoadTestCluster", vpc=vpc)

    ###########################################################################
    # AWS ROUTE53 HOSTED ZONE
    ###########################################################################
    hosted_zone = aws_route53.HostedZone(
        self, "hosted_zone",
        zone_name="loadtest.com",
        comment="private hosted zone for loadtest system")
    hosted_zone.add_vpc(vpc)

    bucket_record_values = [storage_bucket.bucket_name]
    queue_record_values = [ecs_task_queue_queue.queue_url]
    bucket_record_name = "bucket." + hosted_zone.zone_name
    queue_record_name = "filesqueue." + hosted_zone.zone_name
    hosted_zone_record_bucket = aws_route53.TxtRecord(
        self, "hosted_zone_record_bucket",
        record_name=bucket_record_name,
        values=bucket_record_values,
        zone=hosted_zone,
        comment="dns record for bucket name")
    hosted_zone_record_queue = aws_route53.TxtRecord(
        self, "hosted_zone_record_queue",
        record_name=queue_record_name,
        values=queue_record_values,
        zone=hosted_zone,
        comment="dns record for queue name")
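# Containers in the VPC can then discover the bucket and queue at runtime by
# resolving these TXT records instead of baking the values into images. A
# minimal sketch using dnspython (an assumption for illustration; any resolver
# inside the VPC works):

import dns.resolver  # pip install dnspython


def discover_endpoints():
    bucket = dns.resolver.resolve(
        'bucket.loadtest.com', 'TXT')[0].strings[0].decode()
    queue_url = dns.resolver.resolve(
        'filesqueue.loadtest.com', 'TXT')[0].strings[0].decode()
    return bucket, queue_url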