def configure_load_balancing(
        load_balancer: elbv2.ApplicationLoadBalancer,
        ec2_service: ecs.FargateService,
        ssl_certificate=None,
):
    """Wire *ec2_service* behind *load_balancer*.

    When *ssl_certificate* is given, traffic is served on HTTPS:443 and a
    second listener on port 80 answers with an HTTP 301 redirect to 443.
    Without a certificate a plain HTTP:80 listener is used.

    Fix: the target-registration call was duplicated verbatim in both
    branches; only the listener construction differs, so targets are now
    registered once.
    """
    if ssl_certificate:
        # Redirection 80 --> 443
        redirect_listener = load_balancer.add_listener('redirect',
                                                       port=80,
                                                       open=True)
        redirect_listener.add_redirect_response('redirect',
                                                status_code='HTTP_301',
                                                protocol='HTTPS',
                                                port='443')
        listener = load_balancer.add_listener('listener',
                                              port=443,
                                              certificates=[ssl_certificate],
                                              open=True)
    else:
        listener = load_balancer.add_listener('listener', port=80, open=True)
    # Identical for HTTP and HTTPS: forward to the service on port 80 with a
    # 30 s drain window, 30 s slow start, and a root-path health check.
    listener.add_targets(
        'target',
        port=80,
        deregistration_delay=core.Duration.seconds(30),
        slow_start=core.Duration.seconds(30),
        targets=[ec2_service],
        health_check=elbv2.HealthCheck(path='/'))
def __init__(
    self,
    scope: core.Construct,
    id: str,
    **kwargs,
) -> None:
    """Backend Fargate service: task definition, container, IAM grants,
    and registration on the parent's shared HTTPS listener.

    NOTE(review): `scope` is expected to expose `image`, `variables`,
    `backend_assets_bucket`, `rds`, `ecs`, `vpc` and `https_listener` —
    confirm against the parent construct's definition.
    """
    super().__init__(
        scope,
        id,
        **kwargs,
    )
    # Task definition with a single container running the backend image;
    # env vars and secrets come from the parent's variables helper.
    self.backend_task = ecs.FargateTaskDefinition(self, "BackendTask")
    self.backend_task.add_container(
        "BackendContainer",
        image=scope.image,
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix="BackendContainer",
            log_retention=logs.RetentionDays.ONE_WEEK,
        ),
        environment=scope.variables.regular_variables,
        secrets=scope.variables.secret_variables,
        command=["/start_prod.sh"],
    )
    # Task role needs read/write on the assets bucket and read on both
    # secrets (Django secret key and DB credentials).
    scope.backend_assets_bucket.grant_read_write(
        self.backend_task.task_role)
    for secret in [scope.variables.django_secret_key, scope.rds.db_secret]:
        secret.grant_read(self.backend_task.task_role)
    # Container listens on 8000/TCP.
    port_mapping = ecs.PortMapping(container_port=8000,
                                   protocol=ecs.Protocol.TCP)
    self.backend_task.default_container.add_port_mappings(port_mapping)
    # Service runs in the shared cluster using the VPC's default SG;
    # a public IP is assigned so the task can pull images without NAT.
    self.backend_service = ecs.FargateService(
        self,
        "BackendService",
        task_definition=self.backend_task,
        assign_public_ip=True,
        cluster=scope.ecs.cluster,
        security_group=ec2.SecurityGroup.from_security_group_id(
            self,
            "BackendServiceSecurityGroup",
            security_group_id=scope.vpc.vpc_default_security_group,
        ),
    )
    # Catch-all rule (priority 2) on the shared HTTPS listener; the ALB
    # forwards to port 80 on the service and probes the health endpoint,
    # accepting any 2xx.
    scope.https_listener.add_targets(
        "BackendTarget",
        port=80,
        targets=[self.backend_service],
        priority=2,
        path_patterns=["*"],
        health_check=elbv2.HealthCheck(
            healthy_http_codes="200-299",
            path="/api/health-check/",
        ),
    )
def create_target_group(self, vpc, tg_name):
    """Create an IP-mode application target group named *tg_name* in *vpc*.

    Health checks probe '/login'; cookie stickiness is pinned to 30 minutes.
    """
    group = _elbv2.ApplicationTargetGroup(
        self,
        tg_name,
        vpc=vpc,
        port=80,
        target_group_name=tg_name,
        target_type=_elbv2.TargetType.IP,
        health_check=_elbv2.HealthCheck(path='/login'),
    )
    # 1800 s == 30 min sticky-session window.
    group.enable_cookie_stickiness(core.Duration.seconds(1800))
    return group
def __create_application_target_group(self, asg: autoscaling.AutoScalingGroup, vpc: ec2.Vpc):
    """Build the tagged HTTP target group fronting the Imagizer ASG."""
    # Fast detection: probe /health every 10 s, healthy after 2 passes.
    probe = elbv2.HealthCheck(path="/health",
                              healthy_threshold_count=2,
                              interval=core.Duration.seconds(10))
    target_group = elbv2.ApplicationTargetGroup(
        self,
        id=common.generate_id("ImagizerTargetGroup"),
        vpc=vpc,
        targets=[asg],
        port=variables.PUBLIC_PORT,
        protocol=elbv2.ApplicationProtocol.HTTP,
        health_check=probe,
    )
    common.add_tags(self, target_group, variables.IMAGIZER_CLUSTER_TAGS)
    return target_group
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """Provision the 'regulators' Fargate service behind a public ALB.

    Expects ``props`` to carry 'namespace' (used as the resource-name
    prefix) and 'ecr' (the repository holding the service image).
    """
    super().__init__(scope, id, **kwargs)

    # All construct ids share the lower-cased namespace prefix.
    prefix = props['namespace'].lower()

    network = ec2.Vpc(self, f"{prefix}-vpc", max_azs=3)
    fargate_cluster = ecs.Cluster(self, f"{prefix}-cluster", vpc=network)

    # Task role lets the containers call Lambda/CloudWatch/DynamoDB/Logs.
    app_role = iam.Role(
        self,
        f"{prefix}-task-role",
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
    app_role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=["lambda:*", "cloudwatch:*", "dynamodb:*", "logs:*"],
            resources=["*"]))

    service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        f"{prefix}",
        cluster=fargate_cluster,  # Required
        cpu=512,                  # Default is 256
        desired_count=6,          # Default is 1
        task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_ecr_repository(
                props['ecr'], f"{props['namespace']}"),
            container_name="regulators",
            container_port=8000,
            task_role=app_role),
        memory_limit_mib=2048,    # Default is 512
        public_load_balancer=True  # Default is False
    )

    # Probe the container port directly instead of the ALB default.
    service.target_group.health_check = elb.HealthCheck(
        protocol=elb.Protocol.HTTP,
        port='8000',
        path='/',
        healthy_threshold_count=2,
        unhealthy_threshold_count=4)
def ELB_SVC(self, ZachECSNodeName, TaskName, ZachTaskDef, cluster, vpc):
    """Create an EC2-backed ECS service and expose it through a public ALB.

    Returns the created ``ecs.Ec2Service`` and emits CfnOutputs for the
    load-balancer DNS name plus the service name and ARN.

    Fix: the outputs previously read ``ecs_service.service.service_name`` /
    ``.service.service_arn`` — ``ecs_service`` is already the Ec2Service and
    has no ``.service`` attribute, so synthesis raised AttributeError.
    """
    ecs_service = ecs.Ec2Service(
        self,
        id=ZachECSNodeName + TaskName,
        task_definition=ZachTaskDef,
        cluster=cluster,
        desired_count=2,
        security_group=self.VPC_SG(TaskName, vpc),
        assign_public_ip=True,
        # health_check_grace_period=core.Duration.seconds(30),
        # Health check grace period is only valid for services configured
        # to use load balancers.
        service_name=ZachECSNodeName + TaskName)

    # Create ALB
    lb = elb.ApplicationLoadBalancer(self,
                                     ZachECSNodeName + TaskName + "-LB",
                                     vpc=vpc,
                                     internet_facing=True)
    listener = lb.add_listener(ZachECSNodeName + TaskName + "PublicListener",
                               port=80,
                               open=True)
    health_check = elb.HealthCheck(interval=core.Duration.seconds(60),
                                   path="/health",
                                   timeout=core.Duration.seconds(5))

    # Attach ALB to ECS Service
    listener.add_targets(
        ZachECSNodeName + TaskName + "-ECS",
        port=80,
        targets=[ecs_service],
        health_check=health_check,
    )

    core.CfnOutput(self,
                   ZachECSNodeName + TaskName + "-LoadBalancerDNS",
                   value=lb.load_balancer_dns_name)
    core.CfnOutput(self,
                   id=ZachECSNodeName + TaskName + "-ServiceName",
                   value=ecs_service.service_name)
    core.CfnOutput(self,
                   id=ZachECSNodeName + TaskName + "-ServiceARN",
                   value=ecs_service.service_arn)
    return ecs_service
def __init__(self, scope: core.Construct, construct_id: str, vpc: ec2.Vpc, **kwargs) -> None:
    """Public ALB plus the 'blue' target group of a blue/green deployment."""
    super().__init__(scope, construct_id, **kwargs)

    self.elb_security_group = ec2.SecurityGroup(
        self,
        "ELBSG",
        vpc=vpc,
        allow_all_outbound=True,
        description="Group for the ELB")
    # Accept both plain and TLS web traffic from anywhere.
    self.elb_security_group.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port.tcp(80),
        description="Allow HTTP access")
    self.elb_security_group.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port.tcp(443),
        description="Allow HTTPS access")

    balancer = elbv2.ApplicationLoadBalancer(
        self,
        "ELB",
        vpc=vpc,
        internet_facing=True,
        vpc_subnets=ec2.SubnetSelection(subnets=vpc.public_subnets))

    # Strict probe: only HTTP 200 on /pi counts as healthy.
    probe = elbv2.HealthCheck(enabled=True,
                              healthy_http_codes="200",
                              healthy_threshold_count=3,
                              interval=core.Duration.seconds(15),
                              path="/pi",
                              timeout=core.Duration.seconds(10),
                              unhealthy_threshold_count=3)

    blue_group = elbv2.ApplicationTargetGroup(
        self,
        "BlueTargetGroup",
        port=8224,
        protocol=elbv2.ApplicationProtocol.HTTP,
        stickiness_cookie_duration=core.Duration.days(30),
        health_check=probe,
        target_type=elbv2.TargetType.IP,
        vpc=vpc)
def create_service_and_nlb(self):
    """Create the master service and register it on the NLB listener.

    The target speaks UDP on the master port while the NLB health check
    probes the dedicated HTTP health-check port.
    """
    master_service = self.create_service()
    nlb_listener = self.create_listener()
    http_probe = elb.HealthCheck(
        port=str(self.master_healthcheck_port),
        protocol=elb.Protocol.HTTP)
    udp_target = master_service.load_balancer_target(
        container_name="master",
        container_port=self.master_port,
        protocol=ecs.Protocol.UDP,
    )
    nlb_listener.add_targets(
        "ECS",
        port=self.master_port,
        targets=[udp_target],
        proxy_protocol_v2=True,
        health_check=http_probe,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Internet-facing ALB (HTTP:80) in front of an auto-scaling group.

    The VPC and security group are imported from existing resources; the
    ASG target is created by self._add_autoscaling_group.
    """
    super().__init__(scope, id, **kwargs)

    # Class name doubles as the id prefix for every LB-related construct.
    lb_name = self.__class__.__name__

    # Pre-existing network pieces are imported, not created here.
    vpc = ec2.Vpc.from_lookup(self,
                              id="Zach_VPC_Stack_A",
                              vpc_id="vpc-01e73b4b5c6f9f98a")
    security_group = ec2.SecurityGroup.from_security_group_id(
        self,
        id=lb_name + "-SG",
        security_group_id="sg-07a1d71bba92f38ca")
    public_subnets = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)

    balancer = elb.ApplicationLoadBalancer(
        self,
        id=lb_name,
        idle_timeout=core.Duration.seconds(5),
        ip_address_type=elb.IpAddressType.IPV4,
        security_group=security_group,
        vpc=vpc,
        deletion_protection=True,
        vpc_subnets=public_subnets,
        internet_facing=True,
        http2_enabled=True)

    asg_target = self._add_autoscaling_group(vpc, public_subnets,
                                             security_group, 'aws-sg-root')

    listener = balancer.add_listener(id=lb_name + "_" + str(80),
                                     open=True,
                                     port=80,
                                     protocol=elb.ApplicationProtocol.HTTP)
    # Redirect-style status codes count as healthy; 12 h sticky sessions.
    listener.add_targets(
        id=lb_name,
        health_check=elb.HealthCheck(
            healthy_http_codes="200,301,302,401,403,405",
            healthy_threshold_count=3,
            unhealthy_threshold_count=5,
            path="/",
            interval=core.Duration.seconds(30)),
        port=80,
        protocol=elb.ApplicationProtocol.HTTP,
        stickiness_cookie_duration=core.Duration.hours(12),
        targets=[asg_target],
    )
    listener.connections.allow_default_port_from_any_ipv4("Open to the world")

    core.CfnOutput(self,
                   "LoadBalancerDNS",
                   export_name="LoadBalancerDNS",
                   value=balancer.load_balancer_dns_name)
def __init__(self, scope: core.Construct, construct_id: str, vpc: ec2.Vpc,
             auto_scaling: autoscaling.AutoScalingGroup, **kwargs) -> None:
    """Public ALB for the web fleet; forwards HTTP:80 to *auto_scaling*.

    NOTE(review): there is no super().__init__ call and the ALB is
    parented on *scope* rather than self — if this class subclasses
    core.Construct this breaks at instantiation; confirm the base class.
    """
    self.elb = elb.ApplicationLoadBalancer(
        scope,
        'ALB-WebGroup',
        vpc=vpc,
        internet_facing=True,
        vpc_subnets=ec2.SubnetSelection(subnets=vpc.public_subnets)
    )
    listener = self.elb.add_listener('Listener', port=80, open=True)
    # Most health-check parameters match the ALB defaults; they are
    # spelled out explicitly for later reference/tuning.
    listener.add_targets(
        'WebFleet',
        port=80,
        targets=[auto_scaling],
        health_check=elb.HealthCheck(
            enabled=True,
            healthy_http_codes='200-399',
            interval=core.Duration.seconds(30),
            path='/index.php'
        )
    )
def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
             instances: list, certificate_arn: str, **kwargs):
    """HTTPS-terminating public ALB in front of a set of EC2 instances."""
    super().__init__(scope, id, **kwargs)

    # Any 2xx/3xx on the root path counts as healthy.
    probe = elbv2.HealthCheck(path="/", healthy_http_codes="200-399")
    target_group = elbv2.ApplicationTargetGroup(
        self, "PublicTG", port=8080, vpc=vpc, health_check=probe)
    # Register every instance on its application port.
    for ec2_instance in instances:
        target_group.add_target(
            elbv2.InstanceTarget(ec2_instance.instance_id, port=8080))

    self._public_security_group = ec2.SecurityGroup(self, "PublicLBSG",
                                                    vpc=vpc)
    self._public_security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                                 ec2.Port.tcp(443))

    subnets = vpc.select_subnets(subnet_type=ec2.SubnetType.PUBLIC).subnets
    self._public_lb = elbv2.ApplicationLoadBalancer(
        self,
        "PublicLB",
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnets=subnets),
        internet_facing=True,
        security_group=self._public_security_group)
    self._public_lb.add_listener(
        "PublicLBListener",
        certificates=[elbv2.ListenerCertificate(certificate_arn)],
        port=443,
        default_target_groups=[target_group])

    # Surface the entry URL after deploy.
    core.CfnOutput(self,
                   "CloudIDE URL",
                   value="https://{}".format(
                       self._public_lb.load_balancer_dns_name))
def configure_load_balancers(self, vpc: ec2.Vpc, publoadbal: elbv2.ApplicationLoadBalancer):
    """Create one target group + listener (+ rule) per demo service on
    *publoadbal* and return the target groups keyed by service name.

    Fix: the health check was built as ``elbv2.HealthCheck()`` and then
    mutated with camelCase item assignment (``hc['intervalSecs']`` etc.);
    HealthCheck is a keyword-argument struct, not a dict, and the enum
    members are ``HTTP``/``IP`` (as used elsewhere in this file), so the
    old code failed at synth. The check is now built per target group with
    proper kwargs and ``core.Duration`` values.
    """
    tgroups = {}
    # (service name, healthy HTTP code it answers on its root path, port)
    targetgroups = [
        {'name': 'grafana', 'httpcode': '302', 'port': 3000},
        {'name': 'prometheus', 'httpcode': '405', 'port': 9090},
        {'name': 'colorgateway', 'httpcode': '200', 'port': 9080}]
    for tgs in targetgroups:
        tgname = tgs['name']
        # Each service reports a different "healthy" status code.
        hc = elbv2.HealthCheck(
            interval=core.Duration.seconds(10),
            protocol=elbv2.ApplicationProtocol.HTTP,
            healthy_threshold_count=10,
            unhealthy_threshold_count=10,
            timeout=core.Duration.seconds(5),
            path='/',
            healthy_http_codes=tgs['httpcode'])
        atg = elbv2.ApplicationTargetGroup(
            self,
            id=tgname + 'TargetGroup',
            protocol=elbv2.ApplicationProtocol.HTTP,
            port=tgs['port'],
            deregistration_delay=core.Duration.seconds(30),
            vpc=vpc,
            target_group_name='appmeshdemo-' + tgname + '-1',
            health_check=hc,
            target_type=elbv2.TargetType.IP)
        # Dedicated listener per service port; the rule keeps priority 1
        # since each listener owns its own rule namespace.
        lbl = elbv2.ApplicationListener(
            self,
            tgname + 'LoadBalancerListener',
            port=tgs['port'],
            protocol=elbv2.ApplicationProtocol.HTTP,
            default_target_groups=[atg],
            load_balancer=publoadbal)
        elbv2.ApplicationListenerRule(self,
                                      tgname + 'LoadBalancerRule',
                                      listener=lbl,
                                      target_groups=[atg],
                                      priority=1,
                                      path_pattern='*')
        tgroups[tgname] = atg
    return tgroups
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """Oculus stack: VPC (+flow logs), ALB, ECS cluster and three Fargate
    services (api, web, tca) routed by path, plus a Route53 alias record.

    Requires context keys 'stage' and 'serviceName' and props keys
    'apiHealthPath', 'tcaHealthPath', 'cidr', 'vpcAzCount', 'zoneDomain'
    and 'siteDomain'.
    """
    super().__init__(scope, id, **kwargs)
    # All ingress below is restricted to this corporate VPN CIDR.
    pearson_vpn_connection = ec2.Peer.ipv4('159.182.0.0/16')

    # Props Setup
    stage = scope.node.try_get_context('stage')
    my_service_name = scope.node.try_get_context('serviceName')
    api_health_path = props['apiHealthPath']
    tca_health_path = props['tcaHealthPath']

    # Role VPC Flow Logs assumes to write into CloudWatch Logs.
    vpc_flow_role = iam.Role(
        self,
        'FlowLog',
        assumed_by=iam.ServicePrincipal('vpc-flow-logs.amazonaws.com'))
    vpc_flow_role.add_to_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            actions=[
                                'iam:PassRole', 'logs:CreateLogGroup',
                                'logs:DescribeLogGroups',
                                'logs:CreateLogStream', 'logs:PutLogEvents'
                            ],
                            resources=["*"]))

    # Cloudwatch log group: deleted with the stack, 1-year retention.
    log_group = logs.LogGroup(self,
                              'LogGroup',
                              log_group_name="{0}-{1}".format(
                                  my_service_name, stage),
                              retention=logs.RetentionDays('ONE_YEAR'),
                              removal_policy=core.RemovalPolicy('DESTROY'))

    # Setup VPC resource
    vpc = ec2.Vpc(self,
                  '{0}-{1}-vpc'.format(my_service_name, stage),
                  cidr=props['cidr'],
                  max_azs=props['vpcAzCount'])

    # Setup VPC flow logs.
    # NOTE(review): the destination appends *stage* to
    # log_group.log_group_name, which already ends in the stage, so flow
    # logs target "{svc}-{stage}-{stage}" — confirm this is intended.
    vpc_log = ec2.CfnFlowLog(
        self,
        'FlowLogs',
        resource_id=vpc.vpc_id,
        resource_type='VPC',
        traffic_type='ALL',
        deliver_logs_permission_arn=vpc_flow_role.role_arn,
        log_destination_type='cloud-watch-logs',
        log_group_name="{0}-{1}".format(log_group.log_group_name, stage))

    # Security Group in VPC (allow_all_outbound=None keeps the default).
    vpc_sg = ec2.SecurityGroup(self,
                               'EcSSG',
                               vpc=vpc,
                               allow_all_outbound=None,
                               description="Security Group for Oculus vpc",
                               security_group_name="{0}-{1}-vpc-sg".format(
                                   my_service_name, stage))

    # SSH from the VPN only.
    vpc_sg.add_ingress_rule(peer=pearson_vpn_connection,
                            connection=ec2.Port.tcp(22))

    # ALB Security Group
    alb_sg = ec2.SecurityGroup(self,
                               'AlbSG',
                               vpc=vpc,
                               allow_all_outbound=None,
                               description="Security group for oculus ALB",
                               security_group_name="{0}-{1}-alb-sg".format(
                                   my_service_name, stage))

    # HTTPS from the VPN only.
    alb_sg.add_ingress_rule(peer=pearson_vpn_connection,
                            connection=ec2.Port.tcp(443))

    # Setup ALB
    alb = elbv2.ApplicationLoadBalancer(self,
                                        'ALB',
                                        vpc=vpc,
                                        internet_facing=True,
                                        security_group=alb_sg)

    # NOTE(review): api_tg/web_tg/tca_tg below are never attached to the
    # listener — add_targets() later creates its own target groups, so
    # these three appear to be orphaned resources; confirm before removal.
    # Setup API Target Group
    api_tg = elbv2.ApplicationTargetGroup(
        self,
        'ApiTargetGroup',
        port=8080,
        protocol=elbv2.ApplicationProtocol.HTTP,
        vpc=vpc)

    # Setup Web Target Group
    web_tg = elbv2.ApplicationTargetGroup(
        self,
        'WebTargetGroup',
        port=3030,
        protocol=elbv2.ApplicationProtocol.HTTP,
        vpc=vpc)

    # Setup TCA Target Group
    tca_tg = elbv2.ApplicationTargetGroup(
        self,
        'TcaTargetGroup',
        port=8080,
        protocol=elbv2.ApplicationProtocol.HTTP,
        vpc=vpc)

    # Setup ECS Cluster
    ecs_cluster = ecs.Cluster(self,
                              'ECSCluster',
                              vpc=vpc,
                              cluster_name="{0}-{1}".format(
                                  my_service_name, stage))

    # ECS Execution Role - Grants ECS agent to call AWS APIs
    ecs_execution_role = iam.Role(
        self,
        'ECSExecutionRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        role_name="{0}-{1}-execution-role".format(my_service_name, stage))

    # Setup Role Permissions (image pull, logs, LB registration, scaling)
    ecs_execution_role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'elasticloadbalancing:DeregisterInstancesFromLoadBalancer',
                'elasticloadbalancing:DeregisterTargets',
                'elasticloadbalancing:Describe*',
                'elasticloadbalancing:RegisterInstancesWithLoadBalancer',
                'elasticloadbalancing:RegisterTargets', 'ec2:Describe*',
                'ec2:AuthorizeSecurityGroupIngress', 'sts:AssumeRole',
                'ssm:GetParameters', 'secretsmanager:GetSecretValue',
                'ecr:GetAuthorizationToken',
                'ecr:BatchCheckLayerAvailability',
                'ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage',
                'logs:CreateLogStream', 'logs:PutLogEvents',
                "application-autoscaling:*", "cloudwatch:DescribeAlarms",
                "cloudwatch:PutMetricAlarm"
            ],
            resources=["*"]))

    # ECS Task Role - Grants containers in task permission to AWS APIs
    ecs_task_role = iam.Role(
        self,
        'ECSTaskRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        role_name="{0}-{1}-task-role".format(my_service_name, stage))

    # Setup Role Permissions (app-level: logs, DynamoDB, secrets, KMS)
    ecs_task_role.add_to_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            actions=[
                                'logs:CreateLogStream',
                                'logs:PutLogEvents', 'dynamodb:Query',
                                'dynamodb:ListTables',
                                'secretsmanager:GetSecretValue',
                                'kms:Decrypt'
                            ],
                            resources=["*"]))

    # Setup API Task Definition
    api_taskdef = ecs.FargateTaskDefinition(
        self,
        'APIFargateTask',
        memory_limit_mib=512,
        cpu=256,
        execution_role=ecs_execution_role,
        task_role=ecs_task_role,
        family="{0}-{1}-api".format(my_service_name, stage))

    # Setup Web Task Definition
    web_taskdef = ecs.FargateTaskDefinition(
        self,
        'WebFargateTask',
        memory_limit_mib=512,
        cpu=256,
        execution_role=ecs_execution_role,
        task_role=ecs_task_role,
        family="{0}-{1}-web".format(my_service_name, stage))

    # Setup TCA Task Definition
    tca_taskdef = ecs.FargateTaskDefinition(
        self,
        'TcaFargateTask',
        memory_limit_mib=512,
        cpu=256,
        execution_role=ecs_execution_role,
        task_role=ecs_task_role,
        family="{0}-{1}-tca".format(my_service_name, stage))

    # Existing ECR repositories; images are pinned to specific tags below.
    api_repo = ecr.Repository.from_repository_arn(
        self,
        'ApiImage',
        repository_arn=
        "arn:aws:ecr:us-east-1:346147488134:repository/oculus-api")
    web_repo = ecr.Repository.from_repository_arn(
        self,
        'WebImage',
        repository_arn=
        "arn:aws:ecr:us-east-1:346147488134:repository/oculus-web")
    tca_repo = ecr.Repository.from_repository_arn(
        self,
        'TcaImage',
        repository_arn=
        "arn:aws:ecr:us-east-1:346147488134:repository/oculus-tca-api")

    # Add Container API to Task
    api_container = api_taskdef.add_container(
        "oculus-cdk-{}-api".format(stage),
        image=ecs.EcrImage(repository=api_repo, tag="devqaurl"),
        logging=ecs.LogDriver.aws_logs(stream_prefix="{0}-{1}-api".format(
            my_service_name, stage),
                                       log_group=log_group))

    # Add Container Web to Task
    web_container = web_taskdef.add_container(
        "oculus-cdk-{}-web".format(stage),
        image=ecs.EcrImage(repository=web_repo, tag="removeMetaMockup"),
        logging=ecs.LogDriver.aws_logs(stream_prefix="{0}-{1}-web".format(
            my_service_name, stage),
                                       log_group=log_group))

    # Add Container TCA to Task
    tca_container = tca_taskdef.add_container(
        "oculus-cdk-{}-tca".format(stage),
        image=ecs.EcrImage(repository=tca_repo, tag="ocu-1109"),
        logging=ecs.LogDriver.aws_logs(stream_prefix="{0}-{1}-tca".format(
            my_service_name, stage),
                                       log_group=log_group))

    # Setup API Port Mappings
    api_container.add_port_mappings(
        ecs.PortMapping(container_port=8080,
                        host_port=8080,
                        protocol=ecs.Protocol.TCP))

    # Setup Web Port Mappings
    web_container.add_port_mappings(
        ecs.PortMapping(container_port=3030,
                        host_port=3030,
                        protocol=ecs.Protocol.TCP))

    # Setup TCA Port Mappings
    tca_container.add_port_mappings(
        ecs.PortMapping(container_port=8080,
                        host_port=8080,
                        protocol=ecs.Protocol.TCP))

    # Setup API Fargate Service (1 task, CPU-scaled up to 5 at 50% target)
    api_service = ecs.FargateService(self,
                                     "FargateServiceAPI",
                                     task_definition=api_taskdef,
                                     cluster=ecs_cluster,
                                     desired_count=1,
                                     service_name="{0}-{1}-api".format(
                                         my_service_name, stage))
    api_scaling = api_service.auto_scale_task_count(max_capacity=5)
    api_scaling.scale_on_cpu_utilization('ApiCpuScaling',
                                         target_utilization_percent=50)

    # Setup Web Fargate Service
    web_service = ecs.FargateService(self,
                                     "FargateServiceWeb",
                                     task_definition=web_taskdef,
                                     cluster=ecs_cluster,
                                     desired_count=1,
                                     service_name="{0}-{1}-web".format(
                                         my_service_name, stage))
    web_scaling = web_service.auto_scale_task_count(max_capacity=5)
    web_scaling.scale_on_cpu_utilization('WebCpuScaling',
                                         target_utilization_percent=50)

    # Setup TCA Fargate Service
    tca_service = ecs.FargateService(self,
                                     "FargateServiceTCA",
                                     task_definition=tca_taskdef,
                                     cluster=ecs_cluster,
                                     desired_count=1,
                                     service_name="{0}-{1}-tca".format(
                                         my_service_name, stage))
    tca_scaling = tca_service.auto_scale_task_count(max_capacity=5)
    tca_scaling.scale_on_cpu_utilization('TcaCpuScaling',
                                         target_utilization_percent=50)

    # ALB Listener: HTTPS only; open=False keeps the VPN-only SG rules.
    alb_listener = alb.add_listener(
        'Listener',
        certificate_arns=[
            "arn:aws:acm:us-east-1:829809672214:certificate/a84bb369-03ce-4e5e-9d32-8c84609cad1e"
        ],
        port=443,
        open=False,
        protocol=elbv2.ApplicationProtocol.HTTPS)

    # Attach ALB to ECS API Service: /oculus-api/* (priority 1)
    api_target = alb_listener.add_targets(
        'ECSAPI',
        port=8080,
        priority=1,
        targets=[api_service],
        health_check=elbv2.HealthCheck(path=api_health_path),
        path_pattern='/oculus-api/*')

    # Attach ALB to ECS TCA Service: /tca/* (priority 2)
    tca_target = alb_listener.add_targets(
        'ECSTCA',
        port=8080,
        priority=2,
        targets=[tca_service],
        health_check=elbv2.HealthCheck(path=tca_health_path),
        path_pattern='/tca/*')

    # Attach ALB to ECS Web Service (no path rule -> default action)
    web_target = alb_listener.add_targets(
        'ECSWeb',
        port=3030,
        protocol=elbv2.ApplicationProtocol.HTTP,
        targets=[web_service],
        health_check=elbv2.HealthCheck(path='/'),
    )

    core.CfnOutput(self,
                   'LoadBalancerDNS',
                   value=alb.load_balancer_dns_name)

    # DNS alias: siteDomain -> ALB inside the existing hosted zone.
    zone = route53.HostedZone.from_lookup(self,
                                          'MyHostedZone',
                                          domain_name=props['zoneDomain'])
    route53.ARecord(
        self,
        'ServiceAliasRecord',
        record_name=props['siteDomain'],
        target=route53.RecordTarget(
            alias_target=aws_route53_targets.LoadBalancerTarget(
                load_balancer=alb)),
        zone=zone)
# Green-side (port 8443) HTTPS listeners for the VarSleuth and SpliceAI
# ALBs, shared/dispatcher health checks, and registration of the blue
# VoncWeb service.
# NOTE(review): lb_varsleuth, lb_spliceai, listener_voncweb,
# service_voncweb and certificate_arn_sema4 are defined outside this
# fragment — confirm against the enclosing scope.
listener_varsleuth_green = lb_varsleuth.add_listener(
    "varsleuthlistener_green",
    port=8443,
    open=True,
    protocol=elbv2.ApplicationProtocol.HTTPS,
    certificate_arns=[certificate_arn_sema4])
listener_spliceai_green = lb_spliceai.add_listener(
    "spliceailistener_green",
    port=8443,
    open=True,
    protocol=elbv2.ApplicationProtocol.HTTPS,
    certificate_arns=[certificate_arn_sema4])
# Generic probe: root path, 60 s interval, 5 s timeout.
health_check = elbv2.HealthCheck(interval=core.Duration.seconds(60),
                                 path="/",
                                 timeout=core.Duration.seconds(5))
# The dispatcher is probed on an application endpoint instead of '/'.
dispatcher_health_check = elbv2.HealthCheck(
    interval=core.Duration.seconds(60),
    path="/getallpatients",
    timeout=core.Duration.seconds(5))

# Attach ALB to ECS Service (blue target group, HTTPS end-to-end on 443).
listener_voncweb.add_targets("voncweb",
                             target_group_name='Proj-VONC-VISTA-VoncWeb-Blue',
                             port=443,
                             targets=[service_voncweb],
                             health_check=health_check,
                             protocol=elbv2.ApplicationProtocol.HTTPS)
def create_jupyter_service(self):
    """Run a single-task Jupyter notebook on Fargate behind the shared ALB.

    The service is registered in Cloud Map as 'jupyter' and exposed on the
    load balancer's port 80, forwarding to the notebook on 8888.
    """
    task_def = ecs.FargateTaskDefinition(self,
                                         id="jupyterTaskDefinition",
                                         cpu=1024,
                                         memory_limit_mib=2048)
    notebook = task_def.add_container(
        id="jupyterContainer",
        cpu=1024,
        memory_limit_mib=2048,
        essential=True,
        image=ecs.ContainerImage.from_registry(Config.JUPYTER_IMG.value),
        command=[
            "jupyter",
            "notebook",
            f"--NotebookApp.token={Config.JUPYTER_SECRET.value}",
            "--ip",
            "0.0.0.0",
            "--no-browser",
            "--allow-root",
        ],
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="jupyter-",
            log_retention=logs.RetentionDays.ONE_DAY),
    )
    notebook.add_port_mappings(
        ecs.PortMapping(container_port=8888,
                        host_port=8888,
                        protocol=ecs.Protocol.TCP))

    # Long grace period: Jupyter takes a while to come up.
    self.jupyter_service = ecs.FargateService(
        self,
        id="jupyter",
        cluster=self.cluster,
        desired_count=1,
        service_name="jupyter",
        task_definition=task_def,
        health_check_grace_period=core.Duration.seconds(120),
        assign_public_ip=True,
    )
    self.jupyter_service.enable_cloud_map(
        dns_record_type=servicediscovery.DnsRecordType.A, name="jupyter")

    # Redirects (3xx) count as healthy since the token login redirects.
    probe = elb.HealthCheck(
        interval=core.Duration.seconds(60),
        path="/",
        timeout=core.Duration.seconds(40),
        port="8888",
        healthy_http_codes="200-399",
    )
    notebook_group = elb.ApplicationTargetGroup(
        self,
        id="jupyterTargetGroup",
        port=8888,
        vpc=self.vpc,
        protocol=elb.ApplicationProtocol.HTTP,
        targets=[self.jupyter_service],
        health_check=probe,
    )
    public_listener = self.elb.add_listener("jupyterPublicListener",
                                            port=80,
                                            open=True)
    public_listener.add_target_groups(id="jupyterTargetGroups",
                                      target_groups=[notebook_group])
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Testing stack: small VPC, one Fargate service (app + Datadog agent
    containers) behind an ALB serving HTTPS with an HTTP->HTTPS redirect.

    Fix: the custom 'Grant_dev' managed policy previously listed the
    action as "secretemanager:GetSecreteValue" — a misspelled service
    prefix and action name that matches no AWS API, so the statement
    granted nothing. It now grants "secretsmanager:GetSecretValue".
    """
    super().__init__(scope, id, **kwargs)

    # Load environment variables for the main container (config slot 0).
    EcsStack.readConfig(0)

    # /26 VPC split into public + private /28 subnets across 2 AZs.
    vpc = ec.Vpc(
        self,
        "Main",
        cidr="11.0.0.0/26",
        max_azs=2,
        nat_gateways=1,
        subnet_configuration=[
            ec.SubnetConfiguration(name="public",
                                   cidr_mask=28,
                                   subnet_type=ec.SubnetType.PUBLIC),
            ec.SubnetConfiguration(name="private",
                                   cidr_mask=28,
                                   subnet_type=ec.SubnetType.PRIVATE)
        ])
    cluster = ecs.Cluster(self, "TestingCluster", vpc=vpc)

    # Task/execution role assumable by ECS tasks and EC2; broad full-access
    # managed policies plus a custom KMS/Secrets Manager grant.
    taskRole = iam.Role(
        self,
        id="taskRole",
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'),
            iam.ServicePrincipal(service='ec2.amazonaws.com')),
        role_name="webmaintaskRole",
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonRDSFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSQSFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonDynamoDBFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonRedshiftFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonKinesisFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonECSTaskExecutionRolePolicy"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSNSFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSLambdaRole"),
            iam.ManagedPolicy(self,
                              id="ManagedPolicy",
                              managed_policy_name="Grant_dev",
                              statements=[
                                  iam.PolicyStatement(actions=[
                                      "kms:Decrypt",
                                      "secretsmanager:GetSecretValue"
                                  ],
                                                      resources=["*"])
                              ])
        ])

    # WebApp Main task Definition & Service
    webmain_task_definition = ecs.FargateTaskDefinition(
        self,
        "WebAppMain",
        memory_limit_mib=512,
        cpu=256,
        task_role=taskRole,
        execution_role=taskRole)
    # Main app container; Datadog autodiscovery labels point the agent at
    # port 80 of this container.
    webmain_container = webmain_task_definition.add_container(
        "webapp-mainContainer",
        image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
        environment=EcsStack.commands,
        docker_labels={
            "com.datadoghq.ad.instances":
            "[{\"host\": \"%%host%%\", \"port\": 80}]",
            "com.datadoghq.ad.check_names": "[\"ecs_fargate\"]",
            "com.datadoghq.ad.init_configs": "[{}]"
        },
        logging=ecs.LogDriver.aws_logs(stream_prefix="awslogs"))

    # Clearing the environment variables from the commands(Map) and
    # re-loading slot 1 for the Datadog side container.
    EcsStack.commands.clear()
    EcsStack.readConfig(1)
    webmain_datadog_container = webmain_task_definition.add_container(
        "webapp-main_datadog_Container",
        image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
        environment=EcsStack.commands)

    # App on 80; Datadog APM (8126) and StatsD (8125) on the agent.
    webmain_port_mapping = ecs.PortMapping(container_port=80,
                                           host_port=80,
                                           protocol=ecs.Protocol.TCP)
    datadog_port_mapping1 = ecs.PortMapping(container_port=8126,
                                            host_port=8126,
                                            protocol=ecs.Protocol.TCP)
    datadog_port_mapping2 = ecs.PortMapping(container_port=8125,
                                            host_port=8125,
                                            protocol=ecs.Protocol.TCP)
    webmain_container.add_port_mappings(webmain_port_mapping)
    webmain_datadog_container.add_port_mappings(datadog_port_mapping1)
    webmain_datadog_container.add_port_mappings(datadog_port_mapping2)

    # Security group for service (single office IP allowed on 5432).
    webmain_sg = ec.SecurityGroup(self,
                                  "webmain_sg",
                                  vpc=vpc,
                                  allow_all_outbound=True,
                                  security_group_name="WebAppMain")
    webmain_sg.add_ingress_rule(peer=Peer.ipv4("202.65.133.194/32"),
                                connection=Port.tcp(5432))
    webmain_service = ecs.FargateService(
        self,
        "webapp-main",
        cluster=cluster,
        task_definition=webmain_task_definition,
        desired_count=1,
        security_group=webmain_sg)

    # defining the load balancer (public subnets, internet-facing)
    webmain_lb = elbv2.ApplicationLoadBalancer(
        self,
        "LB",
        vpc=vpc,
        internet_facing=True,
        load_balancer_name="WebAppMain",
        vpc_subnets=ec.SubnetSelection(subnet_type=ec.SubnetType.PUBLIC))

    webmain_target_grp = elbv2.ApplicationTargetGroup(
        self,
        id="webapp-main-target",
        port=80,
        protocol=elbv2.ApplicationProtocol.HTTP,
        health_check=elbv2.HealthCheck(healthy_http_codes="200-399",
                                       healthy_threshold_count=2,
                                       unhealthy_threshold_count=2,
                                       port="traffic-port",
                                       protocol=elbv2.Protocol.HTTP,
                                       timeout=core.Duration.seconds(6),
                                       interval=core.Duration.seconds(10)),
        targets=[webmain_service],
        target_group_name="WebAppMain",
        target_type=elbv2.TargetType.IP,
        vpc=vpc)

    # HTTPS listener with the ACM certificate; HTTP listener only issues a
    # permanent redirect to 443.
    listener = webmain_lb.add_listener(
        "webMain_Listener",
        port=443,
        open=True,
        default_target_groups=[webmain_target_grp],
        certificate_arns=[
            "arn:aws:acm:us-west-2:384853870836:certificate/182c0fdd-813f-4bd3-aee1-0b4543cfb52b"
        ])
    listener2 = webmain_lb.add_listener(
        "webMain_Listener2",
        port=80,
    )
    listener2.add_redirect_response(id="HttptoHttps",
                                    status_code="HTTP_301",
                                    port="443",
                                    protocol="HTTPS")
def __init__(self, scope: core.Construct, id: str, stack_name: str,
             task_definition_cpu: int, task_definition_memory_limit_mib: int,
             docker_image_name: str, container_port: int,
             desired_container_count: int,
             private_subnets: Sequence[aws_ec2.Subnet] = None,
             public_subnets: Sequence[aws_ec2.Subnet] = None,
             private_security_group: aws_ec2.SecurityGroup = None,
             public_security_group: aws_ec2.SecurityGroup = None,
             vpc: aws_ec2.Vpc = None,
             fargate_cluster: aws_ecs.Cluster = None,
             authorizer_lambda_arn: str = None,
             authorizer_lambda_role_arn: str = None,
             **kwargs):
    """Provision an ECS Fargate service behind an internal NLB, exposed via API Gateway.

    Wires together: a shared IAM task/execution role, default networking
    (VPC, subnets, security groups) for any parameter left as ``None``, a
    Fargate task definition + service in the private subnets, a Network
    Load Balancer (in the public subnets, but *not* internet facing) with a
    TCP target group, and an API Gateway ``{proxy+}`` resource that
    forwards every request to the NLB through a VPC Link -- optionally
    guarded by a Lambda token authorizer.

    :param scope: parent construct.
    :param id: logical id; also used as the inline-policy key on the role.
    :param stack_name: names the API Gateway REST API and the VPC Link.
    :param task_definition_cpu: Fargate task CPU units.
    :param task_definition_memory_limit_mib: Fargate task memory (MiB).
    :param docker_image_name: registry image the container runs.
    :param container_port: port the container listens on; ingress to the
        service security group is opened on it from the public subnets.
    :param desired_container_count: desired number of running tasks.
    :param private_subnets: defaults to the VPC's private subnets.
    :param public_subnets: defaults to the VPC's public subnets.
    :param private_security_group: default allows inbound from the NLB
        subnets on ``container_port``.
    :param public_security_group: default allows inbound 80/443 from anywhere.
    :param vpc: a fresh VPC is created when not provided.
    :param fargate_cluster: a fresh cluster is created when not provided.
    :param authorizer_lambda_arn: with ``authorizer_lambda_role_arn``,
        attaches a token authorizer to the proxy method.
    :param authorizer_lambda_role_arn: role API Gateway assumes to invoke
        the authorizer Lambda.
    """
    super().__init__(scope, id, **kwargs)

    # One role serves as both the task role and the execution role.
    self.role = aws_iam.Role(
        self,
        'Role',
        assumed_by=aws_iam.ServicePrincipal(service='ecs.amazonaws.com'),
        managed_policies=[
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name=
                'service-role/AmazonECSTaskExecutionRolePolicy')
        ],
        inline_policies={
            id:
            aws_iam.PolicyDocument(statements=[
                aws_iam.PolicyStatement(
                    effect=aws_iam.Effect.ALLOW,
                    actions=[
                        'kms:Encrypt', 'kms:Decrypt', 'kms:ReEncrypt*',
                        'kms:GenerateDataKey*', 'kms:DescribeKey',
                        'ec2:CreateNetworkInterface',
                        'ec2:DescribeNetworkInterfaces',
                        'ec2:DeleteNetworkInterface',
                        # Remaining actions from
                        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/quickref-ecs.html
                        'elasticloadbalancing:DeregisterInstancesFromLoadBalancer',
                        'elasticloadbalancing:DeregisterTargets',
                        'elasticloadbalancing:Describe*',
                        'elasticloadbalancing:RegisterInstancesWithLoadBalancer',
                        'elasticloadbalancing:RegisterTargets',
                        'ec2:Describe*',
                        'ec2:AuthorizeSecurityGroupIngress'
                    ],
                    resources=['*'])
            ])
        })
    # Fargate tasks assume the role through the 'ecs-tasks' principal, not
    # the 'ecs' principal used above, so it must be added to the trust
    # policy as well.
    self.role.assume_role_policy.add_statements(
        aws_iam.PolicyStatement(
            actions=['sts:AssumeRole'],
            principals=[
                aws_iam.ServicePrincipal(service='ecs-tasks.amazonaws.com')
            ]))

    # --- Set defaults for any networking/cluster parameters left as None.
    if vpc is None:
        vpc = aws_ec2.Vpc(self, 'Vpc')
    if private_subnets is None:
        private_subnets = vpc.private_subnets
    if public_subnets is None:
        public_subnets = vpc.public_subnets
    if public_security_group is None:
        public_security_group = aws_ec2.SecurityGroup(self,
                                                      'PublicSecurityGroup',
                                                      vpc=vpc,
                                                      allow_all_outbound=True)
        # Allow inbound HTTP traffic
        public_security_group.add_ingress_rule(
            peer=aws_ec2.Peer.ipv4(cidr_ip='0.0.0.0/0'),
            connection=aws_ec2.Port.tcp(port=80))
        # Allow inbound HTTPS traffic
        public_security_group.add_ingress_rule(
            peer=aws_ec2.Peer.ipv4(cidr_ip='0.0.0.0/0'),
            connection=aws_ec2.Port.tcp(port=443))
    if private_security_group is None:
        private_security_group = aws_ec2.SecurityGroup(
            self, 'PrivateSecurityGroup', vpc=vpc, allow_all_outbound=True)
        public_subnet_cidr_blocks = Utils.get_subnet_cidr_blocks(
            public_subnets)
        # Create an ingress rule for each of the NLB's subnet's CIDR ranges
        # and add the rules to the ECS service's security group.  This will
        # allow requests from the NLB to reach the ECS service (inbound
        # traffic from the public subnets).
        for cidr_block in public_subnet_cidr_blocks:
            private_security_group.add_ingress_rule(
                peer=aws_ec2.Peer.ipv4(cidr_ip=cidr_block),
                connection=aws_ec2.Port.tcp(port=container_port))
    if fargate_cluster is None:
        fargate_cluster = aws_ecs.Cluster(
            self,
            'FargateCluster',
        )

    # --- Task definition, container and service.
    task_def = aws_ecs.FargateTaskDefinition(
        self,
        'TaskDefinition',
        cpu=task_definition_cpu,
        memory_limit_mib=task_definition_memory_limit_mib,
        task_role=self.role,
        execution_role=self.role)

    container = aws_ecs.ContainerDefinition(
        self,
        'Container',
        image=aws_ecs.ContainerImage.from_registry(name=docker_image_name),
        task_definition=task_def,
        logging=aws_ecs.AwsLogDriver(stream_prefix='/ecs'))
    # BUG FIX: ecs.PortMapping expects the aws_ecs.Protocol enum; the
    # original passed aws_ec2.Protocol.TCP, which fails jsii's type check
    # at synth time (compare the correct usage elsewhere in this file).
    container.add_port_mappings(
        aws_ecs.PortMapping(container_port=container_port,
                            protocol=aws_ecs.Protocol.TCP))

    ecs_service = aws_ecs.FargateService(
        self,
        'FargateService',
        cluster=fargate_cluster,
        task_definition=task_def,
        vpc_subnets=aws_ec2.SubnetSelection(subnets=private_subnets),
        security_group=private_security_group,
        desired_count=desired_container_count)

    # --- Internal NLB in the public subnets fronting the ECS service.
    target_group = aws_elasticloadbalancingv2.NetworkTargetGroup(
        self,
        'TargetGroup',
        port=80,
        # Health check occurs over HTTP
        health_check=aws_elasticloadbalancingv2.HealthCheck(
            protocol=aws_elasticloadbalancingv2.Protocol.TCP),
        targets=[ecs_service],
        vpc=vpc)

    nlb = aws_elasticloadbalancingv2.NetworkLoadBalancer(
        self,
        'NetworkLoadBalancer',
        vpc=vpc,
        internet_facing=False,
        vpc_subnets=aws_ec2.SubnetSelection(subnets=public_subnets),
    )
    nlb.add_listener(
        id='Listener',
        port=80,  # HTTP listener
        default_target_groups=[target_group])

    # nlb.log_access_logs(  # todo: add this later when you have time to research the correct bucket policy.
    #     bucket=aws_s3.Bucket(
    #         self, 'LoadBalancerLogBucket',
    #         bucket_name='load-balancer-logs',
    #         public_read_access=False,
    #         block_public_access=aws_s3.BlockPublicAccess(
    #             block_public_policy=True,
    #             restrict_public_buckets=True
    #         )
    #     )
    # )

    # Dependencies
    ecs_service.node.add_dependency(nlb)

    # --- API Gateway proxying every path to the NLB through a VPC Link.
    rest_api = aws_apigateway.RestApi(self, stack_name)
    resource = rest_api.root.add_resource(
        path_part='{proxy+}',
        default_method_options=aws_apigateway.MethodOptions(
            request_parameters={'method.request.path.proxy': True}))

    token_authorizer = None
    if authorizer_lambda_arn and authorizer_lambda_role_arn:
        token_authorizer = aws_apigateway.TokenAuthorizer(  # todo: make this a parameter?
            self,
            'JwtTokenAuthorizer',
            results_cache_ttl=core.Duration.minutes(5),
            identity_source='method.request.header.Authorization',
            assume_role=aws_iam.Role.from_role_arn(
                self,
                'AuthorizerLambdaInvokationRole',
                role_arn=authorizer_lambda_role_arn),
            handler=aws_lambda.Function.from_function_arn(
                self, 'AuthorizerLambda',
                function_arn=authorizer_lambda_arn))

    resource.add_method(
        http_method='ANY',
        authorization_type=aws_apigateway.AuthorizationType.CUSTOM,
        authorizer=token_authorizer,
        integration=aws_apigateway.HttpIntegration(
            url=f'http://{nlb.load_balancer_dns_name}/{{proxy}}',
            http_method='ANY',
            proxy=True,
            options=aws_apigateway.IntegrationOptions(
                request_parameters={
                    'integration.request.path.proxy':
                    'method.request.path.proxy'
                },
                connection_type=aws_apigateway.ConnectionType.VPC_LINK,
                vpc_link=aws_apigateway.VpcLink(
                    self,
                    'VpcLink',
                    description=
                    f'API Gateway VPC Link to internal NLB for {stack_name}',
                    vpc_link_name=stack_name,
                    targets=[nlb]))))
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Stand up an ALB-fronted Fargate service inside pre-existing networking.

    External resources (VPC, subnets, security groups, task role) are
    imported by id/ARN from the ``impRes`` mapping; everything this stack
    creates (driven by ``newRes`` and the ``My*`` settings below) is given
    a DESTROY removal policy so a stack delete cleans it up.
    """
    super().__init__(scope, construct_id, **kwargs)

    # The code that defines your stack goes here

    #############################################
    # Imported resources and custom settings: start
    #############################################
    # Hard-coded ids of pre-existing resources in cn-north-1.
    impRes={
        "vpc":"vpc-0883083ff3a10c1ec",
        "SvcSG":"sg-04d3b60e954c1c1ef",
        "ALBSG":"sg-0b6d093d52d48bba9",
        "ALBInternet":True,
        "taskRole":"arn:aws-cn:iam::627484392488:role/ecsTaskExecutionRole",
        "AlbSubnet":[
            {"subnetId":"subnet-0d16fa0c969f234d3",
             "routeTabId":"rtb-074c6b532f3030ad6"},
            {"subnetId":"subnet-0f28a97c04d3b11cd",
             "routeTabId":"rtb-074c6b532f3030ad6"}
        ],
        #"SvcSubNet":[{"subnetId":"subnet-0d16fa0c969f234d3","routeTabId":"rtb-074c6b532f3030ad6"}]
        "SvcSubNet":[{"subnetId":"subnet-0f28a97c04d3b11cd","routeTabId":"rtb-0587cc522717461cd"},
                     {"subnetId":"subnet-0d16fa0c969f234d3","routeTabId":"rtb-0587cc522717461cd"}]
    }
    # Settings for the resources this stack creates.
    newRes={
        "TG":{"HealthPath":"/test.html","Port":80,"containPort":80},
        "Listener":{"Port":80},
        "TaskFamily":"tsFargate",
        "ImageAsset1":{"DockfilePath":"httpd-ssh",
                       "BuildArgs":{"HTTP_PROXY":"http://YOUR_PROXY_SERVER:80"}
        }
    }

    MyTaskDefinition=[{"Cpu":512,"MemLimitMib":1024}]
    # NOTE(review): only containerName/essential/portMappings/environment are
    # consumed below; "cpu" and "LogMountPoint" appear unused here.
    MyContainerDefinition=[
        {"containerName":"MyContainer1",
         "cpu":256,
         "essential":True,
         "portMappings":[ecs.PortMapping(container_port=80,host_port=80)],
         #"portMappings":[ecs.PortMapping(container_port=80,host_port=80),ecs.PortMapping(container_port=22,host_port=22)],
         "environment":{"SSH_PUBLIC_KEY":"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC/alWrS+HH5KkPbso+Tsy+Z0WGTX5wvXvon5OacLMyOU3gj2mbbIifasXf/RadpuywuyW3uFirtRlPmSb5Q0PVLODku503Xettw+u6/Z22VV7F2ACgg4iHaCo2SR4L8saUrLLfcKXKr/WCn3w7uYcqGsXEcSFCCSZgn4BoZJqP4Q=="},
         "LogMountPoint":["/usr/local/apache2/logs"]
        }
    ]
    MySvc={"AssignPubIp":True, "desiredCount":1}
    #############################################
    # Imported resources and custom settings: end
    #############################################

    # If you import an external resource you cannot set a destroy policy on it.
    # Import VPC, private subnets, SG.
    vpc = ec2.Vpc.from_lookup(self, "vpc", vpc_id=impRes["vpc"])
    # Import the service security group (immutable: no rules added by CDK).
    mysvcsg = ec2.SecurityGroup.from_security_group_id(self, "svcsg",
        impRes["SvcSG"],
        mutable=False)
    # Import the task/execution role.
    taskRole = iam.Role.from_role_arn(self, "TaskRole",impRes["taskRole"])

    # Create the ALB target group.
    mytargetGrp = elbv2.ApplicationTargetGroup(self, "targetGrp",
        target_type=elbv2.TargetType.IP,
        port=newRes["TG"]["Port"],
        vpc=vpc,
        health_check=elbv2.HealthCheck(path=newRes["TG"]["HealthPath"]))
    # A target group cannot use .apply_removal_policy directly -- escape
    # hatch down to the underlying CfnResource.
    cfn_mytargetGrp=mytargetGrp.node.find_child("Resource")
    cfn_mytargetGrp.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

    # Import the public subnets for the ALB.
    albsubnets = [
        ec2.Subnet.from_subnet_attributes(self,'albsubnetid1',
            subnet_id = impRes["AlbSubnet"][0]["subnetId"],
            route_table_id=impRes["AlbSubnet"][0]["routeTabId"]
        ),
        ec2.Subnet.from_subnet_attributes(self,'albsubnetid2',
            subnet_id = impRes["AlbSubnet"][1]["subnetId"],
            route_table_id=impRes["AlbSubnet"][1]["routeTabId"]
        )
    ]
    vpc_subnets_selection = ec2.SubnetSelection(subnets=albsubnets)
    # Create a new ALB.
    myalb = elbv2.ApplicationLoadBalancer(self, "ALBv2",
        vpc=vpc,
        security_group=ec2.SecurityGroup.from_security_group_id(self, "ALBSG",
            impRes["ALBSG"],mutable=False),
        internet_facing=impRes["ALBInternet"],
        vpc_subnets=vpc_subnets_selection)
    myalb.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
    # Create a new ALB listener.
    myalblistener = elbv2.ApplicationListener(self, "ALBlistenter",
        load_balancer=myalb,
        port=newRes["Listener"]["Port"])
    myalblistener.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
    myalblistener.add_target_groups("albaddtg", target_groups=[mytargetGrp])

    # Create a new ECS cluster.
    mycluster = ecs.Cluster(self, "cluster", vpc=vpc)
    mycluster.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
    fargatetaskDefinition = ecs.FargateTaskDefinition(self, "fargatetaskDefinition",
        cpu=MyTaskDefinition[0]["Cpu"],
        memory_limit_mib=MyTaskDefinition[0]["MemLimitMib"],
        execution_role=taskRole,
        family=newRes["TaskFamily"],
        task_role=taskRole)
        #volumes=myEfsVols)
    fargatetaskDefinition.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

    # Define the docker image asset.
    dirname = os.path.dirname(__file__)
    # For container 1 (normally httpd): create an image asset -- the image
    # is built locally, then pushed to ECR.
    asset1 = DockerImageAsset(self, "ImageAsset1",
        directory=os.path.join(dirname, "../..", newRes["ImageAsset1"]["DockfilePath"]),
        build_args=newRes["ImageAsset1"]["BuildArgs"]
    )
    # Create the container definition for the task definition.
    MyContainer1def = ecs.ContainerDefinition(self, "MyContainer1def",
        task_definition=fargatetaskDefinition,
        linux_parameters=ecs.LinuxParameters(self,"LinuxPara1",init_process_enabled=True),
        image=ecs.ContainerImage.from_ecr_repository(asset1.repository, asset1.image_uri.rpartition(":")[-1]),
        container_name=MyContainerDefinition[0]["containerName"],
        essential=MyContainerDefinition[0]["essential"],
        port_mappings=MyContainerDefinition[0]["portMappings"],
        environment=MyContainerDefinition[0]["environment"]
    )

    # Import the service's private subnets.
    mysvcprivateSNs = [
        ec2.Subnet.from_subnet_attributes(self,'svcprivateSN1',
            subnet_id = impRes["SvcSubNet"][0]["subnetId"],
            route_table_id=impRes["SvcSubNet"][0]["routeTabId"]),
        ec2.Subnet.from_subnet_attributes(self,'svcprivateSN2',
            subnet_id = impRes["SvcSubNet"][1]["subnetId"],
            route_table_id=impRes["SvcSubNet"][1]["routeTabId"])
    ]
    # Create the service.
    myservice=ecs.FargateService(self,"service",
        task_definition=fargatetaskDefinition,
        assign_public_ip=MySvc["AssignPubIp"],
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        vpc_subnets=ec2.SubnetSelection(subnets=mysvcprivateSNs),
        security_group=mysvcsg,
        cluster=mycluster,
        desired_count=MySvc["desiredCount"])
    # Register the container as the target group's target.
    mytargetGrp.add_target(myservice.load_balancer_target(container_name="MyContainer1",container_port=newRes["TG"]["containPort"],
        protocol=ecs.Protocol.TCP))
def __init__(self, scope, id, vpc, **kwarg) -> None:
    """Deploy the App Mesh "colorteller" demo onto an ECS Fargate cluster.

    Creates one Fargate service per mesh node -- gateway, the black/blue/
    red/white colortellers, and a tcpecho backend.  Each colorteller/gateway
    task runs the app container plus an Envoy proxy sidecar and an X-Ray
    daemon.  All services register in the ``svc.test.local`` Cloud Map
    namespace, autoscale on CPU utilization, and the gateway is exposed
    through a public ALB on port 80.

    :param scope: parent construct.
    :param id: logical id of this construct.
    :param vpc: existing VPC to deploy the cluster and ALB into.
    """
    super().__init__(scope, id, **kwarg)

    # cluster creation
    cluster = aws_ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)

    # service discovery creation
    sd_namespace = cluster.add_default_cloud_map_namespace(
        name="svc.test.local", vpc=vpc)
    aws_servicediscovery.Service(self,
                                 "svc.test.local",
                                 namespace=sd_namespace,
                                 load_balancer=True)

    # ECS execution/task role creation
    ecs_principle = aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com')
    execution_role = aws_iam.Role(self,
                                  'execution-role',
                                  assumed_by=ecs_principle)
    execution_role.add_managed_policy(
        policy=aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AWSCodeDeployRoleForECS"))
    execution_role.add_managed_policy(
        policy=aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AmazonEC2ContainerRegistryReadOnly"))
    task_role = aws_iam.Role(self, 'task-role', assumed_by=ecs_principle)
    task_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AWSAppMeshEnvoyAccess"))
    task_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="CloudWatchFullAccess"))
    task_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AWSXRayDaemonWriteAccess"))

    # envoy ecr object -- AWS-vended App Mesh Envoy image (account 840364872350)
    envoy_ecr = aws_ecr.Repository.from_repository_attributes(
        self,
        'aws-envoy',
        repository_arn=core.Stack.of(self).format_arn(
            service="ecr",
            resource="aws-appmesh-envoy",
            account="840364872350"),
        repository_name="aws-appmesh-envoy")

    # colorteller image builds (local docker assets)
    gateway_image = aws_ecs.ContainerImage.from_asset("./src/gateway")
    colorteller_image = aws_ecs.ContainerImage.from_asset(
        "./src/colorteller")

    # logging setup -- one shared log group, one stream prefix per service
    log_group = aws_logs.LogGroup(self,
                                  "/ecs/colorteller",
                                  retention=aws_logs.RetentionDays.ONE_DAY)
    gateway_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                  stream_prefix="gateway")
    black_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                stream_prefix="black")
    blue_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                               stream_prefix="blue")
    red_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                              stream_prefix="red")
    white_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                stream_prefix="white")
    tcpecho_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                  stream_prefix="tcpecho")

    # Mesh proxy configuration shared by all meshed task definitions.
    mesh_properties = aws_ecs.AppMeshProxyConfigurationProps(
        app_ports=[9080],
        proxy_egress_port=15001,
        proxy_ingress_port=15000,
        egress_ignored_i_ps=["169.254.170.2", "169.254.169.254"],
        ignored_uid=1337)

    # envoy ulimit defaults (raise NOFILE for the proxy)
    envoy_ulimit = aws_ecs.Ulimit(hard_limit=15000,
                                  name=aws_ecs.UlimitName.NOFILE,
                                  soft_limit=15000)

    # gateway task def - requires envoy proxy container, gateway app and x-ray
    gateway_task_def = aws_ecs.FargateTaskDefinition(
        self,
        "gateway_task",
        cpu=256,
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role,
        proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
            container_name="envoy", properties=mesh_properties))
    gateway_task_def.add_container("gateway",
                                   logging=gateway_ecs_logs,
                                   environment={
                                       "SERVER_PORT": "9080",
                                       "STAGE": "v1.1",
                                       "COLOR_TELLER_ENDPOINT":
                                       "colorteller.svc.test.local:9080",
                                       "TCP_ECHO_ENDPOINT":
                                       "tcpecho.svc.test.local:2701"
                                   },
                                   image=gateway_image).add_port_mappings(
                                       aws_ecs.PortMapping(
                                           container_port=9080,
                                           protocol=aws_ecs.Protocol.TCP))
    gateway_task_def.add_container(
        "xray",
        logging=gateway_ecs_logs,
        image=aws_ecs.ContainerImage.from_registry(
            "amazon/aws-xray-daemon")).add_port_mappings(
                aws_ecs.PortMapping(container_port=2000,
                                    protocol=aws_ecs.Protocol.UDP))
    gateway_envoy_container = gateway_task_def.add_container(
        "envoy",
        logging=gateway_ecs_logs,
        environment={
            "ENVOY_LOG_LEVEL": "debug",
            "ENABLE_ENVOY_XRAY_TRACING": "1",
            "ENABLE_ENVOY_STATS_TAGS": "1",
            "APPMESH_VIRTUAL_NODE_NAME":
            "mesh/ColorTellerAppMesh/virtualNode/gateway",
            "APPMESH_XDS_ENDPOINT": ""
        },
        image=aws_ecs.ContainerImage.from_ecr_repository(
            repository=envoy_ecr, tag="v1.12.1.1-prod"),
        essential=True,
        user="******",
        health_check=aws_ecs.HealthCheck(command=[
            "CMD-SHELL",
            "curl -s http://localhost:9901/ready |grep -q LIVE"
        ]))
    gateway_envoy_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9901,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15000,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15001,
                            protocol=aws_ecs.Protocol.TCP),
    )
    gateway_envoy_container.add_ulimits(envoy_ulimit)

    # black task def - requires color app, envoy and x-ray containers
    black_task_def = aws_ecs.FargateTaskDefinition(
        self,
        "black-task",
        cpu=256,
        family="black",
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role,
        proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
            container_name="envoy", properties=mesh_properties))
    black_envoy_container = black_task_def.add_container(
        "envoy",
        logging=black_ecs_logs,
        environment={
            "ENVOY_LOG_LEVEL": "info",
            "ENABLE_ENVOY_XRAY_TRACING": "1",
            "ENABLE_ENVOY_STATS_TAGS": "1",
            "APPMESH_VIRTUAL_NODE_NAME":
            "mesh/ColorTellerAppMesh/virtualNode/black",
            "APPMESH_XDS_ENDPOINT": ""
        },
        image=aws_ecs.ContainerImage.from_ecr_repository(
            repository=envoy_ecr, tag="v1.12.1.1-prod"),
        essential=True,
        user="******",
        health_check=aws_ecs.HealthCheck(command=[
            "CMD-SHELL",
            "curl -s http://localhost:9901/ready |grep -q LIVE"
        ]))
    black_envoy_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9901,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15000,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15001,
                            protocol=aws_ecs.Protocol.TCP),
    )
    black_envoy_container.add_ulimits(envoy_ulimit)
    black_app_container = black_task_def.add_container(
        "black",
        logging=black_ecs_logs,
        environment={
            "COLOR": "black",
            "SERVER_PORT": "9080",
            "STAGE": "v1.1"
        },
        image=colorteller_image)
    black_app_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9080,
                            protocol=aws_ecs.Protocol.TCP))
    # The app must not start until its Envoy sidecar is healthy.
    black_app_container.add_container_dependencies(
        aws_ecs.ContainerDependency(
            container=black_envoy_container,
            condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
    black_task_def.add_container(
        "xray",
        logging=black_ecs_logs,
        image=aws_ecs.ContainerImage.from_registry(
            "amazon/aws-xray-daemon")).add_port_mappings(
                aws_ecs.PortMapping(container_port=2000,
                                    protocol=aws_ecs.Protocol.UDP))

    # blue task def (same as black)
    blue_task_def = aws_ecs.FargateTaskDefinition(
        self,
        "blue-task",
        cpu=256,
        family="blue",
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role,
        proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
            container_name="envoy", properties=mesh_properties))
    blue_envoy_container = blue_task_def.add_container(
        "envoy",
        logging=blue_ecs_logs,
        environment={
            "ENVOY_LOG_LEVEL": "info",
            "ENABLE_ENVOY_XRAY_TRACING": "1",
            "ENABLE_ENVOY_STATS_TAGS": "1",
            "APPMESH_VIRTUAL_NODE_NAME":
            "mesh/ColorTellerAppMesh/virtualNode/blue",
            "APPMESH_XDS_ENDPOINT": ""
        },
        image=aws_ecs.ContainerImage.from_ecr_repository(
            repository=envoy_ecr, tag="v1.12.1.1-prod"),
        essential=True,
        user="******",
        health_check=aws_ecs.HealthCheck(command=[
            "CMD-SHELL",
            "curl -s http://localhost:9901/ready |grep -q LIVE"
        ]))
    blue_envoy_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9901,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15000,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15001,
                            protocol=aws_ecs.Protocol.TCP),
    )
    blue_envoy_container.add_ulimits(envoy_ulimit)
    blue_app_container = blue_task_def.add_container(
        "blue",
        logging=blue_ecs_logs,
        environment={
            # BUG FIX: was "black" -- a copy-paste leftover from the black
            # task definition; the blue virtual node must report "blue".
            "COLOR": "blue",
            "SERVER_PORT": "9080",
            "STAGE": "v1.1"
        },
        image=colorteller_image)
    blue_app_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9080,
                            protocol=aws_ecs.Protocol.TCP))
    blue_app_container.add_container_dependencies(
        aws_ecs.ContainerDependency(
            container=blue_envoy_container,
            condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
    blue_task_def.add_container(
        "xray",
        logging=blue_ecs_logs,
        image=aws_ecs.ContainerImage.from_registry(
            "amazon/aws-xray-daemon")).add_port_mappings(
                aws_ecs.PortMapping(container_port=2000,
                                    protocol=aws_ecs.Protocol.UDP))

    # red task def (same as black)
    red_task_def = aws_ecs.FargateTaskDefinition(
        self,
        "red-task",
        cpu=256,
        family="red-task",
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role,
        proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
            container_name="envoy", properties=mesh_properties))
    red_envoy_container = red_task_def.add_container(
        "envoy",
        logging=red_ecs_logs,
        environment={
            "ENVOY_LOG_LEVEL": "info",
            "ENABLE_ENVOY_XRAY_TRACING": "1",
            "ENABLE_ENVOY_STATS_TAGS": "1",
            "APPMESH_VIRTUAL_NODE_NAME":
            "mesh/ColorTellerAppMesh/virtualNode/red",
            "APPMESH_XDS_ENDPOINT": ""
        },
        image=aws_ecs.ContainerImage.from_ecr_repository(
            repository=envoy_ecr, tag="v1.12.1.1-prod"),
        essential=True,
        user="******",
        health_check=aws_ecs.HealthCheck(command=[
            "CMD-SHELL",
            "curl -s http://localhost:9901/ready |grep -q LIVE"
        ]))
    red_envoy_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9901,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15000,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15001,
                            protocol=aws_ecs.Protocol.TCP),
    )
    red_envoy_container.add_ulimits(envoy_ulimit)
    red_app_container = red_task_def.add_container("red",
                                                   logging=red_ecs_logs,
                                                   environment={
                                                       "COLOR": "red",
                                                       "SERVER_PORT": "9080",
                                                       "STAGE": "v1.2"
                                                   },
                                                   image=colorteller_image)
    red_app_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9080,
                            protocol=aws_ecs.Protocol.TCP))
    red_app_container.add_container_dependencies(
        aws_ecs.ContainerDependency(
            container=red_envoy_container,
            condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
    red_task_def.add_container(
        "xray",
        logging=red_ecs_logs,
        image=aws_ecs.ContainerImage.from_registry(
            "amazon/aws-xray-daemon")).add_port_mappings(
                aws_ecs.PortMapping(container_port=2000,
                                    protocol=aws_ecs.Protocol.UDP))

    # white task def (same as black) - colorteller.svc.test.local points to
    # this service (because containers need something to resolve to or they
    # fail)
    white_task_def = aws_ecs.FargateTaskDefinition(
        self,
        "white-task",
        cpu=256,
        family="white",
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role,
        proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
            container_name="envoy", properties=mesh_properties))
    white_envoy_container = white_task_def.add_container(
        "envoy",
        logging=white_ecs_logs,
        environment={
            "ENVOY_LOG_LEVEL": "info",
            "ENABLE_ENVOY_XRAY_TRACING": "1",
            "ENABLE_ENVOY_STATS_TAGS": "1",
            "APPMESH_VIRTUAL_NODE_NAME":
            "mesh/ColorTellerAppMesh/virtualNode/white",
            "APPMESH_XDS_ENDPOINT": ""
        },
        image=aws_ecs.ContainerImage.from_ecr_repository(
            repository=envoy_ecr, tag="v1.12.1.1-prod"),
        essential=True,
        user="******",
        health_check=aws_ecs.HealthCheck(command=[
            "CMD-SHELL",
            "curl -s http://localhost:9901/ready |grep -q LIVE"
        ]))
    white_envoy_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9901,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15000,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15001,
                            protocol=aws_ecs.Protocol.TCP),
    )
    white_envoy_container.add_ulimits(envoy_ulimit)
    white_app_container = white_task_def.add_container(
        "white",
        logging=white_ecs_logs,
        environment={
            "COLOR": "white",
            "SERVER_PORT": "9080",
            "STAGE": "v1.1"
        },
        image=colorteller_image)
    white_app_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9080,
                            protocol=aws_ecs.Protocol.TCP))
    white_app_container.add_container_dependencies(
        aws_ecs.ContainerDependency(
            container=white_envoy_container,
            condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
    white_task_def.add_container(
        "xray",
        logging=white_ecs_logs,
        image=aws_ecs.ContainerImage.from_registry(
            "amazon/aws-xray-daemon")).add_port_mappings(
                aws_ecs.PortMapping(container_port=2000,
                                    protocol=aws_ecs.Protocol.UDP))

    # tcpecho service (external docker image, not meshed)
    tcpecho_task_def = aws_ecs.FargateTaskDefinition(
        self,
        'tcpecho-tasks',
        cpu=256,
        family="tcpecho",
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role)
    tcpecho_task_def.add_container(
        "tcpecho",
        logging=tcpecho_ecs_logs,
        environment={
            "TCP_PORT": "2701",
            "NODE_NAME": "mesh/ColorTellerAppMesh/virtualNode/echo"
        },
        image=aws_ecs.ContainerImage.from_registry("cjimti/go-echo"),
        essential=True,
    ).add_port_mappings(
        aws_ecs.PortMapping(container_port=2701,
                            protocol=aws_ecs.Protocol.TCP))

    # adds task defs to fargate services - adds security group access to
    # local vpc cidr block; all the services are treated the same way
    gateway_fargate_service = aws_ecs.FargateService(
        self,
        "gateway",
        cluster=cluster,
        task_definition=gateway_task_def,
        desired_count=2,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=sd_namespace, name="gateway"))
    gateway_fargate_service.connections.security_groups[
        0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                            connection=aws_ec2.Port.tcp(9080),
                            description="Allow http inbound from VPC")
    black_colorteller_fargate_service = aws_ecs.FargateService(
        self,
        "black",
        cluster=cluster,
        task_definition=black_task_def,
        desired_count=2,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=sd_namespace, name="black"))
    black_colorteller_fargate_service.connections.security_groups[
        0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                            connection=aws_ec2.Port.tcp(9080),
                            description="Allow http inbound from VPC")
    blue_colorteller_fargate_service = aws_ecs.FargateService(
        self,
        "blue",
        cluster=cluster,
        task_definition=blue_task_def,
        desired_count=2,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=sd_namespace, name="blue"))
    blue_colorteller_fargate_service.connections.security_groups[
        0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                            connection=aws_ec2.Port.tcp(9080),
                            description="Allow http inbound from VPC")
    red_colorteller_fargate_service = aws_ecs.FargateService(
        self,
        "red",
        cluster=cluster,
        task_definition=red_task_def,
        desired_count=2,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=sd_namespace, name="red"))
    red_colorteller_fargate_service.connections.security_groups[
        0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                            connection=aws_ec2.Port.tcp(9080),
                            description="Allow http inbound from VPC")
    # Registered as "colorteller" so colorteller.svc.test.local resolves here.
    white_colorteller_fargate_service = aws_ecs.FargateService(
        self,
        "white",
        cluster=cluster,
        task_definition=white_task_def,
        desired_count=2,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=sd_namespace, name="colorteller"))
    white_colorteller_fargate_service.connections.security_groups[
        0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                            connection=aws_ec2.Port.tcp(9080),
                            description="Allow http inbound from VPC")
    echo_fargate_service = aws_ecs.FargateService(
        self,
        "tcpecho",
        cluster=cluster,
        task_definition=tcpecho_task_def,
        desired_count=2,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=sd_namespace, name="tcpecho"))
    echo_fargate_service.connections.security_groups[0].add_ingress_rule(
        peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
        connection=aws_ec2.Port.tcp(2701),
        description="Allow http inbound from VPC")

    # adds autoscaling policies to all services
    for service in [
            black_colorteller_fargate_service,
            blue_colorteller_fargate_service,
            red_colorteller_fargate_service,
            white_colorteller_fargate_service, gateway_fargate_service,
            echo_fargate_service
    ]:
        # NOTE(review): FargateService has no .service attribute, so the
        # try branch appears dead and the except path always runs --
        # kept as-is to preserve behavior; confirm before simplifying.
        try:
            scaling = service.service.auto_scale_task_count(max_capacity=2)
        except AttributeError:
            scaling = service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=50,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60),
        )

    # configure loadbalancer to listen on port 80 and add targets to
    # gateway and echo apps
    load_balancer = aws_elasticloadbalancingv2.ApplicationLoadBalancer(
        self, "lb", vpc=vpc, internet_facing=True)
    listener = load_balancer.add_listener("PublicListener",
                                          port=80,
                                          open=True)

    health_check = aws_elasticloadbalancingv2.HealthCheck(
        interval=core.Duration.seconds(60),
        path="/ping",
        port="9080",
        timeout=core.Duration.seconds(5))

    # attach ALB to ECS service
    listener.add_targets(
        "gateway",
        port=80,
        targets=[gateway_fargate_service, echo_fargate_service],
        health_check=health_check,
    )

    # outputs of ALB and cluster
    core.CfnOutput(self,
                   "LoadBalancerDNS",
                   value=load_balancer.load_balancer_dns_name)
    core.CfnOutput(self, "ClusterName", value=cluster.cluster_name)
def __init__(self, scope: core.Construct, id_: str, props, **kwargs) -> None:
    """Provision the htsget reference server stack.

    Builds, in order: config lookups from SSM Parameter Store, an ECS
    Fargate service (sidecar fetches config, main container serves htsget),
    an internal ALB in front of it, an API Gateway v2 HttpApi reaching the
    ALB through a VpcLink, a Route53 alias for the custom domain, and a
    Cognito JWT authorizer on a catch-all GET route.

    :param scope: parent construct.
    :param id_: stack logical ID.
    :param props: dict carrying 'namespace', 'ecr_repo',
        'htsget_refserver_image_tag' and 'cors_allowed_origins'.
    """
    super().__init__(scope, id_, **kwargs)
    namespace = props['namespace']
    htsget_refserver_ecr_repo: ecr.Repository = props['ecr_repo']
    htsget_refserver_image_tag = props['htsget_refserver_image_tag']
    cors_allowed_origins = props['cors_allowed_origins']

    # --- Query deployment env specific config from SSM Parameter Store
    cert_apse2_arn = ssm.StringParameter.from_string_parameter_name(
        self,
        "SSLCertAPSE2ARN",
        string_parameter_name="/htsget/acm/apse2_arn",
    )
    cert_apse2 = acm.Certificate.from_certificate_arn(
        self,
        "SSLCertAPSE2",
        certificate_arn=cert_apse2_arn.string_value,
    )
    hosted_zone_id = ssm.StringParameter.from_string_parameter_name(
        self, "HostedZoneID", string_parameter_name="hosted_zone_id")
    hosted_zone_name = ssm.StringParameter.from_string_parameter_name(
        self, "HostedZoneName", string_parameter_name="hosted_zone_name")
    domain_name = ssm.StringParameter.from_string_parameter_name(
        self,
        "DomainName",
        string_parameter_name="/htsget/domain",
    )

    # --- Cognito parameters are from data portal terraform stack
    cog_user_pool_id = ssm.StringParameter.from_string_parameter_name(
        self,
        "CogUserPoolID",
        string_parameter_name="/data_portal/client/cog_user_pool_id",
    )
    cog_app_client_id_stage = ssm.StringParameter.from_string_parameter_name(
        self,
        "CogAppClientIDStage",
        string_parameter_name="/data_portal/client/cog_app_client_id_stage",
    )
    cog_app_client_id_local = ssm.StringParameter.from_string_parameter_name(
        self,
        "CogAppClientIDLocal",
        string_parameter_name="/data_portal/client/cog_app_client_id_local",
    )

    # --- Query main VPC and setup Security Groups
    vpc = ec2.Vpc.from_lookup(
        self,
        "VPC",
        vpc_name="main-vpc",
        tags={
            'Stack': "networking",
        },
    )
    # Pinned to a single AZ; presumably to keep the service near its data
    # and limit cross-AZ traffic -- TODO confirm.
    private_subnets = ec2.SubnetSelection(
        subnet_type=ec2.SubnetType.PRIVATE,
        availability_zones=["ap-southeast-2a"],
    )
    sg_elb = ec2.SecurityGroup(
        self,
        "ELBSecurityGroup",
        vpc=vpc,
        description=f"Security Group for ELB in {namespace} stack",
        security_group_name=f"{namespace} ELB Security Group",
        allow_all_outbound=False,
    )
    sg_elb.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                            connection=ec2.Port.tcp(80),
                            description="Allow http inbound within VPC")
    sg_ecs_service = ec2.SecurityGroup(
        self,
        "ECSServiceSecurityGroup",
        vpc=vpc,
        description=f"Security Group for ECS Service in {namespace} stack",
        security_group_name=f"{namespace} ECS Security Group",
    )
    # Only the ELB may reach the container port (3000).
    sg_ecs_service.add_ingress_rule(
        peer=sg_elb,
        connection=ec2.Port.tcp(3000),
        description="Allow traffic from Load balancer to ECS service")

    # --- Setup ECS Fargate cluster
    # Task-local scratch volume shared between sidecar and main container.
    config_vol = ecs.Volume(
        name="config-vol",
        host=ecs.Host(),
    )
    task_execution_role = iam.Role(
        self,
        "ecsTaskExecutionRole",
        assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"))
    # NOTE(review): one role serves as both task role and execution role,
    # and S3/SSM access is granted on resources=["*"] -- consider scoping.
    task_execution_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                "s3:GetBucketLocation",
                "s3:GetObject",
                "s3:ListBucket",
                "s3:ListBucketMultipartUploads",
                "s3:ListMultipartUploadParts",
                "s3:GetObjectTagging",
                "s3:GetObjectVersionTagging",
                "logs:CreateLogStream",
                "logs:PutLogEvents",
                "ssm:GetParameterHistory",
                "ssm:GetParametersByPath",
                "ssm:GetParameters",
                "ssm:GetParameter",
            ],
            resources=["*"],
        ))
    task_execution_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'service-role/AmazonECSTaskExecutionRolePolicy'))
    task = ecs.FargateTaskDefinition(
        self,
        f"{namespace}-task",
        cpu=512,
        memory_limit_mib=1024,
        volumes=[config_vol],
        task_role=task_execution_role,
        execution_role=task_execution_role,
    )
    # Sidecar runs once (essential=False): pulls the server config from SSM
    # into the shared volume, then exits.
    cmd_ssm = "ssm get-parameter --name '/htsget/refserver/config' --output text --query Parameter.Value"
    sidecar_container: ecs.ContainerDefinition = task.add_container(
        f"{namespace}-sidecar",
        image=ecs.ContainerImage.from_registry(
            "quay.io/victorskl/aws-cli:2.1.3"),
        essential=False,
        entry_point=[
            "/bin/bash",
            "-c",
            f"aws {cmd_ssm} > config.json",
        ],
        logging=ecs.LogDriver.aws_logs(stream_prefix=f"{namespace}", ),
    )
    # /aws is the aws-cli image's working dir, so config.json lands on the
    # shared volume -- TODO confirm against the image definition.
    sidecar_container.add_mount_points(
        ecs.MountPoint(
            container_path="/aws",
            read_only=False,
            source_volume=config_vol.name,
        ))
    main_container: ecs.ContainerDefinition = task.add_container(
        namespace,
        image=ecs.ContainerImage.from_ecr_repository(
            repository=htsget_refserver_ecr_repo,
            tag=htsget_refserver_image_tag,
        ),
        essential=True,
        command=[
            "./htsget-refserver", "-config",
            "/usr/src/app/config/config.json"
        ],
        logging=ecs.LogDriver.aws_logs(stream_prefix=f"{namespace}", ),
    )
    main_container.add_port_mappings(
        ecs.PortMapping(
            container_port=3000,
            protocol=ecs.Protocol.TCP,
        ))
    # Main container sees the same volume read-only, where config.json sits.
    main_container.add_mount_points(
        ecs.MountPoint(
            container_path="/usr/src/app/config",
            read_only=True,
            source_volume=config_vol.name,
        ))
    # Server must not start until the sidecar has finished writing the config.
    main_container.add_container_dependencies(
        ecs.ContainerDependency(
            container=sidecar_container,
            condition=ecs.ContainerDependencyCondition.COMPLETE,
        ))
    cluster = ecs.Cluster(self, f"{namespace}-cluster", vpc=vpc)
    service = ecs.FargateService(
        self,
        f"{namespace}-service",
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        task_definition=task,
        cluster=cluster,
        vpc_subnets=private_subnets,
        desired_count=1,
        security_groups=[
            sg_ecs_service,
        ],
    )

    # --- Setup Application Load Balancer in front of ECS cluster
    # Internal-only: external traffic comes in via API Gateway + VpcLink.
    lb = elbv2.ApplicationLoadBalancer(
        self,
        f"{namespace}-lb",
        vpc=vpc,
        internet_facing=False,
        security_group=sg_elb,
        deletion_protection=True,
    )
    http_listener = lb.add_listener(
        "HttpLBListener",
        port=80,
    )
    health_check = elbv2.HealthCheck(interval=core.Duration.seconds(30),
                                     path="/reads/service-info",
                                     timeout=core.Duration.seconds(5))
    http_listener.add_targets(
        "LBtoECS",
        port=3000,
        protocol=elbv2.ApplicationProtocol.HTTP,
        targets=[service],
        health_check=health_check,
    )
    core.CfnOutput(self, "LoadBalancerDNS", value=lb.load_balancer_dns_name)

    # --- Setup APIGatewayv2 HttpApi using VpcLink private integration to ALB/ECS in private subnets
    vpc_link = apigwv2.VpcLink(self, f"{namespace}-VpcLink",
                               vpc=vpc,
                               security_groups=[
                                   sg_ecs_service,
                                   sg_elb,
                               ])
    self.apigwv2_alb_integration = apigwv2i.HttpAlbIntegration(
        listener=http_listener,
        vpc_link=vpc_link,
    )
    custom_domain = apigwv2.DomainName(
        self,
        "CustomDomain",
        certificate=cert_apse2,
        domain_name=domain_name.string_value,
    )
    self.http_api = apigwv2.HttpApi(
        self,
        f"{namespace}-apigw",
        default_domain_mapping=apigwv2.DomainMappingOptions(
            domain_name=custom_domain),
        cors_preflight=apigwv2.CorsPreflightOptions(
            allow_origins=cors_allowed_origins,
            allow_headers=["*"],
            allow_methods=[
                apigwv2.CorsHttpMethod.ANY,
            ],
            allow_credentials=True,
        ))
    core.CfnOutput(self, "ApiEndpoint", value=self.http_api.api_endpoint)

    # --- Setup DNS for the custom domain
    hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
        self,
        "HostedZone",
        hosted_zone_id=hosted_zone_id.string_value,
        zone_name=hosted_zone_name.string_value,
    )
    route53.ARecord(
        self,
        "ApiCustomDomainAlias",
        zone=hosted_zone,
        record_name="htsget",
        target=route53.RecordTarget.from_alias(
            route53t.ApiGatewayv2DomainProperties(
                regional_domain_name=custom_domain.regional_domain_name,
                regional_hosted_zone_id=custom_domain.regional_hosted_zone_id)),
    )
    core.CfnOutput(
        self,
        "HtsgetEndpoint",
        value=custom_domain.name,
    )

    # JWT authorizer backed by the data portal's Cognito user pool.
    cognito_authzr = apigwv2.CfnAuthorizer(
        self,
        "CognitoAuthorizer",
        api_id=self.http_api.http_api_id,
        authorizer_type="JWT",
        identity_source=[
            "$request.header.Authorization",
        ],
        name="CognitoAuthorizer",
        jwt_configuration=apigwv2.CfnAuthorizer.JWTConfigurationProperty(
            audience=[
                cog_app_client_id_stage.string_value,
                cog_app_client_id_local.string_value,
            ],
            issuer=
            f"https://cognito-idp.{self.region}.amazonaws.com/{cog_user_pool_id.string_value}"
        ))

    # Add catch all routes -- the high-level HttpRoute has no authorizer
    # support here, so the authorizer is wired onto the L1 CfnRoute directly.
    rt_catchall = apigwv2.HttpRoute(
        self,
        "CatchallRoute",
        http_api=self.http_api,
        route_key=apigwv2.HttpRouteKey.with_(
            path="/{proxy+}", method=apigwv2.HttpMethod.GET),
        integration=self.apigwv2_alb_integration)
    rt_catchall_cfn: apigwv2.CfnRoute = rt_catchall.node.default_child
    rt_catchall_cfn.authorizer_id = cognito_authzr.ref
    rt_catchall_cfn.authorization_type = "JWT"

    # Comment this to opt-out setting up experimental Passport + htsget
    self.setup_ga4gh_passport()
def __init__(self, scope: core.Construct, id: str, config: ContainerPipelineConfiguration, **kwargs) -> None:
    """Provision the container runtime stack: a VPC, an ECS Fargate
    cluster/service running the project's 'release' image, an
    internet-facing ALB, and CloudWatch alarms on the target group.

    :param scope: parent construct.
    :param id: stack logical ID.
    :param config: project configuration; ProjectName and stage are used to
        name the cluster, service, task family and log group.
    """
    super().__init__(scope, id, **kwargs)

    # VPC with a single NAT gateway (cost over redundancy).
    vpc = ec2.Vpc(self, "TheVPC",
                  cidr="10.0.0.0/16",
                  nat_gateways=1,
                  )

    # IAM roles: execution role for the ECS agent (pull image, ship logs),
    # separate empty task role for the application itself.
    service_task_def_exe_role = iam.Role(
        self, "ServiceTaskDefExecutionRole",
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com')
    )
    service_task_def_exe_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonECSTaskExecutionRolePolicy'))

    service_task_def_role = iam.Role(
        self, 'ServiceTaskDefTaskRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com')
    )

    # Fargate cluster
    cluster = ecs.Cluster(
        scope=self,
        id="ecs-cluster",
        cluster_name=config.ProjectName + "-" + config.stage,
        vpc=vpc
    )

    load_balancer = elbv2.ApplicationLoadBalancer(
        self, "load_balancer",
        vpc=vpc,
        internet_facing=True
    )

    # Security Group: only the load balancer may reach the service.
    # (fixed: removed a stray trailing semicolon)
    # NOTE(review): targets register on 8080 below, yet this rule opens 80 --
    # confirm which port the LB actually dials the tasks on.
    service_sg = ec2.SecurityGroup(self, "service_sg", vpc=vpc)
    service_sg.connections.allow_from(load_balancer, ec2.Port.tcp(80))

    # ECR repo: pre-existing, looked up by name.
    image_repo = ecr.Repository.from_repository_name(self, "image_repo",
                                                     repository_name=config.ProjectName
                                                     )

    # Log group is registered on the stack for its side effect; the handle
    # itself is never used, so it is not bound to a local.
    logs.LogGroup(self, "log_group",
                  log_group_name=config.ProjectName + "-" + config.stage,
                  removal_policy=core.RemovalPolicy.DESTROY,
                  retention=None
                  )

    # ECS Task Def
    fargate_task_definition = ecs.FargateTaskDefinition(
        scope=self,
        id="fargate_task_definition",
        cpu=1024,
        memory_limit_mib=2048,
        execution_role=service_task_def_exe_role,
        task_role=service_task_def_role,
        family=config.ProjectName + "-" + config.stage
    )

    container = fargate_task_definition.add_container(
        id="fargate_task_container",
        image=ecs.ContainerImage.from_ecr_repository(repository=image_repo, tag='release')
    )
    container.add_port_mappings(ecs.PortMapping(container_port=8080, host_port=8080, protocol=ecs.Protocol.TCP))

    # ECS Fargate Service with plain rolling (ECS) deployments.
    fargate_service = ecs.FargateService(
        scope=self,
        id="fargate_service",
        security_group=service_sg,
        cluster=cluster,
        desired_count=2,
        deployment_controller=ecs.DeploymentController(type=ecs.DeploymentControllerType.ECS),
        task_definition=fargate_task_definition,
        service_name=config.ProjectName + "-" + config.stage
    )

    # Listener health check.
    # (fixed: renamed the misspelled 'listern_*' locals and dropped
    # 'listern_health_check_test', an identical duplicate that was never
    # attached to anything)
    listener_health_check = elbv2.HealthCheck(
        healthy_http_codes='200',
        interval=core.Duration.seconds(5),
        healthy_threshold_count=2,
        unhealthy_threshold_count=3,
        timeout=core.Duration.seconds(4)
    )

    listener_main = load_balancer.add_listener("load_balancer_listener_1",
                                               port=80,
                                               )
    listener_main_targets = listener_main.add_targets("load_balancer_target_1",
                                                      port=8080,
                                                      health_check=listener_health_check,
                                                      targets=[fargate_service]
                                                      )

    # Alarms: monitor 500s on target group
    aws_cloudwatch.Alarm(self, "TargetGroup5xx",
                         metric=listener_main_targets.metric_http_code_target(elbv2.HttpCodeTarget.TARGET_5XX_COUNT),
                         threshold=1,
                         evaluation_periods=1,
                         period=core.Duration.minutes(1)
                         )

    # Alarms: monitor unhealthy hosts on target group
    aws_cloudwatch.Alarm(self, "TargetGroupUnhealthyHosts",
                         metric=listener_main_targets.metric('UnHealthyHostCount'),
                         threshold=1,
                         evaluation_periods=1,
                         period=core.Duration.minutes(1)
                         )

    core.CfnOutput(self, "lburl",
                   value=load_balancer.load_balancer_dns_name,
                   export_name="LoadBalancerUrl"
                   )
def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc, instances: list, **kwargs):
    """Create a public and a private ALB in front of the given EC2 instances.

    Both load balancers listen on port 80 and forward to target groups that
    register every instance on port 8080, health-checked on /login. The
    security groups and load balancers are kept as private attributes.
    """
    super().__init__(scope, id, **kwargs)

    login_check = elbv2.HealthCheck(path="/login")

    # Two target groups -- one per load balancer -- each registering all
    # instances on port 8080.
    public_tg = elbv2.ApplicationTargetGroup(self,
                                             "PublicTG",
                                             port=8080,
                                             vpc=vpc,
                                             health_check=login_check)
    for inst in instances:
        public_tg.add_target(elbv2.InstanceTarget(inst.instance_id, port=8080))

    private_tg = elbv2.ApplicationTargetGroup(self,
                                              "PrivateTG",
                                              port=8080,
                                              vpc=vpc,
                                              health_check=login_check)
    for inst in instances:
        private_tg.add_target(elbv2.InstanceTarget(inst.instance_id, port=8080))

    # Public side: SG open to the world on 80, internet-facing ALB placed
    # in the VPC's public subnets.
    self._public_security_group = ec2.SecurityGroup(self, "PublicLBSG", vpc=vpc)
    self._public_security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                                 ec2.Port.tcp(80))

    public_subnets = ec2.SubnetSelection(
        subnets=vpc.select_subnets(subnet_type=ec2.SubnetType.PUBLIC).subnets)
    self._public_lb = elbv2.ApplicationLoadBalancer(
        self,
        "PublicLB",
        vpc=vpc,
        vpc_subnets=public_subnets,
        internet_facing=True,
        security_group=self._public_security_group)
    self._public_lb.add_listener("PublicLBListener",
                                 port=80,
                                 default_target_groups=[public_tg])

    # Private side: identical wiring, but internal-only in private subnets.
    self._private_security_group = ec2.SecurityGroup(self, "PrivateLBSG", vpc=vpc)
    self._private_security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                                  ec2.Port.tcp(80))

    private_subnets = ec2.SubnetSelection(
        subnets=vpc.select_subnets(subnet_type=ec2.SubnetType.PRIVATE).subnets)
    self._private_lb = elbv2.ApplicationLoadBalancer(
        self,
        "PrivateLB",
        vpc=vpc,
        vpc_subnets=private_subnets,
        internet_facing=False,
        security_group=self._private_security_group)
    self._private_lb.add_listener("PrivateLBListener",
                                  port=80,
                                  default_target_groups=[private_tg])
def __init__(self, app: cdk.App, id: str, vpc: ec2.Vpc, servicedomain: str, **kwargs) -> None:
    """Set up the shared ECS cluster, a bastion host, and the public ALB
    for the App Mesh demo.

    NOTE(review): this uses a very early (pre-1.0) CDK API surface
    (ec2.CidrIPv4, ec2.TcpAllPorts, dict-style *Props objects, camelCase
    keys); it will not work against modern CDK releases.

    :param app: CDK application, also used as the parent scope.
    :param id: stack ID; reused as the cluster construct ID.
    :param vpc: VPC everything is placed in.
    :param servicedomain: private-DNS namespace for Cloud Map discovery.
    """
    super().__init__(app, id)  # NOTE: **kwargs deliberately not forwarded

    # ECS cluster with a private-DNS Cloud Map namespace for service discovery.
    cluster = ecs.Cluster(self, id, vpc=vpc)
    cluster.add_default_cloud_map_namespace(
        name=servicedomain, type=ecs.NamespaceType.PrivateDns)
    self._cluster = cluster

    # Cluster-wide SG: any TCP port is reachable from within the VPC CIDR.
    ecssg = ec2.SecurityGroup(self, 'ECSServiceSecurityGroup', vpc=vpc)
    ecssg.add_ingress_rule(peer=ec2.CidrIPv4(vpc.vpc_cidr_block),
                           connection=ec2.TcpAllPorts())
    self._clustersg = ecssg

    # Bastion host stuff -------------------------------------------------------------------------------------

    # BastionInstanceRole: inline App Mesh policy loaded from a JSON file,
    # plus the AWS-managed SSM policy so the host is reachable via SSM.
    pd = pu.PolicyUtils.createpolicyfromfile(
        './appmeshdemo/policydocs/appmesh.json')
    bir = iam.Role(
        self, 'BastionInstanceRole',
        assumed_by=iam.ServicePrincipal('ec2'),
        inline_policies={'appmesh': pd},
        managed_policy_arns=[
            'arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM'
        ])
    bip = iam.CfnInstanceProfile(self, 'BastionInstanceProfile',
                                 roles=[bir.role_name])

    # Bastion EC2 instance with a public IP in the first public subnet.
    # NOTE(review): the SG admits any IPv4 source on all TCP ports -- very
    # permissive for a bastion; confirm this is intentional for the demo.
    bsg = ec2.SecurityGroup(self, 'BastionSG', vpc=vpc)
    bsg.add_ingress_rule(peer=ec2.AnyIPv4(), connection=ec2.TcpAllPorts())

    # Network interface props are populated dict-style (legacy API shape).
    ni = ec2.CfnNetworkInterfaceProps()
    ni['associatePublicIpAddress'] = True
    ni['deviceIndex'] = '0'
    ni['groupSet'] = [bsg.security_group_name]
    ni['subnetId'] = vpc.public_subnets[0].subnet_id

    bhi = ec2.CfnInstance(
        self, 'BastionInstance',
        instance_type='t2.micro',
        iam_instance_profile=bip.instance_profile_name,
        image_id=ec2.AmazonLinuxImage().get_image(self).image_id,
        network_interfaces=[ni])

    # Load-Balancer stuff ------------------------------------------------------------------------------------
    plbsg = ec2.SecurityGroup(self, 'PublicLoadBalancerSG', vpc=vpc)
    plbsg.add_ingress_rule(peer=ec2.AnyIPv4(),
                           connection=ec2.TcpPortRange(0, 65535))

    plb = elbv2.ApplicationLoadBalancer(self, 'PublicLoadBalancer',
                                        internet_facing=True,
                                        load_balancer_name='appmeshdemo',
                                        security_group=plbsg, vpc=vpc,
                                        idle_timeout_secs=30)
    self._publoadbal = plb

    # Health check built dict-style: 6s interval, 2/2 thresholds.
    healthchk = elbv2.HealthCheck()
    healthchk['intervalSecs'] = 6
    healthchk['healthyThresholdCount'] = 2
    healthchk['unhealthyThresholdCount'] = 2

    # Placeholder target group so the listener has a default action;
    # services are expected to attach their own target groups later.
    dtg = elbv2.ApplicationTargetGroup(
        self, 'DummyTargetGroupPublic', vpc=vpc, port=80,
        protocol=elbv2.ApplicationProtocol.Http,
        health_check=healthchk,
        target_group_name='appmeshdemo-drop-1')

    plbl = elbv2.ApplicationListener(
        self, 'PublicLoadBalancerListener',
        load_balancer=plb, port=80,
        protocol=elbv2.ApplicationProtocol.Http,
        default_target_groups=[dtg])

    cdk.CfnOutput(self, id='External URL',
                  value='http://' + plb.load_balancer_dns_name)
def __init__(self, scope: core.Construct, id: str, **kwargs,) -> None:
    """Provision the Celery Flower monitoring service.

    A Fargate task runs the ``mher/flower`` image pointed at the stack's
    Redis broker, plus an nginx proxy container; the service is registered
    on the shared HTTPS listener for paths under /flower.
    """
    super().__init__(
        scope,
        id,
        **kwargs,
    )

    self.flower_task = ecs.FargateTaskDefinition(self, "FlowerTask")

    # Basic-auth password for the Flower UI; falls back to a dev default.
    flower_password = os.environ.get("FLOWER_PASSWORD", "flowerpassword")

    # Broker URL assembled from the ElastiCache Redis endpoint (db 1).
    redis_host = scope.elasticache.elasticache.attr_redis_endpoint_address
    broker_url = f"redis://{redis_host}:6379/1"

    self.flower_task.add_container(
        "FlowerContainer",
        command=[
            "--url_prefix=flower",
            f"--broker={broker_url}",
            f"--basic_auth=flower:{flower_password}",
        ],
        image=ecs.ContainerImage.from_registry("mher/flower"),
        logging=ecs.LogDrivers.aws_logs(
            log_retention=logs.RetentionDays.ONE_DAY,
            stream_prefix="FlowerContainer",
        ),
    )

    # nginx proxy in front of Flower, built from the local asset directory.
    self.flower_task.add_container(
        "FlowerProxyContainer",
        image=ecs.AssetImage("./nginx/flowerproxy"),
    )

    # Expose port 80 on the task's default container.
    self.flower_task.default_container.add_port_mappings(
        ecs.PortMapping(container_port=80, protocol=ecs.Protocol.TCP))

    self.flower_service = ecs.FargateService(
        self,
        "FlowerService",
        cluster=scope.ecs.cluster,
        task_definition=self.flower_task,
        security_group=ec2.SecurityGroup.from_security_group_id(
            self,
            "FlowerServiceSecurityGroup",
            security_group_id=scope.vpc.vpc_default_security_group,
        ),
        assign_public_ip=True,
    )

    # Route /flower traffic from the shared HTTPS listener to this service.
    scope.https_listener.add_targets(
        "FlowerTarget",
        health_check=elbv2.HealthCheck(
            healthy_http_codes="200-404",
            path="/flower",
        ),
        port=80,
        priority=1,
        path_patterns=["/flower/*", "/flower*"],
        targets=[self.flower_service],
    )
def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc, *, slaves=2, **kwargs) -> None:
    """Deploy a distributed Locust load-testing cluster on ECS Fargate.

    A single master task serves the web console (8089) and the worker
    communication port (5557) behind an internet-facing NLB; ``slaves``
    worker tasks connect back to the master via the NLB's DNS name.

    :param vpc: VPC to place the cluster in.
    :param slaves: number of Locust worker tasks (keyword-only).
    """
    super().__init__(scope, id, **kwargs)

    cluster = ecs.Cluster(self, "cluster", vpc=vpc)

    # Locust image built from the local docker/ context.
    locust_asset = ecr_assets.DockerImageAsset(self, 'locust', directory="docker", file="app/Dockerfile")

    master_task = ecs.FargateTaskDefinition(
        self,
        "mastert",
        cpu=512,
        memory_limit_mib=1024
    )

    # Workers only need outbound access; the master accepts the console
    # (8089) and worker (5557) ports from any IPv4 source.
    sg_slave = ec2.SecurityGroup(self, "sgslave", vpc=vpc, allow_all_outbound=True)
    sg_master = ec2.SecurityGroup(self, "sgmaster", vpc=vpc, allow_all_outbound=True)
    sg_master.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8089))
    sg_master.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5557))

    master_container = master_task.add_container(
        "masterc",
        image=ecs.ContainerImage.from_docker_image_asset(locust_asset),
        logging=ecs.LogDriver.aws_logs(stream_prefix="master"),
        command=["-f", "/mnt/locust/locustfile.py", "--master"]
    )
    master_container.add_port_mappings(ecs.PortMapping(container_port=8089, host_port=8089))
    master_container.add_port_mappings(ecs.PortMapping(container_port=5557, host_port=5557))

    master_service = ecs.FargateService(
        self,
        "masters",
        cluster=cluster,
        task_definition=master_task,
        desired_count=1,
        assign_public_ip=True,
        security_group=sg_master
    )

    # NLB fronting both master ports (TCP pass-through).
    nlb = elbv2.NetworkLoadBalancer(
        self,
        "nbalancer",
        internet_facing=True,
        vpc=vpc
    )
    listener_master_console = nlb.add_listener(
        "masterconsole",
        port=8089,
        protocol=elbv2.Protocol("TCP")
    )
    listener_console = nlb.add_listener(
        "master",
        port=5557,
        protocol=elbv2.Protocol("TCP")
    )
    # deregistration_delay is 1s on both target groups, presumably to let
    # load-test runs be torn down quickly -- TODO confirm.
    listener_master_console.add_targets(
        "consoletarget",
        deregistration_delay=core.Duration.seconds(1),
        port=8089,
        targets=[master_service.load_balancer_target(
            container_name="masterc",
            container_port=8089
        )],
        health_check=elbv2.HealthCheck(
            healthy_threshold_count=2,
            unhealthy_threshold_count=2,
            timeout=core.Duration.seconds(10)
        )
    )
    listener_console.add_targets(
        "mastertarget",
        deregistration_delay=core.Duration.seconds(1),
        port=5557,
        targets=[master_service.load_balancer_target(
            container_name="masterc",
            container_port=5557
        )],
        health_check=elbv2.HealthCheck(
            healthy_threshold_count=2,
            unhealthy_threshold_count=2,
            timeout=core.Duration.seconds(10)
        )
    )

    # Worker task: heavier CPU/memory; reaches the master through the NLB.
    slave_task = ecs.FargateTaskDefinition(
        self,
        "slavet",
        cpu=2048,
        memory_limit_mib=4096
    )
    slave_task.add_container(
        "slavec",
        image=ecs.ContainerImage.from_docker_image_asset(locust_asset),
        logging=ecs.LogDriver.aws_logs(stream_prefix="slave"),
        command=["-f", "/mnt/locust/locustfile.py", "--worker", "--master-host", nlb.load_balancer_dns_name]
    )
    # Worker service handle is not needed afterwards, hence unbound.
    ecs.FargateService(
        self,
        "slaves",
        cluster=cluster,
        task_definition=slave_task,
        desired_count=slaves,
        assign_public_ip=True,
        security_group=sg_slave
    )

    core.CfnOutput(self, "LocustWebConsole", value="http://" + nlb.load_balancer_dns_name + ":8089")
def __init__(self, scope: core.Construct, construct_id: str, properties: WordpressStackProperties, **kwargs) -> None:
    """Wordpress on ECS Fargate: Aurora Serverless MySQL for data, EFS for
    the shared web root, an nginx + php task, CPU-based autoscaling, and an
    HTTP listener on the shared load balancer.

    :param properties: carries the VPC and the shared ApplicationLoadBalancer.
    """
    super().__init__(scope, construct_id, **kwargs)

    # Aurora Serverless MySQL; auto-pause disabled (Duration 0).
    database = rds.ServerlessCluster(
        self,
        "WordpressServerless",
        engine=rds.DatabaseClusterEngine.AURORA_MYSQL,
        default_database_name="WordpressDatabase",
        vpc=properties.vpc,
        scaling=rds.ServerlessScalingOptions(
            auto_pause=core.Duration.seconds(0)),
        deletion_protection=False,
        backup_retention=core.Duration.days(7),
        removal_policy=core.RemovalPolicy.DESTROY,
    )

    # EFS web root shared by nginx (read-only) and php (read-write).
    file_system = efs.FileSystem(
        self,
        "WebRoot",
        vpc=properties.vpc,
        performance_mode=efs.PerformanceMode.GENERAL_PURPOSE,
        throughput_mode=efs.ThroughputMode.BURSTING,
    )

    # Docker context directory: <parent of this file's dir>/src.
    # Fixed: was `os.path.dirname(__file__) + "../../src"` -- the missing
    # separator glued ".." onto the last path component, so after lexical
    # normalization it resolved to parent(dir)/src anyway; the explicit join
    # below yields the same directory without relying on that accident.
    docker_context_path = os.path.join(os.path.dirname(__file__), "..", "src")

    # upload images to ecr
    nginx_image = ecr_assets.DockerImageAsset(
        self,
        "Nginx",
        directory=docker_context_path,
        file="Docker.nginx",
    )
    wordpress_image = ecr_assets.DockerImageAsset(
        self,
        "Php",
        directory=docker_context_path,
        file="Docker.wordpress",
    )

    cluster = ecs.Cluster(self, 'ComputeResourceProvider',
                          vpc=properties.vpc)

    wordpress_volume = ecs.Volume(
        name="WebRoot",
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=file_system.file_system_id))

    event_task = ecs.FargateTaskDefinition(self,
                                           "WordpressTask",
                                           volumes=[wordpress_volume])

    #
    # webserver
    #
    nginx_container = event_task.add_container(
        "Nginx",
        image=ecs.ContainerImage.from_docker_image_asset(nginx_image))
    nginx_container.add_port_mappings(ecs.PortMapping(container_port=80))

    nginx_container_volume_mount_point = ecs.MountPoint(
        read_only=True,
        container_path="/var/www/html",
        source_volume=wordpress_volume.name)
    nginx_container.add_mount_points(nginx_container_volume_mount_point)

    #
    # application server -- DB credentials are injected from the cluster's
    # Secrets Manager secret, never hard-coded.
    #
    app_container = event_task.add_container(
        "Php",
        environment={
            'WORDPRESS_DB_HOST': database.cluster_endpoint.hostname,
            'WORDPRESS_TABLE_PREFIX': 'wp_'
        },
        secrets={
            'WORDPRESS_DB_USER':
                ecs.Secret.from_secrets_manager(database.secret,
                                                field="username"),
            'WORDPRESS_DB_PASSWORD':
                ecs.Secret.from_secrets_manager(database.secret,
                                                field="password"),
            'WORDPRESS_DB_NAME':
                ecs.Secret.from_secrets_manager(database.secret,
                                                field="dbname"),
        },
        image=ecs.ContainerImage.from_docker_image_asset(wordpress_image))
    app_container.add_port_mappings(ecs.PortMapping(container_port=9000))

    container_volume_mount_point = ecs.MountPoint(
        read_only=False,
        container_path="/var/www/html",
        source_volume=wordpress_volume.name)
    app_container.add_mount_points(container_volume_mount_point)

    #
    # create service
    #
    wordpress_service = ecs.FargateService(
        self,
        "InternalService",
        task_definition=event_task,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        cluster=cluster,
    )

    #
    # scaling
    #
    scaling = wordpress_service.auto_scale_task_count(min_capacity=2,
                                                      max_capacity=50)
    scaling.scale_on_cpu_utilization(
        "CpuScaling",
        target_utilization_percent=85,
        scale_in_cooldown=core.Duration.seconds(120),
        scale_out_cooldown=core.Duration.seconds(30),
    )

    #
    # network acl
    #
    database.connections.allow_default_port_from(wordpress_service,
                                                 "wordpress access to db")
    file_system.connections.allow_default_port_from(wordpress_service)

    #
    # external access
    #
    wordpress_service.connections.allow_from(
        other=properties.load_balancer, port_range=ec2.Port.tcp(80))

    http_listener = properties.load_balancer.add_listener(
        "HttpListener",
        port=80,
    )
    http_listener.add_targets(
        "HttpServiceTarget",
        protocol=elbv2.ApplicationProtocol.HTTP,
        targets=[wordpress_service],
        health_check=elbv2.HealthCheck(healthy_http_codes="200,301,302"))
instance_type=ec2.InstanceType("t2.micro"))
# ^ tail of a call (presumably cluster capacity) that begins above this excerpt

# Create Task Definition: a single nginx container on the EC2 launch type.
task_definition = ecs.Ec2TaskDefinition(stack, "TaskDef")
container = task_definition.add_container(
    "web",
    image=ecs.ContainerImage.from_registry("nginx:latest"),
    memory_limit_mib=256)
# Bridge networking: container port 80 is published on host port 8080.
port_mapping = ecs.PortMapping(container_port=80,
                               host_port=8080,
                               protocol=ecs.Protocol.TCP)
container.add_port_mappings(port_mapping)

# Create Service
service = ecs.Ec2Service(stack, "Service",
                         cluster=cluster,
                         task_definition=task_definition)

# Create ALB: internet-facing, listening on 80, open to any source.
lb = elbv2.ApplicationLoadBalancer(stack, "LB",
                                   vpc=vpc,
                                   internet_facing=True)
listener = lb.add_listener("PublicListener", port=80, open=True)

health_check = elbv2.HealthCheck(interval=core.Duration.seconds(60),
                                 path="/health",
                                 timeout=core.Duration.seconds(5))

# Attach ALB to ECS Service
listener.add_targets(
    "ECS",
    port=80,
    targets=[service],
    health_check=health_check,
)

core.CfnOutput(stack, "LoadBalancerDNS",
               value=lb.load_balancer_dns_name)

app.synth()
def createResources(self, ns):
    """Wire the Bento frontend/backend ECS services to the ALB over HTTPS.

    Adds an HTTP->HTTPS redirect, looks up the issued ACM certificate for
    the configured wildcard domain, creates the 443 listener with frontend
    and backend target groups, and installs path-based routing rules.

    :param ns: namespace key into self.config, also used in resource names.
    :raises ValueError: if no issued ACM certificate matches the domain.
    """
    # Attach ALB to ECS Service
    be_health_check = elbv2.HealthCheck(interval=core.Duration.seconds(60),
                                        path="/ping",
                                        timeout=core.Duration.seconds(5))

    # Redirect all plain-HTTP traffic to HTTPS.
    self.bentoALB.add_redirect(
        source_protocol=elbv2.ApplicationProtocol.HTTP,
        source_port=80,
        target_protocol=elbv2.ApplicationProtocol.HTTPS,
        target_port=443)

    # Get certificate ARN for specified domain name.
    # NOTE(review): list_certificates is paginated; a matching cert on a
    # later page would be missed -- consider using a paginator.
    client = boto3.client('acm')
    response = client.list_certificates(CertificateStatuses=[
        'ISSUED',
    ], )
    wildcard_domain = '*.{}'.format(self.config[ns]['domain_name'])
    certARN = None
    for cert in response["CertificateSummaryList"]:
        # Last match wins, preserving the original selection behavior.
        if wildcard_domain in cert.values():
            certARN = cert['CertificateArn']
    # Fixed: certARN was previously referenced while possibly unbound,
    # raising a confusing NameError when nothing matched; fail clearly.
    if certARN is None:
        raise ValueError(
            "No issued ACM certificate found for domain {}".format(
                wildcard_domain))

    bento_cert = cfm.Certificate.from_certificate_arn(
        self, "bento-cert", certificate_arn=certARN)

    listener = self.bentoALB.add_listener("PublicListener",
                                          certificates=[bento_cert],
                                          port=443)

    frontendtarget = listener.add_targets(
        "ECS-frontend-Target",
        port=int(self.config[ns]['frontend_container_port']),
        targets=[self.frontendService],
        target_group_name="{}-frontend".format(ns))
    core.Tags.of(frontendtarget).add("Name",
                                     "{}-frontend-alb-target".format(ns))

    backendtarget = listener.add_targets(
        "ECS-backend-Target",
        port=int(self.config[ns]['backend_container_port']),
        targets=[self.backendService],
        health_check=be_health_check,
        target_group_name="{}-backend".format(ns))
    core.Tags.of(backendtarget).add("Name",
                                    "{}-backend-alb-target".format(ns))

    # Add a fixed error message when browsing an invalid URL.
    # NOTE(review): status code is 200 although the body says the resource
    # is unavailable -- confirm a 404 would not be more appropriate.
    listener.add_action(
        "ECS-Content-Not-Found",
        action=elbv2.ListenerAction.fixed_response(
            200, message_body="The requested resource is not available"))

    # Path routing: backend owns /v1/graphql/*, frontend catches the rest.
    elbv2.ApplicationListenerRule(self,
                                  id="alb_frontend_rule",
                                  path_pattern="/*",
                                  priority=1,
                                  listener=listener,
                                  target_groups=[frontendtarget])
    elbv2.ApplicationListenerRule(self,
                                  id="alb_backend_rule",
                                  path_pattern="/v1/graphql/*",
                                  priority=2,
                                  listener=listener,
                                  target_groups=[backendtarget])
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """Deploy the frontend Fargate service behind an internet-facing ALB.

    :param props: dict carrying 'vpc', 'endpoint_sg' (VPC endpoint security
        group) and 'cluster' (shared ECS cluster); a copy is exposed on
        self.output_props with 'frontend_service' added for later stacks.
    """
    super().__init__(scope, id, **kwargs)

    vpc = props['vpc']
    endpoint_sg = props['endpoint_sg']
    cluster = props['cluster']

    # Create the task execution role (agent-side: pull image, write logs).
    task_execution_role_policy = iam.ManagedPolicy.from_aws_managed_policy_name(
        'service-role/AmazonECSTaskExecutionRolePolicy')
    task_execution_role = iam.Role(
        self,
        'TaskExecutionRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        managed_policies=[task_execution_role_policy])

    # Create the task role (application-side; no extra permissions here).
    task_role = iam.Role(
        self,
        'TaskRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))

    # Look up the pre-existing ECR repository.
    repository = ecr.Repository.from_repository_name(
        self, 'Frontend', 'frontend')

    # Create the task definition.
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs.README.html#task-definitions
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
    task_definition = ecs.FargateTaskDefinition(
        self,
        'TaskDef',
        memory_limit_mib=512,
        cpu=256,
        execution_role=task_execution_role,
        task_role=task_role)
    # Backend is resolved via the Cloud Map private DNS name.
    container = task_definition.add_container(
        'Container',
        image=ecs.ContainerImage.from_ecr_repository(repository=repository,
                                                     tag='latest'),
        logging=ecs.AwsLogDriver(stream_prefix='/ecs/'),
        environment={
            'BACKEND_URL': 'http://backend.mycluster.local:5000/messages'
        })
    container.add_port_mappings(ecs.PortMapping(container_port=5000))

    # Security group for the ALB.
    alb_sg = ec2.SecurityGroup(self, 'ALBSecurityGroup', vpc=vpc)

    # Create the ALB (internet-facing).
    alb = elbv2.ApplicationLoadBalancer(self,
                                        'ALB',
                                        vpc=vpc,
                                        internet_facing=True,
                                        security_group=alb_sg)

    # # Allow traffic to port 80 (equivalent explicit-rule form):
    # alb_sg.add_ingress_rule(
    #     peer=ec2.Peer.any_ipv4(),
    #     connection=ec2.Port.tcp(80)
    # )
    alb.connections.allow_from_any_ipv4(ec2.Port.tcp(80))

    # Security group for the frontend service.
    frontend_service_sg = ec2.SecurityGroup(self,
                                            'FrontendServiceSecurityGroup',
                                            vpc=vpc)

    # Create the service.
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs.README.html#service
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs.html
    frontend_service = ecs.FargateService(
        self,
        'FrontendService',
        cluster=cluster,
        task_definition=task_definition,
        min_healthy_percent=50,
        max_healthy_percent=200,
        desired_count=2,
        security_group=frontend_service_sg,
        cloud_map_options=ecs.CloudMapOptions(name='frontend'))

    # Allow traffic from the ALB security group.
    frontend_service.connections.allow_from(alb, ec2.Port.all_traffic())
    # Allow traffic from within the service's own security group.
    frontend_service.connections.allow_internally(ec2.Port.all_traffic())
    # Allow access to the VPC endpoints' security group.
    frontend_service.connections.allow_to(endpoint_sg, ec2.Port.all_traffic())

    # ApplicationListener
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_elasticloadbalancingv2/ApplicationListener.html#aws_cdk.aws_elasticloadbalancingv2.ApplicationListener
    listener = alb.add_listener('Listener', port=80)
    listener.add_targets('ECS',
                         port=5000,
                         protocol=elbv2.ApplicationProtocol.HTTP,
                         targets=[frontend_service],
                         health_check=elbv2.HealthCheck(
                             path='/health',
                             interval=core.Duration.seconds(10),
                             healthy_threshold_count=2))

    core.CfnOutput(self,
                   'LoadBalancerDNS',
                   description='Load Balancer DNS Name',
                   value=alb.load_balancer_dns_name)

    # Expose the inputs plus the created service to downstream stacks.
    self.output_props = props.copy()
    self.output_props['frontend_service'] = frontend_service
def create_scheduler_service(self):
    """Launch the Dask scheduler as a single-task Fargate service.

    The scheduler is registered in Cloud Map under the name "scheduler" so
    workers can resolve it, and its port-8787 dashboard is exposed through
    the stack's load balancer, health-checked on /status.
    """
    task_def = ecs.FargateTaskDefinition(
        self,
        id="schedulerTaskDefinition",
        cpu=2048,
        memory_limit_mib=4096)

    scheduler_container = task_def.add_container(
        id="schedulerContainer",
        cpu=2048,
        memory_limit_mib=4096,
        essential=True,
        image=ecs.ContainerImage.from_registry(Config.DASK_IMG.value),
        command=["dask-scheduler", "--interface", "eth0"],
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="scheduler-",
            log_retention=logs.RetentionDays.ONE_DAY),
    )

    # Expose 8787, 8786 and 9000 (per Dask defaults: 8787 is the dashboard,
    # 8786 the scheduler port -- confirm 9000's role against the image).
    for tcp_port in (8787, 8786, 9000):
        scheduler_container.add_port_mappings(
            ecs.PortMapping(container_port=tcp_port,
                            host_port=tcp_port,
                            protocol=ecs.Protocol.TCP))

    self.scheduler_service = ecs.FargateService(
        self,
        id="scheduler",
        cluster=self.cluster,
        desired_count=1,
        service_name="scheduler",
        task_definition=task_def,
    )
    # Cloud Map A record so workers can find the scheduler by name.
    self.scheduler_service.enable_cloud_map(
        dns_record_type=servicediscovery.DnsRecordType.A,
        name="scheduler")

    dashboard_health = elb.HealthCheck(
        interval=core.Duration.seconds(60),
        path="/status",
        timeout=core.Duration.seconds(40),
        port="8787",
        healthy_http_codes="200-399",
    )
    dashboard_tg = elb.ApplicationTargetGroup(
        self,
        id="schedulerTargetGroup",
        port=8787,
        vpc=self.vpc,
        protocol=elb.ApplicationProtocol.HTTP,
        targets=[self.scheduler_service],
        health_check=dashboard_health,
    )
    dashboard_listener = self.elb.add_listener(
        "schedulerPublicListener",
        port=8787,
        open=True,
        protocol=elb.ApplicationProtocol.HTTP,
    )
    dashboard_listener.add_target_groups(id="schedulerTargetgroups",
                                         target_groups=[dashboard_tg])