def __init__(self, scope: core.Construct, id: str, vpc, ecs_cluster, role, target_url: str, number_of_tasks=1, **kwargs) -> None:
    """Deploy a Locust load-generator service onto an ECS (EC2) cluster.

    :param vpc: VPC that hosts the service and (optionally) its ALB.
    :param ecs_cluster: ECS cluster the tasks run on.
    :param role: one of "master", "worker" or "standalone"; selects the
        Locust mode env vars, the inter-node ports and whether an ALB
        fronting the web UI is created (workers get no ALB).
    :param target_url: URL Locust will load-test (TARGET_URL env var).
    :param number_of_tasks: desired task count for the service (default 1).
    """
    super().__init__(scope, id, **kwargs)
    name = id
    task_def = ecs.Ec2TaskDefinition(self, name,
                                     network_mode=ecs.NetworkMode.AWS_VPC)

    # Locust is configured entirely through environment variables.
    container_env = {"TARGET_URL": target_url}
    if role == "worker":
        # Workers find the master via its Cloud Map registration below.
        container_env["LOCUST_MASTER_NODE_HOST"] = "master.loadgen"
        container_env["LOCUST_MODE_WORKER"] = "True"
    elif role == "master":
        container_env["LOCUST_MODE_MASTER"] = "True"
    # any other role (e.g. "standalone") needs no extra mode variables

    locust_container = task_def.add_container(
        name + "container",
        # Image is built from the Dockerfile in ./locust
        image=ecs.ContainerImage.from_asset("locust"),
        memory_reservation_mib=512,
        essential=True,
        logging=ecs.LogDrivers.aws_logs(stream_prefix=name),
        environment=container_env)
    # Locust opens many concurrent sockets; raise the fd limit.
    locust_container.add_ulimits(
        ecs.Ulimit(name=ecs.UlimitName.NOFILE,
                   soft_limit=65536,
                   hard_limit=65536))

    # 8089 = Locust web UI; 5557/5558 = master<->worker traffic.
    web_port_mapping = ecs.PortMapping(container_port=8089)
    if role != "standalone":
        worker1_port_mapping = ecs.PortMapping(container_port=5557)
        worker2_port_mapping = ecs.PortMapping(container_port=5558)
        locust_container.add_port_mappings(web_port_mapping,
                                           worker1_port_mapping,
                                           worker2_port_mapping)
    else:
        locust_container.add_port_mappings(web_port_mapping)

    # NOTE(review): ingress is open to 0.0.0.0/0 on all three ports;
    # consider narrowing to the VPC CIDR / ALB if that is not intended.
    security_group = ec2.SecurityGroup(self, "Locust",
                                       vpc=vpc,
                                       allow_all_outbound=True)
    security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8089))
    if role != "standalone":
        security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5557))
        security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5558))

    # Create the ecs service; Cloud Map registers it under the role name,
    # which is how workers resolve "master.<namespace>".
    locust_service = ecs.Ec2Service(self, name + "service",
                                    cluster=ecs_cluster,
                                    task_definition=task_def,
                                    security_group=security_group,
                                    desired_count=number_of_tasks)
    locust_service.enable_cloud_map(name=role)

    # Create the ALB to present the Locust UI (workers have no UI).
    # Forward port 80 on the ALB to the container's port 8089.
    if role != "worker":
        # "LoustLB" is a typo, kept as-is: changing a construct id would
        # change the logical ID and replace the load balancer on deploy.
        self.lb = elbv2.ApplicationLoadBalancer(self, "LoustLB",
                                                vpc=vpc,
                                                internet_facing=True)
        listener = self.lb.add_listener("Listener", port=80)
        listener.add_targets("ECS1",
                             port=8089,
                             protocol=elbv2.ApplicationProtocol.HTTP,
                             targets=[locust_service])
        core.CfnOutput(self, "lburl",
                       description="URL for ALB fronting locust master",
                       value="http://{}".format(self.lb.load_balancer_dns_name))
def __init__(self, scope, id, vpc, **kwarg) -> None:
    """Deploy the App Mesh "colorteller" demo onto ECS Fargate.

    Builds, inside ``vpc``:
      * an ECS cluster with a ``svc.test.local`` Cloud Map namespace,
      * shared execution/task IAM roles used by every task definition,
      * Fargate task definitions (app + Envoy sidecar + X-Ray daemon) for
        the gateway and the black/blue/red/white colortellers, plus a
        plain tcpecho task,
      * one Fargate service per task definition with CPU autoscaling,
      * an internet-facing ALB in front of the gateway and tcpecho apps.
    """
    super().__init__(scope, id, **kwarg)

    # cluster creation
    cluster = aws_ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)

    # service discovery creation
    sd_namespace = cluster.add_default_cloud_map_namespace(
        name="svc.test.local", vpc=vpc)
    aws_servicediscovery.Service(self, "svc.test.local",
                                 namespace=sd_namespace,
                                 load_balancer=True)

    # ECS role creation: one execution role (image pull / CodeDeploy) and
    # one task role (App Mesh, CloudWatch, X-Ray) shared by all tasks.
    ecs_principle = aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com')
    execution_role = aws_iam.Role(self, 'execution-role',
                                  assumed_by=ecs_principle)
    execution_role.add_managed_policy(
        policy=aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AWSCodeDeployRoleForECS"))
    execution_role.add_managed_policy(
        policy=aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AmazonEC2ContainerRegistryReadOnly"))
    task_role = aws_iam.Role(self, 'task-role', assumed_by=ecs_principle)
    task_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AWSAppMeshEnvoyAccess"))
    task_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="CloudWatchFullAccess"))
    task_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AWSXRayDaemonWriteAccess"))

    # envoy ecr object — 840364872350 is the AWS-owned account hosting the
    # official aws-appmesh-envoy images.
    envoy_ecr = aws_ecr.Repository.from_repository_attributes(
        self, 'aws-envoy',
        repository_arn=core.Stack.of(self).format_arn(
            service="ecr",
            resource="aws-appmesh-envoy",
            account="840364872350"),
        repository_name="aws-appmesh-envoy")

    # colorteller image builds (one shared image; COLOR env selects color)
    gateway_image = aws_ecs.ContainerImage.from_asset("./src/gateway")
    colorteller_image = aws_ecs.ContainerImage.from_asset(
        "./src/colorteller")

    # logging setup: one log group, one stream prefix per app
    log_group = aws_logs.LogGroup(self, "/ecs/colorteller",
                                  retention=aws_logs.RetentionDays.ONE_DAY)
    gateway_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                  stream_prefix="gateway")
    black_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                stream_prefix="black")
    blue_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                               stream_prefix="blue")
    red_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                              stream_prefix="red")
    white_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                stream_prefix="white")
    tcpecho_ecs_logs = aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                                  stream_prefix="tcpecho")

    # Mesh proxy configuration shared by every meshed task definition:
    # app traffic on 9080 is redirected through the Envoy sidecar.
    mesh_properties = aws_ecs.AppMeshProxyConfigurationProps(
        app_ports=[9080],
        proxy_egress_port=15001,
        proxy_ingress_port=15000,
        egress_ignored_i_ps=["169.254.170.2", "169.254.169.254"],
        ignored_uid=1337)

    # envoy ulimit defaults
    envoy_ulimit = aws_ecs.Ulimit(hard_limit=15000,
                                  name=aws_ecs.UlimitName.NOFILE,
                                  soft_limit=15000)

    # fargate task def - requires envoy proxy container, gateway app and x-ray
    gateway_task_def = aws_ecs.FargateTaskDefinition(
        self, "gateway_task",
        cpu=256,
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role,
        proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
            container_name="envoy", properties=mesh_properties))
    gateway_task_def.add_container(
        "gateway",
        logging=gateway_ecs_logs,
        environment={
            "SERVER_PORT": "9080",
            "STAGE": "v1.1",
            "COLOR_TELLER_ENDPOINT": "colorteller.svc.test.local:9080",
            "TCP_ECHO_ENDPOINT": "tcpecho.svc.test.local:2701"
        },
        image=gateway_image).add_port_mappings(
            aws_ecs.PortMapping(container_port=9080,
                                protocol=aws_ecs.Protocol.TCP))
    gateway_task_def.add_container(
        "xray",
        logging=gateway_ecs_logs,
        image=aws_ecs.ContainerImage.from_registry(
            "amazon/aws-xray-daemon")).add_port_mappings(
                aws_ecs.PortMapping(container_port=2000,
                                    protocol=aws_ecs.Protocol.UDP))
    gateway_envoy_container = gateway_task_def.add_container(
        "envoy",
        logging=gateway_ecs_logs,
        environment={
            "ENVOY_LOG_LEVEL": "debug",
            "ENABLE_ENVOY_XRAY_TRACING": "1",
            "ENABLE_ENVOY_STATS_TAGS": "1",
            "APPMESH_VIRTUAL_NODE_NAME":
            "mesh/ColorTellerAppMesh/virtualNode/gateway",
            "APPMESH_XDS_ENDPOINT": ""
        },
        image=aws_ecs.ContainerImage.from_ecr_repository(
            repository=envoy_ecr, tag="v1.12.1.1-prod"),
        essential=True,
        user="******",
        # Envoy is healthy once its admin endpoint (9901) reports LIVE.
        health_check=aws_ecs.HealthCheck(command=[
            "CMD-SHELL",
            "curl -s http://localhost:9901/ready |grep -q LIVE"
        ]))
    gateway_envoy_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9901,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15000,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15001,
                            protocol=aws_ecs.Protocol.TCP),
    )
    gateway_envoy_container.add_ulimits(envoy_ulimit)

    # black task def - requires color app, envoy and x-ray containers
    black_task_def = aws_ecs.FargateTaskDefinition(
        self, "black-task",
        cpu=256,
        family="black",
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role,
        proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
            container_name="envoy", properties=mesh_properties))
    black_envoy_container = black_task_def.add_container(
        "envoy",
        logging=black_ecs_logs,
        environment={
            "ENVOY_LOG_LEVEL": "info",
            "ENABLE_ENVOY_XRAY_TRACING": "1",
            "ENABLE_ENVOY_STATS_TAGS": "1",
            "APPMESH_VIRTUAL_NODE_NAME":
            "mesh/ColorTellerAppMesh/virtualNode/black",
            "APPMESH_XDS_ENDPOINT": ""
        },
        image=aws_ecs.ContainerImage.from_ecr_repository(
            repository=envoy_ecr, tag="v1.12.1.1-prod"),
        essential=True,
        user="******",
        health_check=aws_ecs.HealthCheck(command=[
            "CMD-SHELL",
            "curl -s http://localhost:9901/ready |grep -q LIVE"
        ]))
    black_envoy_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9901,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15000,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15001,
                            protocol=aws_ecs.Protocol.TCP),
    )
    black_envoy_container.add_ulimits(envoy_ulimit)
    black_app_container = black_task_def.add_container(
        "black",
        logging=black_ecs_logs,
        environment={
            "COLOR": "black",
            "SERVER_PORT": "9080",
            "STAGE": "v1.1"
        },
        image=colorteller_image)
    black_app_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9080,
                            protocol=aws_ecs.Protocol.TCP))
    # App traffic flows through Envoy, so wait until the proxy is healthy.
    black_app_container.add_container_dependencies(
        aws_ecs.ContainerDependency(
            container=black_envoy_container,
            condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
    black_task_def.add_container(
        "xray",
        logging=black_ecs_logs,
        image=aws_ecs.ContainerImage.from_registry(
            "amazon/aws-xray-daemon")).add_port_mappings(
                aws_ecs.PortMapping(container_port=2000,
                                    protocol=aws_ecs.Protocol.UDP))

    # blue task def (same as black)
    blue_task_def = aws_ecs.FargateTaskDefinition(
        self, "blue-task",
        cpu=256,
        family="blue",
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role,
        proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
            container_name="envoy", properties=mesh_properties))
    blue_envoy_container = blue_task_def.add_container(
        "envoy",
        logging=blue_ecs_logs,
        environment={
            "ENVOY_LOG_LEVEL": "info",
            "ENABLE_ENVOY_XRAY_TRACING": "1",
            "ENABLE_ENVOY_STATS_TAGS": "1",
            "APPMESH_VIRTUAL_NODE_NAME":
            "mesh/ColorTellerAppMesh/virtualNode/blue",
            "APPMESH_XDS_ENDPOINT": ""
        },
        image=aws_ecs.ContainerImage.from_ecr_repository(
            repository=envoy_ecr, tag="v1.12.1.1-prod"),
        essential=True,
        user="******",
        health_check=aws_ecs.HealthCheck(command=[
            "CMD-SHELL",
            "curl -s http://localhost:9901/ready |grep -q LIVE"
        ]))
    blue_envoy_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9901,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15000,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15001,
                            protocol=aws_ecs.Protocol.TCP),
    )
    blue_envoy_container.add_ulimits(envoy_ulimit)
    blue_app_container = blue_task_def.add_container(
        "blue",
        logging=blue_ecs_logs,
        # BUG FIX: was "COLOR": "black" (copy-paste from the black task
        # def); the blue colorteller must report its own color.
        environment={
            "COLOR": "blue",
            "SERVER_PORT": "9080",
            "STAGE": "v1.1"
        },
        image=colorteller_image)
    blue_app_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9080,
                            protocol=aws_ecs.Protocol.TCP))
    blue_app_container.add_container_dependencies(
        aws_ecs.ContainerDependency(
            container=blue_envoy_container,
            condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
    blue_task_def.add_container(
        "xray",
        logging=blue_ecs_logs,
        image=aws_ecs.ContainerImage.from_registry(
            "amazon/aws-xray-daemon")).add_port_mappings(
                aws_ecs.PortMapping(container_port=2000,
                                    protocol=aws_ecs.Protocol.UDP))

    # red task def (same as black)
    # NOTE: family is "red-task" (not "red") and STAGE is "v1.2" — kept
    # as-is; presumably intentional for the mesh routing/canary demo.
    red_task_def = aws_ecs.FargateTaskDefinition(
        self, "red-task",
        cpu=256,
        family="red-task",
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role,
        proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
            container_name="envoy", properties=mesh_properties))
    red_envoy_container = red_task_def.add_container(
        "envoy",
        logging=red_ecs_logs,
        environment={
            "ENVOY_LOG_LEVEL": "info",
            "ENABLE_ENVOY_XRAY_TRACING": "1",
            "ENABLE_ENVOY_STATS_TAGS": "1",
            "APPMESH_VIRTUAL_NODE_NAME":
            "mesh/ColorTellerAppMesh/virtualNode/red",
            "APPMESH_XDS_ENDPOINT": ""
        },
        image=aws_ecs.ContainerImage.from_ecr_repository(
            repository=envoy_ecr, tag="v1.12.1.1-prod"),
        essential=True,
        user="******",
        health_check=aws_ecs.HealthCheck(command=[
            "CMD-SHELL",
            "curl -s http://localhost:9901/ready |grep -q LIVE"
        ]))
    red_envoy_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9901,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15000,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15001,
                            protocol=aws_ecs.Protocol.TCP),
    )
    red_envoy_container.add_ulimits(envoy_ulimit)
    red_app_container = red_task_def.add_container(
        "red",
        logging=red_ecs_logs,
        environment={
            "COLOR": "red",
            "SERVER_PORT": "9080",
            "STAGE": "v1.2"
        },
        image=colorteller_image)
    red_app_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9080,
                            protocol=aws_ecs.Protocol.TCP))
    red_app_container.add_container_dependencies(
        aws_ecs.ContainerDependency(
            container=red_envoy_container,
            condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
    red_task_def.add_container(
        "xray",
        logging=red_ecs_logs,
        image=aws_ecs.ContainerImage.from_registry(
            "amazon/aws-xray-daemon")).add_port_mappings(
                aws_ecs.PortMapping(container_port=2000,
                                    protocol=aws_ecs.Protocol.UDP))

    # white task def (same as black) - colorteller.svc.test.local points
    # to this service (because containers need something to resolve to or
    # they fail)
    white_task_def = aws_ecs.FargateTaskDefinition(
        self, "white-task",
        cpu=256,
        family="white",
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role,
        proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
            container_name="envoy", properties=mesh_properties))
    white_envoy_container = white_task_def.add_container(
        "envoy",
        logging=white_ecs_logs,
        environment={
            "ENVOY_LOG_LEVEL": "info",
            "ENABLE_ENVOY_XRAY_TRACING": "1",
            "ENABLE_ENVOY_STATS_TAGS": "1",
            "APPMESH_VIRTUAL_NODE_NAME":
            "mesh/ColorTellerAppMesh/virtualNode/white",
            "APPMESH_XDS_ENDPOINT": ""
        },
        image=aws_ecs.ContainerImage.from_ecr_repository(
            repository=envoy_ecr, tag="v1.12.1.1-prod"),
        essential=True,
        user="******",
        health_check=aws_ecs.HealthCheck(command=[
            "CMD-SHELL",
            "curl -s http://localhost:9901/ready |grep -q LIVE"
        ]))
    white_envoy_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9901,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15000,
                            protocol=aws_ecs.Protocol.TCP),
        aws_ecs.PortMapping(container_port=15001,
                            protocol=aws_ecs.Protocol.TCP),
    )
    white_envoy_container.add_ulimits(envoy_ulimit)
    white_app_container = white_task_def.add_container(
        "white",
        logging=white_ecs_logs,
        environment={
            "COLOR": "white",
            "SERVER_PORT": "9080",
            "STAGE": "v1.1"
        },
        image=colorteller_image)
    white_app_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=9080,
                            protocol=aws_ecs.Protocol.TCP))
    white_app_container.add_container_dependencies(
        aws_ecs.ContainerDependency(
            container=white_envoy_container,
            condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
    white_task_def.add_container(
        "xray",
        logging=white_ecs_logs,
        image=aws_ecs.ContainerImage.from_registry(
            "amazon/aws-xray-daemon")).add_port_mappings(
                aws_ecs.PortMapping(container_port=2000,
                                    protocol=aws_ecs.Protocol.UDP))

    # tcpecho service (external docker image); not meshed, no sidecars
    tcpecho_task_def = aws_ecs.FargateTaskDefinition(
        self, 'tcpecho-tasks',
        cpu=256,
        family="tcpecho",
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role)
    tcpecho_task_def.add_container(
        "tcpecho",
        logging=tcpecho_ecs_logs,
        environment={
            "TCP_PORT": "2701",
            "NODE_NAME": "mesh/ColorTellerAppMesh/virtualNode/echo"
        },
        image=aws_ecs.ContainerImage.from_registry("cjimti/go-echo"),
        essential=True,
    ).add_port_mappings(
        aws_ecs.PortMapping(container_port=2701,
                            protocol=aws_ecs.Protocol.TCP))

    # adds task defs to fargate services - adds security group access to
    # local vpc cidr block; all the services are treated the same way
    gateway_fargate_service = aws_ecs.FargateService(
        self, "gateway",
        cluster=cluster,
        task_definition=gateway_task_def,
        desired_count=2,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=sd_namespace, name="gateway"))
    gateway_fargate_service.connections.security_groups[0].add_ingress_rule(
        peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
        connection=aws_ec2.Port.tcp(9080),
        description="Allow http inbound from VPC")
    black_colorteller_fargate_service = aws_ecs.FargateService(
        self, "black",
        cluster=cluster,
        task_definition=black_task_def,
        desired_count=2,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=sd_namespace, name="black"))
    black_colorteller_fargate_service.connections.security_groups[
        0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                            connection=aws_ec2.Port.tcp(9080),
                            description="Allow http inbound from VPC")
    blue_colorteller_fargate_service = aws_ecs.FargateService(
        self, "blue",
        cluster=cluster,
        task_definition=blue_task_def,
        desired_count=2,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=sd_namespace, name="blue"))
    blue_colorteller_fargate_service.connections.security_groups[
        0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                            connection=aws_ec2.Port.tcp(9080),
                            description="Allow http inbound from VPC")
    red_colorteller_fargate_service = aws_ecs.FargateService(
        self, "red",
        cluster=cluster,
        task_definition=red_task_def,
        desired_count=2,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=sd_namespace, name="red"))
    red_colorteller_fargate_service.connections.security_groups[
        0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                            connection=aws_ec2.Port.tcp(9080),
                            description="Allow http inbound from VPC")
    # white registers as "colorteller" — it is the default endpoint the
    # gateway resolves (colorteller.svc.test.local).
    white_colorteller_fargate_service = aws_ecs.FargateService(
        self, "white",
        cluster=cluster,
        task_definition=white_task_def,
        desired_count=2,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=sd_namespace, name="colorteller"))
    white_colorteller_fargate_service.connections.security_groups[
        0].add_ingress_rule(peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
                            connection=aws_ec2.Port.tcp(9080),
                            description="Allow http inbound from VPC")
    echo_fargate_service = aws_ecs.FargateService(
        self, "tcpecho",
        cluster=cluster,
        task_definition=tcpecho_task_def,
        desired_count=2,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=sd_namespace, name="tcpecho"))
    echo_fargate_service.connections.security_groups[0].add_ingress_rule(
        peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
        connection=aws_ec2.Port.tcp(2701),
        description="Allow http inbound from VPC")

    # adds autoscaling policies to all services
    for service in [
            black_colorteller_fargate_service,
            blue_colorteller_fargate_service,
            red_colorteller_fargate_service,
            white_colorteller_fargate_service, gateway_fargate_service,
            echo_fargate_service
    ]:
        # Every entry is an aws_ecs.FargateService, which exposes
        # auto_scale_task_count directly (the previous try/except probing
        # a nested .service attribute always raised and fell through to
        # this exact call).
        scaling = service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=50,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60),
        )

    # configure loadbalancer to listen on port 80 and add targets to
    # gateway and echo apps
    load_balancer = aws_elasticloadbalancingv2.ApplicationLoadBalancer(
        self, "lb", vpc=vpc, internet_facing=True)
    listener = load_balancer.add_listener("PublicListener",
                                          port=80,
                                          open=True)
    health_check = aws_elasticloadbalancingv2.HealthCheck(
        interval=core.Duration.seconds(60),
        path="/ping",
        port="9080",
        timeout=core.Duration.seconds(5))

    # attach ALB to ECS service
    listener.add_targets(
        "gateway",
        port=80,
        targets=[gateway_fargate_service, echo_fargate_service],
        health_check=health_check,
    )

    # outputs of ALB and cluster
    core.CfnOutput(self, "LoadBalancerDNS",
                   value=load_balancer.load_balancer_dns_name)
    core.CfnOutput(self, "ClusterName", value=cluster.cluster_name)
def appmesh(self):
    """Create the App Mesh control plane and its ECS-hosted virtual gateway.

    Sets ``self.mesh``, ``self.mesh_vgw``, ``self.mesh_gw_proxy_task_def``,
    ``self.logGroup`` and ``self.mesh_gateway_proxy_fargate_service``, and
    exports the mesh / gateway identifiers as CloudFormation outputs.
    Requires ``self.ecs_cluster`` (with a default Cloud Map namespace) to
    already exist — presumably set up elsewhere on this class; confirm.
    """
    # This will create the app mesh (control plane)
    self.mesh = aws_appmesh.Mesh(self, "EcsWorkShop-AppMesh", mesh_name="ecs-mesh")

    # We will create a App Mesh Virtual Gateway listening on HTTP 3000
    self.mesh_vgw = aws_appmesh.VirtualGateway(
        self, "Mesh-VGW",
        mesh=self.mesh,
        listeners=[aws_appmesh.VirtualGatewayListener.http(port=3000)],
        virtual_gateway_name="ecsworkshop-vgw")

    # Creating the mesh gateway task for the frontend app
    # For more info related to App Mesh Proxy check https://docs.aws.amazon.com/app-mesh/latest/userguide/getting-started-ecs.html
    self.mesh_gw_proxy_task_def = aws_ecs.FargateTaskDefinition(
        self, "mesh-gw-proxy-taskdef",
        cpu=256,
        memory_limit_mib=512,
        family="mesh-gw-proxy-taskdef",
    )

    # LogGroup for the App Mesh Proxy Task
    self.logGroup = aws_logs.LogGroup(
        self, "ecsworkshopMeshGateway",
        #log_group_name="ecsworkshop-mesh-gateway",
        retention=aws_logs.RetentionDays.ONE_WEEK)

    # App Mesh Virtual Gateway Envoy proxy Task definition
    # For a use specific ECR region, please check https://docs.aws.amazon.com/app-mesh/latest/userguide/envoy.html
    container = self.mesh_gw_proxy_task_def.add_container(
        "mesh-gw-proxy-contdef",
        image=aws_ecs.ContainerImage.from_registry(
            "public.ecr.aws/appmesh/aws-appmesh-envoy:v1.18.3.0-prod"),
        container_name="envoy",
        memory_reservation_mib=256,
        environment={
            "REGION": getenv('AWS_DEFAULT_REGION'),
            "ENVOY_LOG_LEVEL": "info",
            "ENABLE_ENVOY_STATS_TAGS": "1",
            # "ENABLE_ENVOY_XRAY_TRACING": "1",
            "APPMESH_RESOURCE_ARN": self.mesh_vgw.virtual_gateway_arn
        },
        essential=True,
        logging=aws_ecs.LogDriver.aws_logs(stream_prefix='/mesh-gateway',
                                           log_group=self.logGroup),
        # Healthy once Envoy's admin endpoint (9901) reports state LIVE
        health_check=aws_ecs.HealthCheck(command=[
            "CMD-SHELL",
            "curl -s http://localhost:9901/server_info | grep state | grep -q LIVE"
        ], ))

    # Default port where frontend app is listening
    container.add_port_mappings(aws_ecs.PortMapping(container_port=3000))

    #ammmesh-xray-uncomment
    # xray_container = self.mesh_gw_proxy_task_def.add_container(
    #     "FrontendServiceXrayContdef",
    #     image=aws_ecs.ContainerImage.from_registry("amazon/aws-xray-daemon"),
    #     logging=aws_ecs.LogDriver.aws_logs(
    #         stream_prefix='/xray-container',
    #         log_group=self.logGroup
    #     ),
    #     essential=True,
    #     container_name="xray",
    #     memory_reservation_mib=256,
    #     user="******"
    # )
    # container.add_container_dependencies(aws_ecs.ContainerDependency(
    #     container=xray_container,
    #     condition=aws_ecs.ContainerDependencyCondition.START
    # )
    # )
    #ammmesh-xray-uncomment

    # For environment variables check https://docs.aws.amazon.com/app-mesh/latest/userguide/envoy-config.html
    # NLB-fronted Fargate service hosting the virtual gateway proxy,
    # registered in the cluster's default Cloud Map namespace.
    self.mesh_gateway_proxy_fargate_service = aws_ecs_patterns.NetworkLoadBalancedFargateService(
        self, "MeshGW-Proxy-Fargate-Service",
        service_name='mesh-gw-proxy',
        cpu=256,
        memory_limit_mib=512,
        desired_count=1,
        listener_port=80,
        assign_public_ip=True,
        task_definition=self.mesh_gw_proxy_task_def,
        cluster=self.ecs_cluster,
        public_load_balancer=True,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=self.ecs_cluster.
            default_cloud_map_namespace,
            name='mesh-gw-proxy'))

    # For testing purposes we will open any ipv4 requests to port 3000
    self.mesh_gateway_proxy_fargate_service.service.connections.allow_from_any_ipv4(
        port_range=aws_ec2.Port(protocol=aws_ec2.Protocol.TCP,
                                string_representation="vtw_proxy",
                                from_port=3000,
                                to_port=3000),
        description="Allow NLB connections on port 3000")

    # Raise the Envoy container's file-descriptor limit
    self.mesh_gw_proxy_task_def.default_container.add_ulimits(
        aws_ecs.Ulimit(hard_limit=15000,
                       name=aws_ecs.UlimitName.NOFILE,
                       soft_limit=15000))

    # Adding necessary policies for Envoy proxy to communicate with required services
    self.mesh_gw_proxy_task_def.execution_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonEC2ContainerRegistryReadOnly"))
    self.mesh_gw_proxy_task_def.execution_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "CloudWatchLogsFullAccess"))
    self.mesh_gw_proxy_task_def.task_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "CloudWatchFullAccess"))
    # self.mesh_gw_proxy_task_def.task_role.add_managed_policy(aws_iam.ManagedPolicy.from_aws_managed_policy_name("AWSXRayDaemonWriteAccess"))
    self.mesh_gw_proxy_task_def.task_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "AWSAppMeshEnvoyAccess"))
    self.mesh_gw_proxy_task_def.execution_role.add_to_policy(
        aws_iam.PolicyStatement(actions=['ec2:DescribeSubnets'],
                                resources=['*']))

    # Cross-stack exports consumed by the other ecsworkshop stacks
    core.CfnOutput(self, "MeshGwNlbDns",
                   value=self.mesh_gateway_proxy_fargate_service.
                   load_balancer.load_balancer_dns_name,
                   export_name="MeshGwNlbDns")
    core.CfnOutput(self, "MeshArn",
                   value=self.mesh.mesh_arn,
                   export_name="MeshArn")
    core.CfnOutput(self, "MeshName",
                   value=self.mesh.mesh_name,
                   export_name="MeshName")
    core.CfnOutput(
        self, "MeshEnvoyServiceArn",
        value=self.mesh_gateway_proxy_fargate_service.service.service_arn,
        export_name="MeshEnvoyServiceArn")
    core.CfnOutput(self, "MeshVGWArn",
                   value=self.mesh_vgw.virtual_gateway_arn,
                   export_name="MeshVGWArn")
    core.CfnOutput(self, "MeshVGWName",
                   value=self.mesh_vgw.virtual_gateway_name,
                   export_name="MeshVGWName")
def __init__(self, scope: core.Construct, id: str, datalake: DataLakeLayer, **kwargs) -> None:
    """Stand up a SonarQube server on ECS Fargate.

    Builds the server and scanner Docker image assets, an Aurora
    PostgreSQL backing database, an ALB-fronted Fargate service, and
    mounts EFS access points for SonarQube's data and logs directories.

    :param datalake: shared layer providing the VPC, EFS file system and
        EFS security group. Accessed below as ``self.datalake`` —
        presumably a property over ``self.__datalake``; confirm in the
        class body.
    """
    super().__init__(scope, id, **kwargs)
    self.__datalake = datalake

    # NOTE(review): this rule admits ALL traffic from any IPv4 address;
    # consider narrowing to the ALB / VPC CIDR if not intentional.
    self.security_group = ec2.SecurityGroup(
        self, 'SecurityGroup',
        vpc=self.datalake.vpc,
        allow_all_outbound=True,
        description='SonarQube Security Group')
    self.security_group.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                                         connection=ec2.Port.all_traffic(),
                                         description='Allow any traffic')

    # Docker image assets built from the local ./images directories:
    # the SonarQube server and the scanner CLI.
    self.sonarqube_svr_ecr = ecr.DockerImageAsset(
        self, 'Repo',
        directory=os.path.join(root_dir, 'images/sonarqube-server'),
        repository_name='sonarqube')
    self.sonarqube_cli_ecr = ecr.DockerImageAsset(
        self, 'Cli',
        directory=os.path.join(root_dir, 'images/sonarqube-scanner'),
        repository_name='sonarqube-cli')

    # Aurora PostgreSQL cluster backing SonarQube.
    # NOTE(review): credentials are hard-coded in plaintext; prefer
    # rds.Credentials.from_generated_secret / Secrets Manager.
    self.database = rds.DatabaseCluster(
        self, 'Database',
        engine=rds.DatabaseClusterEngine.aurora_postgres(
            version=rds.AuroraPostgresEngineVersion.VER_11_9),
        default_database_name='sonarqube',
        removal_policy=core.RemovalPolicy.DESTROY,
        credentials=rds.Credentials.from_username(
            username='******',
            password=core.SecretValue(value='postgres')),
        instance_props=rds.InstanceProps(
            vpc=self.datalake.vpc,
            security_groups=[self.security_group],
            instance_type=ec2.InstanceType('r6g.xlarge')))

    # Alternative EC2-backed deployment kept for reference (disabled):
    # self.ecs_cluster = ecs.Cluster(self,'SonarCluster',
    #   container_insights=True,
    #   vpc=self.datalake.vpc,
    #   capacity=ecs.AddCapacityOptions(
    #     machine_image_type= ecs.MachineImageType.AMAZON_LINUX_2,
    #     instance_type=ec2.InstanceType('m5.xlarge'),
    #     allow_all_outbound=True,
    #     associate_public_ip_address=False,
    #     vpc_subnets= ec2.SubnetSelection(subnet_type= ec2.SubnetType.PUBLIC),
    #     desired_capacity=2))
    # self.service = ecsp.ApplicationLoadBalancedEc2Service(self,'SonarEc2',
    #   cluster=self.ecs_cluster,
    #   desired_count=1,
    #   listener_port=80,
    #   memory_reservation_mib= 4 * 1024,
    #   task_image_options= ecsp.ApplicationLoadBalancedTaskImageOptions(
    #     image= ecs.ContainerImage.from_docker_image_asset(asset=self.sonarqube_svr_ecr),
    #     container_name='sonarqube-svr',
    #     container_port=9000,
    #     enable_logging=True,
    #     environment={
    #       '_SONAR_JDBC_URL':'jdbc:postgresql://{}/sonarqube'.format(
    #         self.database.cluster_endpoint.hostname),
    #       '_SONAR_JDBC_USERNAME':'******',
    #       '_SONAR_JDBC_PASSWORD':'******'
    #     }))

    # ALB-fronted Fargate service running the SonarQube server image;
    # JDBC settings point at the Aurora cluster created above.
    self.service = ecsp.ApplicationLoadBalancedFargateService(
        self, 'Server',
        assign_public_ip=True,
        vpc=self.datalake.vpc,
        desired_count=1,
        cpu=4096,
        memory_limit_mib=8 * 1024,
        listener_port=80,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        security_groups=[self.security_group, self.datalake.efs_sg],
        task_image_options=ecsp.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_docker_image_asset(
                asset=self.sonarqube_svr_ecr),
            container_name='sonarqube-svr',
            container_port=9000,
            enable_logging=True,
            environment={
                '_SONAR_JDBC_URL':
                'jdbc:postgresql://{}/sonarqube'.format(
                    self.database.cluster_endpoint.hostname),
                '_SONAR_JDBC_USERNAME': '******',
                '_SONAR_JDBC_PASSWORD': '******'
            }))

    # Task role needs EFS client access for the volumes mounted below.
    for name in ['AmazonElasticFileSystemClientFullAccess']:
        self.service.task_definition.task_role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(name))

    # Override container specific settings
    container = self.service.task_definition.default_container

    # Required to start remote sql
    container.add_ulimits(
        ecs.Ulimit(name=ecs.UlimitName.NOFILE,
                   soft_limit=262145,
                   hard_limit=262145))

    # One EFS access point per persisted SonarQube directory, mounted
    # into the container with IAM auth disabled and TLS in transit.
    for folder in ['data', 'logs']:
        efs_ap = self.datalake.efs.add_access_point(
            'sonarqube-' + folder,
            create_acl=efs.Acl(owner_gid="0",
                               owner_uid="0",
                               permissions="777"),
            path='/sonarqube/' + folder)
        self.service.task_definition.add_volume(
            name=folder,
            efs_volume_configuration=ecs.EfsVolumeConfiguration(
                file_system_id=self.datalake.efs.file_system_id,
                transit_encryption='ENABLED',
                authorization_config=ecs.AuthorizationConfig(
                    access_point_id=efs_ap.access_point_id,
                    iam='DISABLED')))
        container.add_mount_points(
            ecs.MountPoint(container_path='/opt/sonarqube/' + folder,
                           source_volume=folder,
                           read_only=False))
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, cluster: ecs.Cluster, repository: ecr.Repository, **kwargs) -> None:
    """Deploy a two-service Elasticsearch cluster on ECS EC2 (awsvpc mode).

    Creates a private Cloud Map namespace, a shared security group open
    on 9200/9300, a master-eligible "ES" service (discoverable as
    ``elastic.private``) and a data-only "NODE" service that joins it,
    each behind its own internet-facing ALB.

    :param vpc: VPC hosting the services and load balancers.
    :param cluster: ECS cluster the services run on.
    :param repository: ECR repository holding the Elasticsearch image
        (``latest`` tag is used).
    """
    super().__init__(scope, id, **kwargs)

    # Private DNS namespace: the master registers as "elastic.private"
    # so the data node can find it via unicast discovery.
    namespace = servicediscovery.PrivateDnsNamespace(
        scope=self,
        id="PRIVATE-DNS",
        vpc=vpc,
        name="private",
        description="a private dns",
    )

    # NOTE(review): 9200/9300 are open to any IPv4 address; narrow to
    # the VPC CIDR / ALB if this exposure is not intended.
    sg = ec2.SecurityGroup(
        scope=self,
        id="SG",
        vpc=vpc,
        allow_all_outbound=True,
        description="open 9200 and 9300 ports",
        security_group_name="es-group",
    )
    sg.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port.tcp(port=9200),
    )
    sg.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port.tcp(port=9300),
    )

    #####################################################
    # Master-eligible Elasticsearch task: host-mounted data volume.
    elastic_task_def = ecs.Ec2TaskDefinition(
        scope=self,
        id="ES-TASK-DEF",
        network_mode=ecs.NetworkMode.AWS_VPC,
        volumes=[ecs.Volume(
            name="esdata",
            host=ecs.Host(source_path="/usr/share/elasticsearch/data"),
        )],
    )
    elastic = ecs.ContainerDefinition(
        scope=self,
        id=constants.ES_CONTAINER_NAME,
        start_timeout=core.Duration.seconds(amount=30),
        task_definition=elastic_task_def,
        memory_limit_mib=4500,
        essential=True,
        image=ecs.ContainerImage.from_ecr_repository(
            repository=repository, tag='latest'),
        environment={
            "cluster.name": constants.ES_CLUSTER_NAME,
            "bootstrap.memory_lock": "true",
            "node.name": constants.ES_CONTAINER_NAME,
            "node.master": "true",
            "node.data": "true",
            "ES_JAVA_OPTS": "-Xms4g -Xmx4g",
        },
        logging=ecs.AwsLogDriver(
            stream_prefix="ES",
            log_retention=logs.RetentionDays.ONE_DAY,
        ),
    )
    # CONSISTENCY FIX: was 65535 while the node container below uses
    # 65536; both now use 65536, the value Elasticsearch recommends.
    elastic.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.NOFILE,
        hard_limit=65536,
        soft_limit=65536))
    # Unlimited memlock so bootstrap.memory_lock=true can pin the heap.
    elastic.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.MEMLOCK,
        hard_limit=-1,
        soft_limit=-1))
    elastic.add_port_mappings(ecs.PortMapping(container_port=9200))
    elastic.add_port_mappings(ecs.PortMapping(container_port=9300))
    elastic.add_mount_points(ecs.MountPoint(
        container_path="/usr/share/elasticsearch/data",
        source_volume="esdata",
        read_only=False,
    ))

    es_service = ecs.Ec2Service(
        scope=self,
        id="ES-SERVICE",
        cluster=cluster,
        task_definition=elastic_task_def,
        desired_count=1,
        service_name="ES",
        security_group=sg,
    )
    es_lb = elbv2.ApplicationLoadBalancer(
        scope=self,
        id="ES-ELB",
        vpc=vpc,
        internet_facing=True,
    )
    es_listener = es_lb.add_listener(
        id="ES-LISTENER",
        port=80,
    )
    es_service.register_load_balancer_targets(
        ecs.EcsTarget(
            new_target_group_id="ES-GRP",
            container_name=elastic.container_name,
            listener=ecs.ListenerConfig.application_listener(
                listener=es_listener,
                protocol=elbv2.ApplicationProtocol.HTTP),
        ))
    # Register the master in Cloud Map as "elastic.private" (A record);
    # the returned Service object is not needed here.
    es_service.enable_cloud_map(
        cloud_map_namespace=namespace,
        dns_record_type=servicediscovery.DnsRecordType.A,
        failure_threshold=1,
        name="elastic",
    )
    core.CfnOutput(
        scope=self,
        id="DNS-ES",
        value=es_lb.load_balancer_dns_name,
    )

    #####################################################
    # Data-only node task: joins the master via unicast discovery.
    node_task_def = ecs.Ec2TaskDefinition(
        scope=self,
        id="NODE-TASK-DEF",
        network_mode=ecs.NetworkMode.AWS_VPC,
        volumes=[ecs.Volume(
            name="esdata",
            host=ecs.Host(source_path="/usr/share/elasticsearch/data"),
        )],
    )
    node = ecs.ContainerDefinition(
        scope=self,
        id=constants.ES_NODE_CONTAINER_NAME,
        start_timeout=core.Duration.seconds(amount=40),
        task_definition=node_task_def,
        memory_limit_mib=4500,
        essential=True,
        image=ecs.ContainerImage.from_ecr_repository(
            repository=repository, tag='latest'),
        environment={
            "cluster.name": constants.ES_CLUSTER_NAME,
            "bootstrap.memory_lock": "true",
            # Resolves via the Cloud Map registration of the ES service.
            "discovery.zen.ping.unicast.hosts": "elastic.private",
            "node.name": constants.ES_NODE_CONTAINER_NAME,
            "node.master": "false",
            "node.data": "true",
            "ES_JAVA_OPTS": "-Xms4g -Xmx4g",
        },
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix="NODE",
            log_retention=logs.RetentionDays.ONE_DAY,
        ))
    node.add_port_mappings(ecs.PortMapping(container_port=9200))
    node.add_port_mappings(ecs.PortMapping(container_port=9300))
    node.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.NOFILE,
        hard_limit=65536,
        soft_limit=65536))
    node.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.MEMLOCK,
        hard_limit=-1,
        soft_limit=-1))
    node.add_mount_points(ecs.MountPoint(
        container_path="/usr/share/elasticsearch/data",
        source_volume="esdata",
        read_only=False,
    ))

    node_service = ecs.Ec2Service(
        scope=self,
        id="ES-NODE-SERVICE",
        cluster=cluster,
        task_definition=node_task_def,
        desired_count=1,
        service_name="NODE",
        security_group=sg,
    )
    node_lb = elbv2.ApplicationLoadBalancer(
        scope=self,
        id="NODE-ELB",
        vpc=vpc,
        internet_facing=True,
    )
    node_listener = node_lb.add_listener(
        id="NODE-LISTENER",
        port=80,
    )
    node_service.register_load_balancer_targets(
        ecs.EcsTarget(
            new_target_group_id="NODE-GRP",
            container_name=node.container_name,
            listener=ecs.ListenerConfig.application_listener(
                listener=node_listener,
                protocol=elbv2.ApplicationProtocol.HTTP),
        ))
    core.CfnOutput(
        scope=self,
        id="DNS-NODE",
        value=node_lb.load_balancer_dns_name,
    )
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, cluster: ecs.Cluster, **kwargs) -> None:
    """Provision a two-container Elasticsearch 6.8 deployment (master + data
    node) inside a single BRIDGE-mode EC2 task definition, exposed through an
    internet-facing ALB on port 80.

    :param scope: parent construct.
    :param id: construct id.
    :param vpc: VPC to place the load balancer in.
    :param cluster: ECS cluster that runs the service.
    """
    super().__init__(scope, id, **kwargs)

    # Single task definition hosting both containers; BRIDGE mode is
    # required for the container-to-container `add_link` below.
    elastic_cluster_task_def = ecs.Ec2TaskDefinition(
        scope=self,
        id="ES-TASK-DEF",
        network_mode=ecs.NetworkMode.BRIDGE,
    )

    # Master-eligible data node ("node.master": "true").
    elastic = ecs.ContainerDefinition(
        scope=self,
        id=constants.ES_CONTAINER_NAME,
        start_timeout=core.Duration.seconds(amount=30),
        task_definition=elastic_cluster_task_def,
        memory_limit_mib=4024,
        essential=True,
        image=ecs.ContainerImage.from_registry(
            name="docker.elastic.co/elasticsearch/elasticsearch:6.8.6"),
        environment={
            "cluster.name": constants.ES_CLUSTER_NAME,
            "bootstrap.memory_lock": "true",
            # "discovery.zen.ping.unicast.hosts": "elasticsearch",
            "node.name": constants.ES_CONTAINER_NAME,
            "node.master": "true",
            "node.data": "true",
            "ES_JAVA_OPTS": "-Xms2g -Xmx2g",
        },
        # FIX: use LogDrivers.aws_logs like every other container in this
        # file (equivalent to constructing AwsLogDriver directly).
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix="ES",
            log_retention=logs.RetentionDays.ONE_DAY,
        ),
    )
    # FIX: nofile was 65535 here while all other containers in the file use
    # 65536 (the value Elasticsearch's bootstrap checks expect).
    elastic.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.NOFILE,
        hard_limit=65536,
        soft_limit=65536))
    # memlock -1 == unlimited, needed for bootstrap.memory_lock=true.
    elastic.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.MEMLOCK,
        hard_limit=-1,
        soft_limit=-1))
    elastic.add_port_mappings(ecs.PortMapping(container_port=9200))  # HTTP
    elastic.add_port_mappings(ecs.PortMapping(container_port=9300))  # transport

    #####################################################

    # Data-only node ("node.master": "false") that discovers the master
    # container through the bridge link alias below.
    node = ecs.ContainerDefinition(
        scope=self,
        id=constants.ES_NODE_CONTAINER_NAME,
        start_timeout=core.Duration.seconds(amount=40),
        task_definition=elastic_cluster_task_def,
        memory_limit_mib=4024,
        essential=True,
        image=ecs.ContainerImage.from_registry(
            name="docker.elastic.co/elasticsearch/elasticsearch:6.8.6"),
        environment={
            "cluster.name": constants.ES_CLUSTER_NAME,
            "bootstrap.memory_lock": "true",
            "discovery.zen.ping.unicast.hosts": constants.ES_CONTAINER_NAME,
            "node.name": constants.ES_NODE_CONTAINER_NAME,
            "node.master": "false",
            "node.data": "true",
            "ES_JAVA_OPTS": "-Xms2g -Xmx2g",
        },
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix="NODE",
            log_retention=logs.RetentionDays.ONE_DAY,
        ))
    node.add_port_mappings(ecs.PortMapping(container_port=9200))
    node.add_port_mappings(ecs.PortMapping(container_port=9300))
    node.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.NOFILE,
        hard_limit=65536,
        soft_limit=65536))
    node.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.MEMLOCK,
        hard_limit=-1,
        soft_limit=-1))
    # Bridge-network link so the node can reach the master by alias.
    node.add_link(container=elastic, alias=constants.ES_CONTAINER_NAME)

    #####################################################

    # Service running the task, fronted by an internet-facing ALB.
    ecs_service = ecs.Ec2Service(
        scope=self,
        id="ES-SERVICE",
        cluster=cluster,
        task_definition=elastic_cluster_task_def,
        desired_count=1,
        service_name=constants.ECS_ES_SERVICE,
    )

    lb = elbv2.ApplicationLoadBalancer(
        scope=self,
        id="ELB",
        vpc=vpc,
        internet_facing=True,
    )
    listener = lb.add_listener(
        id="LISTENER",
        port=80,
    )
    ecs_service.register_load_balancer_targets(
        ecs.EcsTarget(
            new_target_group_id="TARGET-GRP",
            container_name=elastic.container_name,
            # container_port=9200,
            listener=ecs.ListenerConfig.application_listener(
                listener=listener,
                protocol=elbv2.ApplicationProtocol.HTTP),
        ))

    # Surface the ALB DNS name in the stack outputs.
    core.CfnOutput(
        scope=self,
        id="DNS-NAME",
        value=lb.load_balancer_dns_name,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision a complete SonarQube deployment: a VPC, an Aurora
    PostgreSQL cluster for its backing store, and an ALB-fronted ECS (EC2)
    service running the SonarQube container.

    :param scope: parent construct.
    :param id: construct id.
    """
    super().__init__(scope, id, **kwargs)

    # Create VPC (spread over up to 3 AZs).
    self.vpc = ec2.Vpc(
        self, 'SonarVPC',
        max_azs=3
    )

    # DB Security Group with required ingress rules.
    # FIX: the original opened port 5432 to any IPv4 address on the
    # Internet; the database only needs to be reachable from inside the
    # VPC, so restrict ingress to the VPC CIDR.
    self.sg = ec2.SecurityGroup(
        self, "SonarQubeSG",
        vpc=self.vpc,
        allow_all_outbound=True,
        description="Aurora Security Group"
    )
    self.sg.add_ingress_rule(
        ec2.Peer.ipv4(self.vpc.vpc_cidr_block),
        ec2.Port.tcp(5432),
        "SonarDBAurora")

    pgroup = rds.ParameterGroup.from_parameter_group_name(
        self, "SonarDBParamGroup",
        parameter_group_name='default.aurora-postgresql11'
    )

    # Create RDS (Aurora PostgreSQL 11.6) cluster.
    # NOTE(review): "******" looks like a scrubbed placeholder, not a real
    # master username — replace before deploying.
    self.db = rds.DatabaseCluster(
        self, 'SonarDBCluster',
        engine=rds.DatabaseClusterEngine.aurora_postgres(
            version=rds.AuroraPostgresEngineVersion.VER_11_6),
        default_database_name='sonarqube',
        parameter_group=pgroup,
        master_user=rds.Login(username="******"),
        instance_props=rds.InstanceProps(
            instance_type=ec2.InstanceType.of(
                ec2.InstanceClass.BURSTABLE3,
                ec2.InstanceSize.MEDIUM
            ),
            security_groups=[self.sg],
            vpc=self.vpc
        )
    )

    # Create ECS cluster backed by an m5.large autoscaling group.
    self.cluster = ecs.Cluster(
        self, "SonarCluster",
        capacity=ecs.AddCapacityOptions(
            instance_type=ec2.InstanceType('m5.large')),
        vpc=self.vpc
    )
    # Host-level kernel/ulimit settings required by SonarQube's embedded
    # Elasticsearch (mmap count, file descriptors, process count).
    asg = self.cluster.autoscaling_group
    user_data = asg.user_data
    user_data.add_commands('sysctl -qw vm.max_map_count=262144')
    user_data.add_commands('sysctl -w fs.file-max=65536')
    user_data.add_commands('ulimit -n 65536')
    user_data.add_commands('ulimit -u 4096')

    # Create IAM Role for Task.
    self.task_role = iam.Role(
        self,
        id="SonarTaskRole",
        role_name="SonarTaskRole",
        assumed_by=iam.ServicePrincipal(service="ecs-tasks.amazonaws.com"),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonECSTaskExecutionRolePolicy")
        ]
    )
    # Grant permission for Task to read secret from SecretsManager.
    self.db.secret.grant_read(self.task_role)

    url = 'jdbc:postgresql://{}/sonarqube'.format(
        self.db.cluster_endpoint.socket_address)

    # Create the ALB-fronted SonarQube service; DB credentials are injected
    # as container secrets, the JDBC URL as a plain environment variable.
    task = ecs_patterns.ApplicationLoadBalancedEc2Service(
        self,
        "SonarService",
        # if a cluster is provided use the same vpc
        cluster=self.cluster,
        cpu=512,
        desired_count=1,
        task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            image=ecs.ContainerImage.from_registry("sonarqube:8.2-community"),
            container_port=9000,
            secrets={
                "sonar.jdbc.username": ecs.Secret.from_secrets_manager(
                    self.db.secret, field="username"),
                "sonar.jdbc.password": ecs.Secret.from_secrets_manager(
                    self.db.secret, field="password")
            },
            environment={
                'sonar.jdbc.url': url
            },
            task_role=self.task_role
        ),
        memory_limit_mib=2048,
        public_load_balancer=True
    )

    # Raise the container's open-file limit, required by the embedded
    # Elasticsearch bootstrap checks.
    container = task.task_definition.default_container
    container.add_ulimits(
        ecs.Ulimit(
            name=ecs.UlimitName.NOFILE,
            soft_limit=65536,
            hard_limit=65536
        )
    )
def __init__(self, scope: core.Construct, id: str, custom_vpc, efs_share, efs_ap_nginx, enable_container_insights: bool = False, **kwargs) -> None:
    """Provision an ALB-fronted Fargate service running nginx, with its web
    root (/usr/share/nginx/html) mounted from an EFS access point.

    :param scope: parent construct.
    :param id: construct id.
    :param custom_vpc: VPC to deploy the cluster and service into.
    :param efs_share: EFS file system providing the shared web root.
    :param efs_ap_nginx: EFS access point used for the mount.
    :param enable_container_insights: reserved flag; currently unused
        (the container_insights wiring below is commented out).
    """
    super().__init__(scope, id, **kwargs)

    # Security group intended to allow the Fargate tasks to reach EFS and
    # to accept HTTP from the Internet.
    # NOTE(review): this SG is never attached to the service or the EFS
    # share in this block — the ALB/Fargate pattern below creates its own
    # security groups. Confirm whether web_svc_sg should be passed to the
    # service or removed.
    web_svc_sg = _ec2.SecurityGroup(
        self,
        id="webSvcSecurityGroup",
        vpc=custom_vpc,
        security_group_name=f"web_svc_sg_{id}",
        description=
        "Security Group to allow Fargate Cluster instances to access EFS")

    # Allow Internet access to Fargate web service
    web_svc_sg.add_ingress_rule(
        _ec2.Peer.any_ipv4(),
        _ec2.Port.tcp(80),
        description="Allow Internet access to web service")

    # Fargate cluster hosting the web service.
    fargate_cluster = _ecs.Cluster(
        self,
        "fargateClusterId",
        cluster_name=f"web-app-{id}",
        # container_insights=enable_container_insights,
        vpc=custom_vpc)

    web_app_task_def = _ecs.FargateTaskDefinition(
        self,
        "webAppTaskDef",
        cpu=256,
        memory_limit_mib=512,
    )

    # Add EFS Volume to TaskDef (encrypted in transit, via access point).
    web_app_task_def.add_volume(
        name="html",
        efs_volume_configuration=_ecs.EfsVolumeConfiguration(
            file_system_id=efs_share.file_system_id,
            transit_encryption="ENABLED",
            authorization_config=_ecs.AuthorizationConfig(
                access_point_id=efs_ap_nginx.access_point_id)))

    web_app_container = web_app_task_def.add_container(
        "webAppContainer",
        cpu=256,
        memory_limit_mib=512,
        environment={
            "github": "https://github.com/miztiik",
            "ko_fi": "https://ko-fi.com/miztiik"
        },
        image=_ecs.ContainerImage.from_registry("nginx:latest"),
        logging=_ecs.LogDrivers.aws_logs(
            stream_prefix="mystique-automation-logs",
            log_retention=_logs.RetentionDays.ONE_DAY))

    # Raise the open-file limit for the web server container.
    web_app_container.add_ulimits(
        _ecs.Ulimit(name=_ecs.UlimitName.NOFILE,
                    soft_limit=65536,
                    hard_limit=65536))

    web_app_container.add_port_mappings(
        _ecs.PortMapping(container_port=80, protocol=_ecs.Protocol.TCP))
    web_app_container.add_port_mappings(
        _ecs.PortMapping(container_port=443, protocol=_ecs.Protocol.TCP))

    # Mount EFS Volume to Web Server Container
    web_app_container.add_mount_points(
        _ecs.MountPoint(container_path="/usr/share/nginx/html",
                        read_only=False,
                        source_volume="html"))

    # Launch service and attach load balancer using CDK Pattern.
    # Platform 1.4 is required for EFS volume support on Fargate.
    web_app_service = _ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        "webSrv",
        platform_version=_ecs.FargatePlatformVersion.VERSION1_4,
        cluster=fargate_cluster,
        task_definition=web_app_task_def,
        assign_public_ip=False,
        public_load_balancer=True,
        listener_port=80,
        desired_count=1,
        # enable_ecs_managed_tags=True,
        health_check_grace_period=core.Duration.seconds(60),
        # cpu=1024,
        # memory_limit_mib=2048,
        # service_name="chatAppService",
    )

    # Outputs
    output_0 = core.CfnOutput(
        self,
        "AutomationFrom",
        value=f"{GlobalArgs.SOURCE_INFO}",
        description=
        "To know more about this automation stack, check out our github page."
    )
    # FIX: the original description was copy-pasted from AutomationFrom and
    # did not describe this output.
    output_1 = core.CfnOutput(
        self,
        "ClusterNameOutput",
        value=f"{fargate_cluster.cluster_name}",
        description=
        "Name of the Fargate cluster hosting the web application."
    )
    output_2 = core.CfnOutput(
        self,
        "webAppServiceUrl",
        value=
        f"http://{web_app_service.load_balancer.load_balancer_dns_name}",
        description=
        "Use an utility like curl or an browser to access the web server.")