def __init__(self, scope: core.Stack, id: str, **kwargs):
    """Provision the Crystal backend as a Fargate service on the shared platform."""
    super().__init__(scope, id, **kwargs)

    # Shared platform resources: ECS cluster, service security group,
    # and the Cloud Map service-discovery namespace.
    self.base_platform = BasePlatform(self, self.stack_name)

    # 256 CPU units / 512 MiB task, declared compatible with both launch types.
    self.fargate_task_def = aws_ecs.TaskDefinition(
        self,
        "TaskDef",
        compatibility=aws_ecs.Compatibility.EC2_AND_FARGATE,
        cpu='256',
        memory_mib='512',
    )

    crystal_logging = aws_ecs.LogDriver.aws_logs(stream_prefix='ecsworkshop-crystal')
    self.container = self.fargate_task_def.add_container(
        "CrystalServiceContainerDef",
        image=aws_ecs.ContainerImage.from_registry("brentley/ecsdemo-crystal"),
        memory_reservation_mib=512,
        logging=crystal_logging,
    )

    # Port the Crystal application listens on.
    self.container.add_port_mappings(aws_ecs.PortMapping(container_port=3000))

    # Register the service in the platform's private DNS namespace.
    discovery = aws_ecs.CloudMapOptions(
        cloud_map_namespace=self.base_platform.sd_namespace,
        name='ecsdemo-crystal',
    )
    self.fargate_service = aws_ecs.FargateService(
        self,
        "CrystalFargateService",
        task_definition=self.fargate_task_def,
        cluster=self.base_platform.ecs_cluster,
        security_group=self.base_platform.services_sec_grp,
        desired_count=1,
        cloud_map_options=discovery,
    )
def __init__(self, scope: core.Construct, id: str, name_extension: str, stage: str,
             tags: list, vpc_name: str, region: str, ecs_conf: dict, **kwargs) -> None:
    """Deploy nginx from the pre-existing 'nginx' ECR repository as a Fargate service.

    :param name_extension: prefix used for log stream naming (and passed to BasePlatform)
    :param stage: deployment stage forwarded to BasePlatform
    :param tags: tag definitions applied to all created constructs via self.tags_creation
    :param vpc_name: VPC lookup name forwarded to BasePlatform
    :param region: injected into the container as the REGION environment variable
    :param ecs_conf: expects keys "task_name", "dns_name", "service_name"
    """
    super().__init__(scope, id, **kwargs)
    # Shared platform: ECS cluster, service security group, Cloud Map namespace.
    self.base_platform = bp.BasePlatform(self, id, name_extension, stage, vpc_name)
    # Constructs collected here are tagged at the end by self.tags_creation(tags).
    self.objects_list = []
    # Reference (do not create) the existing "nginx" ECR repository.
    self.ecr = _ecr.Repository.from_repository_name(self, "nginx-ecr", repository_name="nginx")
    self.fargate_task_def = _ecs.FargateTaskDefinition(
        self, "lz-nginx-ecr-td",
        family=ecs_conf["task_name"],
        cpu=256,
        memory_limit_mib=512,
    )
    self.container = self.fargate_task_def.add_container(
        "lz-nginx-ecr-container",
        # NOTE(review): pinned to the mutable "latest" tag — deployments will not
        # pick up image changes until a new task is forced; confirm this is intended.
        image=_ecs.ContainerImage.from_ecr_repository(self.ecr, "latest"),
        memory_reservation_mib=512,
        logging=_ecs.LogDriver.aws_logs(
            stream_prefix=name_extension + "-nginx-container"
        ),
        environment={
            "REGION": region
        },
    )
    # nginx default HTTP port.
    self.container.add_port_mappings(
        _ecs.PortMapping(
            container_port=80
        )
    )
    self.fargate_service = _ecs.FargateService(
        self, "lz-nginx-ecr-service",
        task_definition=self.fargate_task_def,
        cluster=self.base_platform.ecs_cluster,
        security_group=self.base_platform.services_sec_grp,
        desired_count=1,
        # Register under the configured DNS name in the platform namespace.
        cloud_map_options=_ecs.CloudMapOptions(
            cloud_map_namespace=self.base_platform.sd_namespace,
            name=ecs_conf["dns_name"]
        ),
        service_name=ecs_conf["service_name"],
        platform_version=_ecs.FargatePlatformVersion.VERSION1_4,
    )
    self.objects_list.append(self.ecr)
    self.objects_list.append(self.fargate_service)
    self.objects_list.append(self.container)
    self.objects_list.append(self.fargate_task_def)
    # Apply the provided tags to everything collected above.
    self.tags_creation(tags)
def __init__(self, scope: core.Stack, id: str, **kwargs):
    """Deploy the Crystal backend service onto the shared ECS platform."""
    super().__init__(scope, id, **kwargs)

    # Shared platform: cluster, services security group, Cloud Map namespace.
    self.base_platform = BasePlatform(self, self.stack_name)

    # 256 CPU units / 512 MiB task, runnable on EC2 or Fargate.
    self.fargate_task_def = aws_ecs.TaskDefinition(
        self,
        "TaskDef",
        compatibility=aws_ecs.Compatibility.EC2_AND_FARGATE,
        cpu='256',
        memory_mib='512',
    )

    # NOTE(review): getenv may return None when AWS_DEFAULT_REGION is unset,
    # which fails at synth time — confirm the variable is always exported.
    container_env = {
        "REGION": getenv('AWS_DEFAULT_REGION')
    }
    self.container = self.fargate_task_def.add_container(
        "CrystalServiceContainerDef",
        image=aws_ecs.ContainerImage.from_registry("adam9098/ecsdemo-crystal"),
        memory_reservation_mib=512,
        logging=aws_ecs.LogDriver.aws_logs(stream_prefix='ecsworkshop-crystal'),
        environment=container_env,
    )

    # Port the Crystal application listens on.
    self.container.add_port_mappings(aws_ecs.PortMapping(container_port=3000))

    service_discovery = aws_ecs.CloudMapOptions(
        cloud_map_namespace=self.base_platform.sd_namespace,
        name='ecsdemo-crystal',
    )
    self.fargate_service = aws_ecs.FargateService(
        self,
        "CrystalFargateService",
        service_name='ecsdemo-crystal',
        task_definition=self.fargate_task_def,
        cluster=self.base_platform.ecs_cluster,
        security_group=self.base_platform.services_sec_grp,
        desired_count=1,
        cloud_map_options=service_discovery,
    )

    # Let the task enumerate subnets (used by the app at runtime — presumably
    # for topology discovery; verify against the container code).
    subnet_lookup = aws_iam.PolicyStatement(
        actions=['ec2:DescribeSubnets'],
        resources=['*'],
    )
    self.fargate_task_def.add_to_task_role_policy(subnet_lookup)
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """Frontend stack: Fargate service behind an internet-facing ALB.

    Expects props to contain 'vpc', 'endpoint_sg' (VPC endpoint security group),
    and 'cluster'; exposes the created service via self.output_props['frontend_service'].
    """
    super().__init__(scope, id, **kwargs)
    vpc = props['vpc']
    endpoint_sg = props['endpoint_sg']
    cluster = props['cluster']
    # Create the task execution role (pull image, write logs).
    task_execution_role_policy = iam.ManagedPolicy.from_aws_managed_policy_name(
        'service-role/AmazonECSTaskExecutionRolePolicy')
    task_execution_role = iam.Role(
        self, 'TaskExecutionRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        managed_policies=[task_execution_role_policy])
    # Create the task role (assumed by the application container).
    task_role = iam.Role(
        self, 'TaskRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
    # Reference the existing 'frontend' ECR repository.
    repository = ecr.Repository.from_repository_name(
        self, 'Frontend', 'frontend')
    # Create the task definition.
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs.README.html#task-definitions
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
    task_definition = ecs.FargateTaskDefinition(
        self, 'TaskDef',
        memory_limit_mib=512,
        cpu=256,
        execution_role=task_execution_role,
        task_role=task_role)
    container = task_definition.add_container(
        'Container',
        # NOTE(review): mutable "latest" tag — redeploys will not pick up new
        # images automatically; confirm this is intended.
        image=ecs.ContainerImage.from_ecr_repository(repository=repository, tag='latest'),
        logging=ecs.AwsLogDriver(stream_prefix='/ecs/'),
        environment={
            # Backend is reached through Cloud Map DNS inside the cluster namespace.
            'BACKEND_URL': 'http://backend.mycluster.local:5000/messages'
        })
    container.add_port_mappings(ecs.PortMapping(container_port=5000))
    # Security group for the ALB.
    alb_sg = ec2.SecurityGroup(self, 'ALBSecurityGroup', vpc=vpc)
    # Create the internet-facing ALB.
    alb = elbv2.ApplicationLoadBalancer(self, 'ALB', vpc=vpc, internet_facing=True, security_group=alb_sg)
    # # Allow inbound traffic on port 80 (equivalent explicit form):
    # alb_sg.add_ingress_rule(
    #     peer=ec2.Peer.any_ipv4(),
    #     connection=ec2.Port.tcp(80)
    # )
    alb.connections.allow_from_any_ipv4(ec2.Port.tcp(80))
    # Security group for the frontend service.
    frontend_service_sg = ec2.SecurityGroup(self, 'FrontendServiceSecurityGroup', vpc=vpc)
    # Create the service.
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs.README.html#service
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs.html
    frontend_service = ecs.FargateService(
        self, 'FrontendService',
        cluster=cluster,
        task_definition=task_definition,
        min_healthy_percent=50,
        max_healthy_percent=200,
        desired_count=2,
        security_group=frontend_service_sg,
        cloud_map_options=ecs.CloudMapOptions(name='frontend'))
    # Allow traffic from the ALB security group.
    frontend_service.connections.allow_from(alb, ec2.Port.all_traffic())
    # Allow traffic between tasks in the same security group.
    frontend_service.connections.allow_internally(ec2.Port.all_traffic())
    # Allow access to the VPC endpoints' security group.
    frontend_service.connections.allow_to(endpoint_sg, ec2.Port.all_traffic())
    # ApplicationListener
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_elasticloadbalancingv2/ApplicationListener.html#aws_cdk.aws_elasticloadbalancingv2.ApplicationListener
    listener = alb.add_listener('Listener', port=80)
    listener.add_targets(
        'ECS',
        port=5000,
        protocol=elbv2.ApplicationProtocol.HTTP,
        targets=[frontend_service],
        health_check=elbv2.HealthCheck(
            path='/health',
            interval=core.Duration.seconds(10),
            healthy_threshold_count=2))
    core.CfnOutput(self, 'LoadBalancerDNS',
                   description='Load Balancer DNS Name',
                   value=alb.load_balancer_dns_name)
    # Pass props through, augmented with the created service, for downstream stacks.
    self.output_props = props.copy()
    self.output_props['frontend_service'] = frontend_service
def appmesh(self):
    """Create the App Mesh control plane, a Virtual Gateway, and the Envoy
    gateway proxy running as an NLB-fronted Fargate service on self.ecs_cluster.

    Exports mesh/gateway identifiers as CloudFormation outputs for other stacks.
    """
    # This will create the app mesh (control plane)
    self.mesh = aws_appmesh.Mesh(self, "EcsWorkShop-AppMesh", mesh_name="ecs-mesh")
    # We will create a App Mesh Virtual Gateway
    self.mesh_vgw = aws_appmesh.VirtualGateway(
        self, "Mesh-VGW",
        mesh=self.mesh,
        listeners=[aws_appmesh.VirtualGatewayListener.http(port=3000)],
        virtual_gateway_name="ecsworkshop-vgw")
    # Creating the mesh gateway task for the frontend app
    # For more info related to App Mesh Proxy check https://docs.aws.amazon.com/app-mesh/latest/userguide/getting-started-ecs.html
    self.mesh_gw_proxy_task_def = aws_ecs.FargateTaskDefinition(
        self, "mesh-gw-proxy-taskdef",
        cpu=256,
        memory_limit_mib=512,
        family="mesh-gw-proxy-taskdef",
    )
    # LogGroup for the App Mesh Proxy Task
    self.logGroup = aws_logs.LogGroup(
        self, "ecsworkshopMeshGateway",
        #log_group_name="ecsworkshop-mesh-gateway",
        retention=aws_logs.RetentionDays.ONE_WEEK)
    # App Mesh Virtual Gateway Envoy proxy Task definition
    # For a use specific ECR region, please check https://docs.aws.amazon.com/app-mesh/latest/userguide/envoy.html
    container = self.mesh_gw_proxy_task_def.add_container(
        "mesh-gw-proxy-contdef",
        image=aws_ecs.ContainerImage.from_registry(
            "public.ecr.aws/appmesh/aws-appmesh-envoy:v1.18.3.0-prod"),
        container_name="envoy",
        memory_reservation_mib=256,
        environment={
            # NOTE(review): getenv may return None if AWS_DEFAULT_REGION is
            # unset, which fails at synth time — confirm it is always exported.
            "REGION": getenv('AWS_DEFAULT_REGION'),
            "ENVOY_LOG_LEVEL": "info",
            "ENABLE_ENVOY_STATS_TAGS": "1",
            # "ENABLE_ENVOY_XRAY_TRACING": "1",
            # Binds this Envoy to the Virtual Gateway created above.
            "APPMESH_RESOURCE_ARN": self.mesh_vgw.virtual_gateway_arn
        },
        essential=True,
        logging=aws_ecs.LogDriver.aws_logs(stream_prefix='/mesh-gateway',
                                           log_group=self.logGroup),
        # Healthy once Envoy's admin endpoint reports server state LIVE.
        health_check=aws_ecs.HealthCheck(command=[
            "CMD-SHELL",
            "curl -s http://localhost:9901/server_info | grep state | grep -q LIVE"
        ], ))
    # Default port where frontend app is listening
    container.add_port_mappings(aws_ecs.PortMapping(container_port=3000))
    #ammmesh-xray-uncomment
    # xray_container = self.mesh_gw_proxy_task_def.add_container(
    #     "FrontendServiceXrayContdef",
    #     image=aws_ecs.ContainerImage.from_registry("amazon/aws-xray-daemon"),
    #     logging=aws_ecs.LogDriver.aws_logs(
    #         stream_prefix='/xray-container',
    #         log_group=self.logGroup
    #     ),
    #     essential=True,
    #     container_name="xray",
    #     memory_reservation_mib=256,
    #     user="******"
    # )
    # container.add_container_dependencies(aws_ecs.ContainerDependency(
    #     container=xray_container,
    #     condition=aws_ecs.ContainerDependencyCondition.START
    # )
    # )
    #ammmesh-xray-uncomment
    # For environment variables check https://docs.aws.amazon.com/app-mesh/latest/userguide/envoy-config.html
    self.mesh_gateway_proxy_fargate_service = aws_ecs_patterns.NetworkLoadBalancedFargateService(
        self, "MeshGW-Proxy-Fargate-Service",
        service_name='mesh-gw-proxy',
        cpu=256,
        memory_limit_mib=512,
        desired_count=1,
        listener_port=80,
        assign_public_ip=True,
        task_definition=self.mesh_gw_proxy_task_def,
        cluster=self.ecs_cluster,
        public_load_balancer=True,
        cloud_map_options=aws_ecs.CloudMapOptions(
            cloud_map_namespace=self.ecs_cluster.default_cloud_map_namespace,
            name='mesh-gw-proxy'))
    # For testing purposes we will open any ipv4 requests to port 3000
    self.mesh_gateway_proxy_fargate_service.service.connections.allow_from_any_ipv4(
        port_range=aws_ec2.Port(protocol=aws_ec2.Protocol.TCP,
                                string_representation="vtw_proxy",
                                from_port=3000,
                                to_port=3000),
        description="Allow NLB connections on port 3000")
    # Raise the file-descriptor limit for Envoy (many concurrent connections).
    self.mesh_gw_proxy_task_def.default_container.add_ulimits(
        aws_ecs.Ulimit(hard_limit=15000,
                       name=aws_ecs.UlimitName.NOFILE,
                       soft_limit=15000))
    # Adding necessary policies for Envoy proxy to communicate with required services
    self.mesh_gw_proxy_task_def.execution_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonEC2ContainerRegistryReadOnly"))
    self.mesh_gw_proxy_task_def.execution_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "CloudWatchLogsFullAccess"))
    self.mesh_gw_proxy_task_def.task_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "CloudWatchFullAccess"))
    # self.mesh_gw_proxy_task_def.task_role.add_managed_policy(aws_iam.ManagedPolicy.from_aws_managed_policy_name("AWSXRayDaemonWriteAccess"))
    self.mesh_gw_proxy_task_def.task_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            "AWSAppMeshEnvoyAccess"))
    self.mesh_gw_proxy_task_def.execution_role.add_to_policy(
        aws_iam.PolicyStatement(actions=['ec2:DescribeSubnets'],
                                resources=['*']))
    # Cross-stack outputs consumed by the application service stacks.
    core.CfnOutput(self, "MeshGwNlbDns",
                   value=self.mesh_gateway_proxy_fargate_service.
                   load_balancer.load_balancer_dns_name,
                   export_name="MeshGwNlbDns")
    core.CfnOutput(self, "MeshArn",
                   value=self.mesh.mesh_arn,
                   export_name="MeshArn")
    core.CfnOutput(self, "MeshName",
                   value=self.mesh.mesh_name,
                   export_name="MeshName")
    core.CfnOutput(
        self, "MeshEnvoyServiceArn",
        value=self.mesh_gateway_proxy_fargate_service.service.service_arn,
        export_name="MeshEnvoyServiceArn")
    core.CfnOutput(self, "MeshVGWArn",
                   value=self.mesh_vgw.virtual_gateway_arn,
                   export_name="MeshVGWArn")
    core.CfnOutput(self, "MeshVGWName",
                   value=self.mesh_vgw.virtual_gateway_name,
                   export_name="MeshVGWName")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Stand up a Dask cluster on ECS Fargate plus a SageMaker notebook.

    Creates a VPC, an ECS cluster with a private 'local-dask' Cloud Map
    namespace, a scheduler service and a worker service (both discoverable
    by DNS), and a notebook instance in the same VPC for driving the cluster.
    """
    super().__init__(scope, id, **kwargs)

    # Image built and pushed from ./dockerstuff; ContainerImage.from_asset
    # handles the ECR repository, so none is created explicitly.
    # NOTE(review): build_args is documented as a mapping of --build-arg
    # values; confirm the list form is accepted by the CDK version in use.
    dockercontainer = ecs.ContainerImage.from_asset(
        directory='dockerstuff', build_args=['-t dask .'])

    # VPC spanning up to 3 AZs; the notebook lands in a private subnet.
    vpc = ec2.Vpc(self, 'MyVpc', max_azs=3)
    subnets = vpc.private_subnets

    # Separate log groups for the scheduler and the workers.
    s_logs = logs.LogGroup(self, 'SlogGroup', log_group_name='SlogGroup')
    w_logs = logs.LogGroup(self, 'WlogGroup', log_group_name='WlogGroup')

    # Single role used as both task role and execution role for ECS tasks.
    nRole = iam_.Role(self, 'ECSExecutionRole',
                      assumed_by=iam_.ServicePrincipal('ecs-tasks'))
    iam_.Policy(
        self, "ECSExecutionPolicy",
        policy_name="ECSExecutionPolicy",
        statements=[
            iam_.PolicyStatement(
                actions=[
                    'ecr:BatchCheckLayerAvailability',
                    'ecr:GetDownloadUrlForLayer',
                    'ecr:BatchGetImage',
                    'ecr:GetAuthorizationToken',
                    'logs:CreateLogStream',
                    'logs:PutLogEvents',
                    'sagemaker:*',
                    's3:*',
                ],
                resources=['*'],
            ),
        ]).attach_to_role(nRole)

    # ECS cluster with a default private DNS namespace so services register
    # as <service-name>.local-dask.
    cluster = ecs.Cluster(self, 'DaskCluster', vpc=vpc,
                          cluster_name='Fargate-Dask-Cluster')
    cluster.add_default_cloud_map_namespace(
        name='local-dask', type=sd.NamespaceType.DNS_PRIVATE, vpc=vpc)

    # -------------------- Scheduler task --------------------
    schedulerTask = ecs.TaskDefinition(
        self, 'taskDefinitionScheduler',
        compatibility=ecs.Compatibility.FARGATE,
        cpu='4096',
        memory_mib='8192',
        network_mode=ecs.NetworkMode.AWS_VPC,
        placement_constraints=None,
        execution_role=nRole,
        family='Dask-Scheduler',
        task_role=nRole)
    schedulerTask.add_container(
        'MySchedulerImage',
        image=dockercontainer,
        command=['dask-scheduler'],
        cpu=4096,
        essential=True,
        logging=ecs.LogDriver.aws_logs(stream_prefix='ecs', log_group=s_logs),
        memory_limit_mib=8192,
        memory_reservation_mib=8192)

    # -------------------- Worker task --------------------
    workerTask = ecs.TaskDefinition(
        self, 'taskDefinitionWorker',
        compatibility=ecs.Compatibility.FARGATE,
        cpu='4096',
        memory_mib='8192',
        network_mode=ecs.NetworkMode.AWS_VPC,
        placement_constraints=None,
        execution_role=nRole,
        family='Dask-Worker',
        task_role=nRole)
    workerTask.add_container(
        'MyWorkerImage',
        image=dockercontainer,
        # Fix: each CLI option and its value must be separate argv tokens — a
        # single token like '--memory-limit 1800MB' is not parsed by dask-worker.
        command=[
            'dask-worker', 'dask-scheduler.local-dask:8786',
            '--memory-limit', '1800MB',
            '--worker-port', '9000',
            '--nanny-port', '9001',
            '--bokeh-port', '9002',
        ],
        cpu=4096,
        essential=True,
        # Fix: workers now log to their own group; previously this pointed at
        # the scheduler's log group and WlogGroup was created but never used.
        logging=ecs.LogDriver.aws_logs(stream_prefix='ecs', log_group=w_logs),
        memory_limit_mib=8192,
        memory_reservation_mib=8192)

    # Security group opening scheduler (8786-8789) and worker (9000-9002) ports.
    sg = ec2.SecurityGroup(self, 'MySG',
                           vpc=vpc,
                           description='Enable Scheduler ports access',
                           security_group_name='DaskSecurityGroup')
    # TODO: restrict the source from any-IPv4 to the security group itself.
    sg.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                        connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                            string_representation='p1',
                                            from_port=8786,
                                            to_port=8789))
    sg.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                        connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                            string_representation='p2',
                                            from_port=9000,
                                            to_port=9002))

    # -------------------- Scheduler service --------------------
    # Registered in the cluster's default namespace as Dask-Scheduler.local-dask.
    cmap1 = ecs.CloudMapOptions(dns_ttl=core.Duration.seconds(60),
                                failure_threshold=10,
                                name='Dask-Scheduler')
    ecs.FargateService(
        self, 'DaskSchedulerService',
        task_definition=schedulerTask,
        assign_public_ip=True,
        security_group=sg,
        cluster=cluster,
        desired_count=1,
        max_healthy_percent=200,
        min_healthy_percent=100,
        service_name='Dask-Scheduler',
        cloud_map_options=cmap1)

    # -------------------- Worker service --------------------
    cmap2 = ecs.CloudMapOptions(dns_ttl=core.Duration.seconds(60),
                                failure_threshold=10,
                                name='Dask-Worker')
    ecs.FargateService(
        self, 'DaskWorkerService',
        task_definition=workerTask,
        assign_public_ip=True,
        security_group=sg,
        cluster=cluster,
        desired_count=1,
        max_healthy_percent=200,
        min_healthy_percent=100,
        service_name='Dask-Worker',
        cloud_map_options=cmap2)

    # -------------------- Notebook --------------------
    # Notebook instance in the same VPC/SG so it can reach the scheduler.
    smRole = iam_.Role(self, "notebookAccessRole",
                       assumed_by=iam_.ServicePrincipal('sagemaker'))
    iam_.Policy(self, "notebookAccessPolicy",
                policy_name="notebookAccessPolicy",
                statements=[
                    iam_.PolicyStatement(actions=['s3:*', 'ecs:*'],
                                         resources=['*']),
                ]).attach_to_role(smRole)
    sagemaker_.CfnNotebookInstance(
        self, 'DaskNotebook',
        instance_type='ml.t2.medium',
        volume_size_in_gb=50,
        security_group_ids=[sg.security_group_id],
        subnet_id=subnets[0].subnet_id,
        notebook_instance_name='DaskNotebook',
        role_arn=smRole.role_arn,
        root_access='Enabled',
        direct_internet_access='Enabled',
        default_code_repository='https://github.com/w601sxs/dask-examples.git')
def __init__(self, scope: core.Stack, id: str, cluster, vpc, worker, **kwargs) -> None:
    """Jenkins master stack: builds the master image and runs it either as an
    ALB-fronted Fargate service or as an EC2 service with an EFS-backed home,
    depending on the 'fargate_enabled'/'ec2_enabled' config flags. Also grants
    the task the IAM permissions the Jenkins ECS plugin needs to launch workers.
    """
    super().__init__(scope, id, **kwargs)
    # Sibling stacks this one reads resources from.
    self.cluster = cluster
    self.vpc = vpc
    self.worker = worker
    # Building a custom image for jenkins master.
    self.container_image = ecr.DockerImageAsset(
        self, "JenkinsMasterDockerImage", directory='./docker/master/')
    # Fargate is the default: used when explicitly enabled, or when EC2 is not.
    if config['DEFAULT']['fargate_enabled'] == "yes" or not config[
            'DEFAULT']['ec2_enabled'] == "yes":
        # Task definition details to define the Jenkins master container
        self.jenkins_task = ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            # image=ecs.ContainerImage.from_ecr_repository(self.container_image.repository),
            image=ecs.ContainerImage.from_docker_image_asset(
                self.container_image),
            container_port=8080,
            enable_logging=True,
            # Environment consumed by the config-as-code setup inside the image
            # to configure the Jenkins ECS plugin (worker launch parameters).
            environment={
                # https://github.com/jenkinsci/docker/blob/master/README.md#passing-jvm-parameters
                'JAVA_OPTS': '-Djenkins.install.runSetupWizard=false',
                # https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/README.md#getting-started
                'CASC_JENKINS_CONFIG': '/config-as-code.yaml',
                'network_stack': self.vpc.stack_name,
                'cluster_stack': self.cluster.stack_name,
                'worker_stack': self.worker.stack_name,
                'cluster_arn': self.cluster.cluster.cluster_arn,
                'aws_region': config['DEFAULT']['region'],
                'jenkins_url': config['DEFAULT']['jenkins_url'],
                'subnet_ids': ",".join(
                    [x.subnet_id for x in self.vpc.vpc.private_subnets]),
                'security_group_ids': self.worker.worker_security_group.security_group_id,
                'execution_role_arn': self.worker.worker_execution_role.role_arn,
                'task_role_arn': self.worker.worker_task_role.role_arn,
                'worker_log_group': self.worker.worker_logs_group.log_group_name,
                'worker_log_stream_prefix': self.worker.worker_log_stream.log_stream_name
            },
        )
        # Create the Jenkins master service
        self.jenkins_master_service_main = ecs_patterns.ApplicationLoadBalancedFargateService(
            self, "JenkinsMasterService",
            cpu=int(config['DEFAULT']['fargate_cpu']),
            memory_limit_mib=int(
                config['DEFAULT']['fargate_memory_limit_mib']),
            cluster=self.cluster.cluster,
            desired_count=1,
            enable_ecs_managed_tags=True,
            task_image_options=self.jenkins_task,
            # Discoverable as "master" via Cloud Map (A record).
            cloud_map_options=ecs.CloudMapOptions(
                name="master", dns_record_type=sd.DnsRecordType('A')))
        # Normalize attribute names so the shared code below works for both modes.
        self.jenkins_master_service = self.jenkins_master_service_main.service
        self.jenkins_master_task = self.jenkins_master_service.task_definition
    if config['DEFAULT']['ec2_enabled'] == "yes":
        self.jenkins_load_balancer = elb.ApplicationLoadBalancer(
            self, "JenkinsMasterELB",
            vpc=self.vpc.vpc,
            internet_facing=True,
        )
        self.listener = self.jenkins_load_balancer.add_listener("Listener",
                                                                port=80)
        # EC2 task definition with the Jenkins home on a host EFS mount so
        # state survives task replacement.
        self.jenkins_master_task = ecs.Ec2TaskDefinition(
            self, "JenkinsMasterTaskDef",
            network_mode=ecs.NetworkMode.AWS_VPC,
            volumes=[
                ecs.Volume(name="efs_mount",
                           host=ecs.Host(source_path='/mnt/efs'))
            ],
        )
        self.jenkins_master_task.add_container(
            "JenkinsMasterContainer",
            image=ecs.ContainerImage.from_ecr_repository(
                self.container_image.repository),
            cpu=int(config['DEFAULT']['ec2_cpu']),
            memory_limit_mib=int(
                config['DEFAULT']['ec2_memory_limit_mib']),
            # Same environment contract as the Fargate branch above.
            environment={
                # https://github.com/jenkinsci/docker/blob/master/README.md#passing-jvm-parameters
                'JAVA_OPTS': '-Djenkins.install.runSetupWizard=false',
                'CASC_JENKINS_CONFIG': '/config-as-code.yaml',
                'network_stack': self.vpc.stack_name,
                'cluster_stack': self.cluster.stack_name,
                'worker_stack': self.worker.stack_name,
                'cluster_arn': self.cluster.cluster.cluster_arn,
                'aws_region': config['DEFAULT']['region'],
                'jenkins_url': config['DEFAULT']['jenkins_url'],
                'subnet_ids': ",".join(
                    [x.subnet_id for x in self.vpc.vpc.private_subnets]),
                'security_group_ids': self.worker.worker_security_group.security_group_id,
                'execution_role_arn': self.worker.worker_execution_role.role_arn,
                'task_role_arn': self.worker.worker_task_role.role_arn,
                'worker_log_group': self.worker.worker_logs_group.log_group_name,
                'worker_log_stream_prefix': self.worker.worker_log_stream.log_stream_name
            },
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="JenkinsMaster",
                log_retention=logs.RetentionDays.ONE_WEEK),
        )
        self.jenkins_master_task.default_container.add_mount_points(
            ecs.MountPoint(container_path='/var/jenkins_home',
                           source_volume="efs_mount",
                           read_only=False))
        self.jenkins_master_task.default_container.add_port_mappings(
            ecs.PortMapping(container_port=8080, host_port=8080))
        self.jenkins_master_service = ecs.Ec2Service(
            self, "EC2MasterService",
            task_definition=self.jenkins_master_task,
            cloud_map_options=ecs.CloudMapOptions(
                name="master", dns_record_type=sd.DnsRecordType('A')),
            desired_count=1,
            # min 0% allows the single task to be stopped before its
            # replacement starts (the EFS home is shared, not duplicated).
            min_healthy_percent=0,
            max_healthy_percent=100,
            enable_ecs_managed_tags=True,
            cluster=self.cluster.cluster,
        )
        self.target_group = self.listener.add_targets(
            "JenkinsMasterTarget",
            port=80,
            targets=[
                self.jenkins_master_service.load_balancer_target(
                    container_name=self.jenkins_master_task.
                    default_container.container_name,
                    container_port=8080,
                )
            ],
            deregistration_delay=core.Duration.seconds(10))
    # Opening port 50000 for master <--> worker communications (JNLP agent port)
    self.jenkins_master_service.task_definition.default_container.add_port_mappings(
        ecs.PortMapping(container_port=50000, host_port=50000))
    # Enable connection between Master and Worker
    self.jenkins_master_service.connections.allow_from(
        other=self.worker.worker_security_group,
        port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                            string_representation='Master to Worker 50000',
                            from_port=50000,
                            to_port=50000))
    # Enable connection between Master and Worker on 8080
    self.jenkins_master_service.connections.allow_from(
        other=self.worker.worker_security_group,
        port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                            string_representation='Master to Worker 8080',
                            from_port=8080,
                            to_port=8080))
    # IAM Statements to allow jenkins ecs plugin to talk to ECS as well as the Jenkins cluster #
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(
            actions=[
                "ecs:RegisterTaskDefinition",
                "ecs:DeregisterTaskDefinition",
                "ecs:ListClusters",
                "ecs:DescribeContainerInstances",
                "ecs:ListTaskDefinitions",
                "ecs:DescribeTaskDefinition",
                "ecs:DescribeTasks"
            ],
            resources=["*"],
        ))
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(actions=["ecs:ListContainerInstances"],
                            resources=[self.cluster.cluster.cluster_arn]))
    # RunTask restricted to the worker task-definition family.
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(
            actions=["ecs:RunTask"],
            resources=[
                "arn:aws:ecs:{0}:{1}:task-definition/fargate-workers*".
                format(
                    self.region,
                    self.account,
                )
            ]))
    # StopTask on any task, but only within this cluster (condition below).
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(actions=["ecs:StopTask"],
                            resources=[
                                "arn:aws:ecs:{0}:{1}:task/*".format(
                                    self.region, self.account)
                            ],
                            conditions={
                                "ForAnyValue:ArnEquals": {
                                    "ecs:cluster":
                                    self.cluster.cluster.cluster_arn
                                }
                            }))
    # Needed so the plugin can hand the worker roles to the tasks it launches.
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(actions=["iam:PassRole"],
                            resources=[
                                self.worker.worker_task_role.role_arn,
                                self.worker.worker_execution_role.role_arn
                            ]))
    # END OF JENKINS ECS PLUGIN IAM POLICIES #
    # NOTE(review): "*" actions scoped to the worker log group — presumably
    # intended as full logs access on that group; consider narrowing.
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(
            actions=["*"],
            resources=[self.worker.worker_logs_group.log_group_arn]))
def __init__(self, scope, id, vpc, **kwarg) -> None:
    """Deploy the App Mesh 'colorteller' demo onto ECS Fargate.

    Builds: an ECS cluster with a private Cloud Map namespace
    (svc.test.local); execution/task IAM roles; one Fargate task
    definition + service per color (black/blue/red/white), each running
    an Envoy proxy sidecar, the colorteller app, and an X-Ray daemon;
    the gateway service; a tcpecho helper service; CPU-target
    autoscaling on every service; and a public ALB in front of the
    gateway.

    BUG FIX vs. previous revision: the *blue* colorteller container was
    launched with environment COLOR="black"; each teller now reports
    its own color.
    """
    super().__init__(scope, id, **kwarg)

    # cluster creation
    cluster = aws_ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)

    # service discovery creation
    sd_namespace = cluster.add_default_cloud_map_namespace(
        name="svc.test.local", vpc=vpc)
    aws_servicediscovery.Service(self, "svc.test.local",
                                 namespace=sd_namespace,
                                 load_balancer=True)

    # ECS role creation: the execution role is used by the ECS agent
    # (image pull, deploy), the task role by the running containers.
    ecs_principle = aws_iam.ServicePrincipal('ecs-tasks.amazonaws.com')
    execution_role = aws_iam.Role(self, 'execution-role',
                                  assumed_by=ecs_principle)
    execution_role.add_managed_policy(
        policy=aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AWSCodeDeployRoleForECS"))
    execution_role.add_managed_policy(
        policy=aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AmazonEC2ContainerRegistryReadOnly"))
    task_role = aws_iam.Role(self, 'task-role', assumed_by=ecs_principle)
    task_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AWSAppMeshEnvoyAccess"))
    task_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="CloudWatchFullAccess"))
    task_role.add_managed_policy(
        aws_iam.ManagedPolicy.from_aws_managed_policy_name(
            managed_policy_name="AWSXRayDaemonWriteAccess"))

    # envoy ecr object - the official App Mesh Envoy sidecar image,
    # published by AWS from account 840364872350.
    envoy_ecr = aws_ecr.Repository.from_repository_attributes(
        self, 'aws-envoy',
        repository_arn=core.Stack.of(self).format_arn(
            service="ecr",
            resource="aws-appmesh-envoy",
            account="840364872350"),
        repository_name="aws-appmesh-envoy")

    # colorteller image builds (local Docker assets)
    gateway_image = aws_ecs.ContainerImage.from_asset("./src/gateway")
    colorteller_image = aws_ecs.ContainerImage.from_asset(
        "./src/colorteller")

    # logging setup - one shared log group, one stream prefix per app
    log_group = aws_logs.LogGroup(self, "/ecs/colorteller",
                                  retention=aws_logs.RetentionDays.ONE_DAY)
    ecs_logs = {
        name: aws_ecs.LogDriver.aws_logs(log_group=log_group,
                                         stream_prefix=name)
        for name in ("gateway", "black", "blue", "red", "white", "tcpecho")
    }

    # Mesh properties setup: route app traffic (9080) through the
    # Envoy proxy; the ignored IPs are the ECS metadata endpoints.
    mesh_properties = aws_ecs.AppMeshProxyConfigurationProps(
        app_ports=[9080],
        proxy_egress_port=15001,
        proxy_ingress_port=15000,
        egress_ignored_i_ps=["169.254.170.2", "169.254.169.254"],
        ignored_uid=1337)

    # envoy ulimit defaults (file-descriptor limit for the proxy)
    envoy_ulimit = aws_ecs.Ulimit(hard_limit=15000,
                                  name=aws_ecs.UlimitName.NOFILE,
                                  soft_limit=15000)

    def _envoy_container(task_def, logs, node_name, log_level):
        """Add the App Mesh Envoy sidecar (admin 9901, proxy 15000/15001)."""
        envoy = task_def.add_container(
            "envoy",
            logging=logs,
            environment={
                "ENVOY_LOG_LEVEL": log_level,
                "ENABLE_ENVOY_XRAY_TRACING": "1",
                "ENABLE_ENVOY_STATS_TAGS": "1",
                "APPMESH_VIRTUAL_NODE_NAME": node_name,
                "APPMESH_XDS_ENDPOINT": ""
            },
            image=aws_ecs.ContainerImage.from_ecr_repository(
                repository=envoy_ecr, tag="v1.12.1.1-prod"),
            essential=True,
            # NOTE(review): value below appears redacted in the original
            # source; App Mesh examples run Envoy as UID 1337 (matching
            # ignored_uid above) - confirm before relying on it.
            user="******",
            health_check=aws_ecs.HealthCheck(command=[
                "CMD-SHELL",
                "curl -s http://localhost:9901/ready |grep -q LIVE"
            ]))
        envoy.add_port_mappings(
            aws_ecs.PortMapping(container_port=9901,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15000,
                                protocol=aws_ecs.Protocol.TCP),
            aws_ecs.PortMapping(container_port=15001,
                                protocol=aws_ecs.Protocol.TCP),
        )
        envoy.add_ulimits(envoy_ulimit)
        return envoy

    def _add_xray(task_def, logs):
        """Add the X-Ray daemon sidecar (UDP 2000)."""
        task_def.add_container(
            "xray",
            logging=logs,
            image=aws_ecs.ContainerImage.from_registry(
                "amazon/aws-xray-daemon")).add_port_mappings(
                    aws_ecs.PortMapping(container_port=2000,
                                        protocol=aws_ecs.Protocol.UDP))

    # fargate task def - requires envoy proxy container, gateway app and x-ray
    gateway_task_def = aws_ecs.FargateTaskDefinition(
        self,
        "gateway_task",
        cpu=256,
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role,
        proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
            container_name="envoy", properties=mesh_properties))
    gateway_task_def.add_container(
        "gateway",
        logging=ecs_logs["gateway"],
        environment={
            "SERVER_PORT": "9080",
            "STAGE": "v1.1",
            "COLOR_TELLER_ENDPOINT": "colorteller.svc.test.local:9080",
            "TCP_ECHO_ENDPOINT": "tcpecho.svc.test.local:2701"
        },
        image=gateway_image).add_port_mappings(
            aws_ecs.PortMapping(container_port=9080,
                                protocol=aws_ecs.Protocol.TCP))
    _add_xray(gateway_task_def, ecs_logs["gateway"])
    _envoy_container(gateway_task_def, ecs_logs["gateway"],
                     "mesh/ColorTellerAppMesh/virtualNode/gateway", "debug")

    def _colorteller_task_def(color, family, stage):
        """One colorteller task def: envoy proxy + app container + x-ray.

        Construct IDs ("<color>-task", "envoy", "<color>", "xray") and
        all values match the previous hand-unrolled definitions.
        """
        logs = ecs_logs[color]
        task_def = aws_ecs.FargateTaskDefinition(
            self,
            color + "-task",
            cpu=256,
            family=family,
            memory_limit_mib=512,
            execution_role=execution_role,
            task_role=task_role,
            proxy_configuration=aws_ecs.AppMeshProxyConfiguration(
                container_name="envoy", properties=mesh_properties))
        envoy = _envoy_container(
            task_def, logs,
            "mesh/ColorTellerAppMesh/virtualNode/" + color, "info")
        app_container = task_def.add_container(
            color,
            logging=logs,
            environment={
                "COLOR": color,  # each teller reports its own color
                "SERVER_PORT": "9080",
                "STAGE": stage
            },
            image=colorteller_image)
        app_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=9080,
                                protocol=aws_ecs.Protocol.TCP))
        # App only starts once its Envoy sidecar is healthy.
        app_container.add_container_dependencies(
            aws_ecs.ContainerDependency(
                container=envoy,
                condition=aws_ecs.ContainerDependencyCondition.HEALTHY))
        _add_xray(task_def, logs)
        return task_def

    # black/blue/red/white task defs. Family "red-task" (not "red") is
    # kept as-is to avoid replacing the deployed task-definition family.
    black_task_def = _colorteller_task_def("black", family="black",
                                           stage="v1.1")
    blue_task_def = _colorteller_task_def("blue", family="blue",
                                          stage="v1.1")
    red_task_def = _colorteller_task_def("red", family="red-task",
                                         stage="v1.2")
    white_task_def = _colorteller_task_def("white", family="white",
                                           stage="v1.1")

    # tcpecho service (external docker image) - no mesh proxy
    tcpecho_task_def = aws_ecs.FargateTaskDefinition(
        self,
        'tcpecho-tasks',
        cpu=256,
        family="tcpecho",
        memory_limit_mib=512,
        execution_role=execution_role,
        task_role=task_role)
    tcpecho_task_def.add_container(
        "tcpecho",
        logging=ecs_logs["tcpecho"],
        environment={
            "TCP_PORT": "2701",
            "NODE_NAME": "mesh/ColorTellerAppMesh/virtualNode/echo"
        },
        image=aws_ecs.ContainerImage.from_registry("cjimti/go-echo"),
        essential=True,
    ).add_port_mappings(
        aws_ecs.PortMapping(container_port=2701,
                            protocol=aws_ecs.Protocol.TCP))

    # adds task defs to fargate services - adds security group access
    # to local vpc cidr block; all the services are treated the same way
    def _fargate_service(cid, task_def, dns_name, port):
        svc = aws_ecs.FargateService(
            self,
            cid,
            cluster=cluster,
            task_definition=task_def,
            desired_count=2,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=sd_namespace, name=dns_name))
        svc.connections.security_groups[0].add_ingress_rule(
            peer=aws_ec2.Peer.ipv4(vpc.vpc_cidr_block),
            connection=aws_ec2.Port.tcp(port),
            description="Allow http inbound from VPC")
        return svc

    gateway_fargate_service = _fargate_service(
        "gateway", gateway_task_def, "gateway", 9080)
    black_colorteller_fargate_service = _fargate_service(
        "black", black_task_def, "black", 9080)
    blue_colorteller_fargate_service = _fargate_service(
        "blue", blue_task_def, "blue", 9080)
    red_colorteller_fargate_service = _fargate_service(
        "red", red_task_def, "red", 9080)
    # "colorteller" DNS name points at white so the endpoint resolves.
    white_colorteller_fargate_service = _fargate_service(
        "white", white_task_def, "colorteller", 9080)
    echo_fargate_service = _fargate_service(
        "tcpecho", tcpecho_task_def, "tcpecho", 2701)

    # adds autoscaling policies to all services. (The previous
    # try/except AttributeError around `service.service` was dead code:
    # every service here is a plain FargateService.)
    for service in [
            black_colorteller_fargate_service,
            blue_colorteller_fargate_service,
            red_colorteller_fargate_service,
            white_colorteller_fargate_service, gateway_fargate_service,
            echo_fargate_service
    ]:
        scaling = service.auto_scale_task_count(max_capacity=2)
        scaling.scale_on_cpu_utilization(
            "CpuScaling",
            target_utilization_percent=50,
            scale_in_cooldown=core.Duration.seconds(60),
            scale_out_cooldown=core.Duration.seconds(60),
        )

    # configure loadbalancer to listen on port 80 and add targets to
    # gateway and echo apps
    load_balancer = aws_elasticloadbalancingv2.ApplicationLoadBalancer(
        self, "lb", vpc=vpc, internet_facing=True)
    listener = load_balancer.add_listener("PublicListener",
                                          port=80,
                                          open=True)
    health_check = aws_elasticloadbalancingv2.HealthCheck(
        interval=core.Duration.seconds(60),
        path="/ping",
        port="9080",
        timeout=core.Duration.seconds(5))

    # attach ALB to ECS service
    listener.add_targets(
        "gateway",
        port=80,
        targets=[gateway_fargate_service, echo_fargate_service],
        health_check=health_check,
    )

    # outputs of ALB and cluster
    core.CfnOutput(self,
                   "LoadBalancerDNS",
                   value=load_balancer.load_balancer_dns_name)
    core.CfnOutput(self, "ClusterName", value=cluster.cluster_name)
def __init__(self, scope: core.Construct, id: str, props: props_type, **kwargs) -> None:
    """Apache Airflow on ECS Fargate.

    Creates three Fargate services from pre-built ECR images -
    webserver, scheduler and Celery worker - wired to the RDS metadata
    DB and Redis broker passed in via ``props``, plus an
    internet-facing ALB in front of the webserver.

    Reads from the environment: BUCKET_NAME, FERNET_SECRET_ARN and
    IMAGE_TAG (defaults to "latest").
    """
    super().__init__(scope, id, **kwargs)
    # props is a plain mapping; SimpleNamespace gives attribute access
    # (ns.vpc, ns.rds, ns.redis, ns.ecr, ns.airflow_cluster).
    ns = SimpleNamespace(**props)
    bucket_name = os.environ.get("BUCKET_NAME")
    # Pre-existing Secrets Manager secret holding Airflow's Fernet key
    # (used to encrypt connection passwords in the metadata DB).
    fernet_key_secret = sm.Secret.from_secret_arn(
        self, "fernetSecret", os.environ.get("FERNET_SECRET_ARN"))
    # Private DNS namespace so scheduler/worker can reach the webserver
    # at "webserver.airflow".
    webserver_ns = sd.PrivateDnsNamespace(
        self,
        "webserver-dns-namespace",
        vpc=ns.vpc.instance,
        name="airflow",
        description="Private DNS for Airflow webserver",
    )
    # Webserver
    webserver_task = ecs.FargateTaskDefinition(
        self,
        "webserver-cdk",
        family="webserver-cdk",
        cpu=512,
        memory_limit_mib=1024,
        task_role=ns.airflow_cluster.airflow_task_role,
        execution_role=ns.airflow_cluster.task_execution_role,
    )
    webserver_container = webserver_task.add_container(
        "webserver-cdk-container",
        image=ecs.ContainerImage.from_ecr_repository(
            ns.ecr.airflow_webserver_repo,
            os.environ.get("IMAGE_TAG", "latest"),
        ),
        logging=ecs.AwsLogDriver(
            stream_prefix="ecs",
            log_group=ns.airflow_cluster.webserver_log_group),
        # DB credentials and the Fernet key are injected as ECS
        # secrets, not plaintext environment variables.
        environment={
            "AIRFLOW_DATABASE_NAME": ns.rds.db_name,
            "AIRFLOW_DATABASE_PORT_NUMBER": "5432",
            "AIRFLOW_DATABASE_HOST":
                ns.rds.instance.db_instance_endpoint_address,
            "AIRFLOW_EXECUTOR": "CeleryExecutor",
            "AIRFLOW_LOAD_EXAMPLES": "no",
            "AIRFLOW__SCHEDULER__DAG_DIR_LIST_INTERVAL": "30",
            "BUCKET_NAME": bucket_name,
        },
        secrets={
            "AIRFLOW_DATABASE_USERNAME":
                ecs.Secret.from_secrets_manager(ns.rds.rds_secret,
                                                field="username"),
            "AIRFLOW_DATABASE_PASSWORD":
                ecs.Secret.from_secrets_manager(ns.rds.rds_secret,
                                                field="password"),
            "AIRFLOW_FERNET_KEY":
                ecs.Secret.from_secrets_manager(fernet_key_secret),
        },
    )
    ws_port_mapping = ecs.PortMapping(container_port=8080,
                                      host_port=8080,
                                      protocol=ecs.Protocol.TCP)
    webserver_container.add_port_mappings(ws_port_mapping)
    # Webserver service - registered in the private DNS namespace.
    webserver_service = ecs.FargateService(
        self,
        "webserverService",
        service_name="webserver_cdk",
        cluster=ns.airflow_cluster.instance,
        task_definition=webserver_task,
        desired_count=1,
        security_group=ns.vpc.airflow_sg,
        assign_public_ip=False,
        cloud_map_options=ecs.CloudMapOptions(
            cloud_map_namespace=webserver_ns,
            name="webserver",
            dns_record_type=sd.DnsRecordType.A,
            dns_ttl=core.Duration.seconds(30),
        ),
    )
    # Scheduler
    scheduler_task = ecs.FargateTaskDefinition(
        self,
        "scheduler-cdk",
        family="scheduler-cdk",
        cpu=512,
        memory_limit_mib=2048,
        task_role=ns.airflow_cluster.airflow_task_role,
        execution_role=ns.airflow_cluster.task_execution_role,
    )
    scheduler_task.add_container(
        "scheduler-cdk-container",
        image=ecs.ContainerImage.from_ecr_repository(
            ns.ecr.airflow_scheduler_repo,
            os.environ.get("IMAGE_TAG", "latest"),
        ),
        logging=ecs.AwsLogDriver(
            stream_prefix="ecs",
            log_group=ns.airflow_cluster.scheduler_log_group),
        environment={
            "AIRFLOW_DATABASE_NAME": ns.rds.db_name,
            "AIRFLOW_DATABASE_PORT_NUMBER": "5432",
            "AIRFLOW_DATABASE_HOST":
                ns.rds.instance.db_instance_endpoint_address,
            "AIRFLOW_EXECUTOR": "CeleryExecutor",
            "AIRFLOW_WEBSERVER_HOST": "webserver.airflow",
            "AIRFLOW_LOAD_EXAMPLES": "no",
            "AIRFLOW__SCHEDULER__DAG_DIR_LIST_INTERVAL": "30",
            "REDIS_HOST": ns.redis.instance.attr_redis_endpoint_address,
            "BUCKET_NAME": bucket_name,
        },
        secrets={
            "AIRFLOW_DATABASE_USERNAME":
                ecs.Secret.from_secrets_manager(ns.rds.rds_secret,
                                                field="username"),
            "AIRFLOW_DATABASE_PASSWORD":
                ecs.Secret.from_secrets_manager(ns.rds.rds_secret,
                                                field="password"),
            "AIRFLOW_FERNET_KEY":
                ecs.Secret.from_secrets_manager(fernet_key_secret),
        },
    )
    # Scheduler service - no ports exposed, no service discovery needed.
    ecs.FargateService(
        self,
        "schedulerService",
        service_name="scheduler_cdk",
        cluster=ns.airflow_cluster.instance,
        task_definition=scheduler_task,
        desired_count=1,
        security_group=ns.vpc.airflow_sg,
        assign_public_ip=False,
    )
    # Worker - larger CPU/memory than webserver/scheduler.
    worker_task = ecs.FargateTaskDefinition(
        self,
        "worker-cdk",
        family="worker-cdk",
        cpu=1024,
        memory_limit_mib=3072,
        task_role=ns.airflow_cluster.airflow_task_role,
        execution_role=ns.airflow_cluster.task_execution_role,
    )
    worker_container = worker_task.add_container(
        "worker-cdk-container",
        image=ecs.ContainerImage.from_ecr_repository(
            ns.ecr.airflow_worker_repo,
            os.environ.get("IMAGE_TAG", "latest"),
        ),
        logging=ecs.AwsLogDriver(
            stream_prefix="ecs",
            log_group=ns.airflow_cluster.worker_log_group),
        environment={
            "AIRFLOW_DATABASE_NAME": ns.rds.db_name,
            "AIRFLOW_DATABASE_PORT_NUMBER": "5432",
            "AIRFLOW_DATABASE_HOST":
                ns.rds.instance.db_instance_endpoint_address,
            "AIRFLOW_EXECUTOR": "CeleryExecutor",
            "AIRFLOW_WEBSERVER_HOST": "webserver.airflow",
            "AIRFLOW__SCHEDULER__DAG_DIR_LIST_INTERVAL": "30",
            "AIRFLOW_LOAD_EXAMPLES": "no",
            "REDIS_HOST": ns.redis.instance.attr_redis_endpoint_address,
            "BUCKET_NAME": bucket_name,
        },
        secrets={
            "AIRFLOW_DATABASE_USERNAME":
                ecs.Secret.from_secrets_manager(ns.rds.rds_secret,
                                                field="username"),
            "AIRFLOW_DATABASE_PASSWORD":
                ecs.Secret.from_secrets_manager(ns.rds.rds_secret,
                                                field="password"),
            "AIRFLOW_FERNET_KEY":
                ecs.Secret.from_secrets_manager(fernet_key_secret),
        },
    )
    # Port 8793 serves worker task logs back to the webserver.
    worker_port_mapping = ecs.PortMapping(container_port=8793,
                                          host_port=8793,
                                          protocol=ecs.Protocol.TCP)
    worker_container.add_port_mappings(worker_port_mapping)
    # Worker service
    ecs.FargateService(
        self,
        "workerService",
        service_name="worker_cdk",
        cluster=ns.airflow_cluster.instance,
        task_definition=worker_task,
        desired_count=1,
        security_group=ns.vpc.airflow_sg,
        assign_public_ip=False,
    )
    # ALB - public entry point for the Airflow UI.
    lb = elbv2.ApplicationLoadBalancer(
        self,
        "LB",
        vpc=ns.vpc.instance,
        internet_facing=True,
        security_group=ns.vpc.alb_sg,
    )
    listener = lb.add_listener("airflow-webserver-cdk-listener",
                               port=80,
                               open=True)
    webserver_hc = elbv2.HealthCheck(
        interval=core.Duration.seconds(60),
        path="/health",
        timeout=core.Duration.seconds(5),
    )
    # Attach ALB to ECS Service
    listener.add_targets(
        "airflow-webserver-cdk-default",
        port=80,
        targets=[webserver_service],
        health_check=webserver_hc,
    )
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """Backend Fargate service behind Cloud Map service discovery.

    Runs the 'backend' ECR image (a service talking to DynamoDB) on
    the cluster passed in via ``props``, and opens the security-group
    paths: frontend -> backend, backend <-> backend, and backend ->
    the VPC endpoint security group.
    """
    super().__init__(scope, id, **kwargs)
    # DynamoDB table name comes from the CDK context (cdk.json / -c).
    table_name = self.node.try_get_context('table_name')
    vpc = props['vpc']
    endpoint_sg = props['endpoint_sg']
    cluster = props['cluster']
    frontend_service = props['frontend_service']
    # Create the task execution role
    task_execution_role_policy = iam.ManagedPolicy.from_aws_managed_policy_name(
        'service-role/AmazonECSTaskExecutionRolePolicy')
    task_execution_role = iam.Role(
        self, 'TaskExecutionRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        managed_policies=[task_execution_role_policy])
    # Create the task role
    # NOTE(review): AmazonDynamoDBFullAccess is broad; a table-scoped
    # policy would be tighter - confirm before narrowing.
    task_role_policy = iam.ManagedPolicy.from_aws_managed_policy_name(
        'AmazonDynamoDBFullAccess')
    task_role = iam.Role(
        self, 'TaskRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        managed_policies=[task_role_policy])
    # Reference the existing ECR repository
    repository = ecr.Repository.from_repository_name(
        self, 'Backend', 'backend')
    # Create the task definition
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs.README.html#task-definitions
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/FargateTaskDefinition.html
    task_definition = ecs.FargateTaskDefinition(
        self, 'TaskDef',
        memory_limit_mib=512,
        cpu=256,
        execution_role=task_execution_role,
        task_role=task_role)
    container = task_definition.add_container(
        'Container',
        image=ecs.ContainerImage.from_ecr_repository(repository=repository,
                                                     tag='latest'),
        logging=ecs.AwsLogDriver(stream_prefix='/ecs/'),
        environment={'DYNAMODB_TABLE_NAME': table_name})
    container.add_port_mappings(ecs.PortMapping(container_port=5000))
    # Security group for the backend service
    backend_service_sg = ec2.SecurityGroup(self,
                                           'BackendServiceSecurityGroup',
                                           vpc=vpc)
    # Create the service
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs.README.html#service
    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs.html
    backend_service = ecs.FargateService(
        self, 'BackendService',
        cluster=cluster,
        task_definition=task_definition,
        desired_count=2,
        min_healthy_percent=50,
        max_healthy_percent=200,
        security_group=backend_service_sg,
        cloud_map_options=ecs.CloudMapOptions(name='backend'))
    # Allow traffic from the frontend service
    backend_service.connections.allow_from(frontend_service,
                                           ec2.Port.all_traffic())
    # Allow traffic within its own security group
    backend_service.connections.allow_internally(ec2.Port.all_traffic())
    # Allow access to the VPC endpoint security group
    backend_service.connections.allow_to(endpoint_sg, ec2.Port.all_traffic())
    # Pass the props through so downstream stacks can consume them.
    self.output_props = props.copy()
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Flask frontend + Redis backend on Fargate behind NLBs.

    Both services register in the 'service.local' Cloud Map namespace;
    the frontend load balancer is public, the backend one is internal
    and reachable only from the frontend tasks.
    """
    super().__init__(scope, id, **kwargs)

    # Networking and cluster: two AZs, default private DNS namespace.
    vpc = ec2.Vpc(self, "SampleVPC", max_azs=2)  # default is all AZs in region
    cluster = ecs.Cluster(self, "ServiceCluster", vpc=vpc)
    cluster.add_default_cloud_map_namespace(name="service.local")

    # Frontend task: locally built Flask image listening on 5000.
    flask_asset = DockerImageAsset(self, "frontend",
                                   directory="./frontend",
                                   file="Dockerfile")
    flask_task_def = ecs.FargateTaskDefinition(self, "frontend-task",
                                               cpu=512,
                                               memory_limit_mib=2048)
    flask_container = flask_task_def.add_container(
        "frontend",
        image=ecs.ContainerImage.from_docker_image_asset(flask_asset),
        essential=True,
        environment={"LOCALDOMAIN": "service.local"},
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix="FrontendContainer",
            log_retention=logs.RetentionDays.ONE_WEEK))
    flask_container.add_port_mappings(
        ecs.PortMapping(container_port=5000, host_port=5000))

    # Backend task: stock Redis image on 6379.
    redis_task_def = ecs.FargateTaskDefinition(self, "backend-task",
                                               cpu=512,
                                               memory_limit_mib=2048)
    redis_container = redis_task_def.add_container(
        "backend",
        image=ecs.ContainerImage.from_registry("redis:alpine"),
        essential=True,
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix="BackendContainer",
            log_retention=logs.RetentionDays.ONE_WEEK))
    redis_container.add_port_mappings(
        ecs.PortMapping(container_port=6379, host_port=6379))

    # Public network load balancer in front of the frontend service.
    frontend_service = ecs_patterns.NetworkLoadBalancedFargateService(
        self,
        id="frontend-service",
        service_name="frontend",
        cluster=cluster,  # Required
        cloud_map_options=ecs.CloudMapOptions(name="frontend"),
        cpu=512,  # Default is 256
        desired_count=2,  # Default is 1
        task_definition=flask_task_def,
        memory_limit_mib=2048,  # Default is 512
        listener_port=80,
        public_load_balancer=True)
    # NLBs don't manage security groups, so open the app port here.
    frontend_service.service.connections.allow_from_any_ipv4(
        ec2.Port.tcp(5000), "flask inbound")

    # Internal network load balancer in front of Redis.
    backend_service = ecs_patterns.NetworkLoadBalancedFargateService(
        self,
        id="backend-service",
        service_name="backend",
        cluster=cluster,  # Required
        cloud_map_options=ecs.CloudMapOptions(name="backend"),
        cpu=512,  # Default is 256
        desired_count=2,  # Default is 1
        task_definition=redis_task_def,
        memory_limit_mib=2048,  # Default is 512
        listener_port=6379,
        public_load_balancer=False)
    # Only the frontend tasks may reach Redis.
    backend_service.service.connections.allow_from(
        frontend_service.service, ec2.Port.tcp(6379))