def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Provision a VPC, an EC2-backed ECS cluster and an ALB-fronted httpd service.

    Emits the load balancer's DNS name as a CloudFormation output.
    """
    super().__init__(scope, construct_id, **kwargs)

    # VPC: 2 AZs, a single NAT gateway to keep costs down.
    ecs_vpc = aws_ec2.Vpc(self, "EcsVpc", max_azs=2, nat_gateways=1)

    # ECS cluster
    ecs_cluster = aws_ecs.Cluster(self, "EcsCluster", vpc=ecs_vpc)

    # ECS cluster capacity — the EC2 launch type needs container instances.
    ecs_cluster.add_capacity(
        "ecsClusterASGgroup",
        instance_type=aws_ec2.InstanceType("t2.micro"))

    # ECS service attached to an Application Load Balancer.
    ecs_elb_service = aws_ecs_patterns.ApplicationLoadBalancedEc2Service(
        self,
        "EcsElb",
        cluster=ecs_cluster,
        memory_reservation_mib=512,
        task_image_options={
            "image": aws_ecs.ContainerImage.from_registry("httpd"),
            "environment": {
                "ENVIRONMENT": "PROD"
            }
        })

    # ALB URL output. The DNS name is already a string, so the previous
    # f-string wrapper was redundant; the unused local binding was dropped.
    core.CfnOutput(
        self,
        "ecsOutput",
        value=ecs_elb_service.load_balancer.load_balancer_dns_name,
        description="elb url")
def __init__(self, scope: core.Stack, id: str, **kwargs):
    """EC2 capacity-provider demo: public ALB service with LaunchType removed
    so the cluster's capacity provider strategy takes effect."""
    super().__init__(scope, id, **kwargs)

    self.base_platform = BasePlatform(self, self.stack_name)

    # Container image and runtime settings for the load-balanced task.
    self.task_image = aws_ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
        image=aws_ecs.ContainerImage.from_registry(
            "adam9098/ecsdemo-capacityproviders:latest"),
        container_port=5000,
        environment={'AWS_DEFAULT_REGION': getenv('AWS_DEFAULT_REGION')},
    )

    self.load_balanced_service = aws_ecs_patterns.ApplicationLoadBalancedEc2Service(
        self,
        "EC2CapacityProviderService",
        service_name='ecsdemo-capacityproviders-ec2',
        cluster=self.base_platform.ecs_cluster,
        cpu=256,
        memory_limit_mib=512,
        desired_count=3,
        #desired_count=12,
        public_load_balancer=True,
        task_image_options=self.task_image,
    )

    # The pattern's default child is not the CfnService itself (children are
    # the service plus its security group), so take the first child explicitly.
    #self.cfn_resource = self.load_balanced_service.service.node.default_child
    self.cfn_resource = self.load_balanced_service.service.node.children[0]
    # Remove LaunchType from the synthesized template so capacity providers apply.
    self.cfn_resource.add_deletion_override("Properties.LaunchType")

    # Let the task list and inspect the tasks in its own cluster.
    self.load_balanced_service.task_definition.add_to_task_role_policy(
        aws_iam.PolicyStatement(
            actions=['ecs:ListTasks', 'ecs:DescribeTasks'],
            resources=['*'],
        ))
def __init__(self, scope: core.Construct, construct_id: str, cluster, **kwargs) -> None:
    """Load-balanced nginx service with request-count and scheduled task scaling."""
    super().__init__(scope, construct_id, **kwargs)

    image_options = ecsPatterns.ApplicationLoadBalancedTaskImageOptions(
        image=ecs.ContainerImage.from_registry("nginx:stable"),
    )
    service = ecsPatterns.ApplicationLoadBalancedEc2Service(
        self,
        "nginx",
        cluster=cluster,
        memory_limit_mib=1024,
        cpu=512,
        task_image_options=image_options,
    )

    # Task-count scaling range: 1..10 tasks.
    scaling = service.service.auto_scale_task_count(min_capacity=1,
                                                    max_capacity=10)

    # Track roughly 50 requests per target on the ALB.
    scaling.scale_on_request_count(
        "ReqCountScalePolicy",
        requests_per_target=50,
        target_group=service.target_group,
    )

    # Pre-warm to at least 3 tasks daily at 08:00.
    scaling.scale_on_schedule(
        "TestSchedulePolicy",
        schedule=applicationautoscaling.Schedule.cron(hour="8", minute="0"),
        min_capacity=3,
    )
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """ECS-on-EC2 (Graviton2) web app behind an HTTPS ALB with Route53 DNS."""
    super().__init__(scope, construct_id, **kwargs)

    # Docker build from the local ./myWebApp directory.
    asset = ecs.ContainerImage.from_asset(
        path.join(path.dirname(__file__), 'myWebApp'))

    # ECS cluster/resources
    # NOTE 't4g.micro' is a Graviton2 EC2 type
    # https://aws.amazon.com/ec2/graviton/
    # Free trial until June 30th 2021
    # NOTE Bottlerocket is AWS's new container-specific Linux
    # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-bottlerocket.html
    # https://github.com/bottlerocket-os/bottlerocket/blob/develop/QUICKSTART-ECS.md
    # but doesn't seem to work yet with CDK constructs (https://github.com/aws/aws-cdk/issues/9945)
    capacity_options = dict(
        instance_type=ec2.InstanceType('t4g.micro'),
        # ARM-optimized ECS AMI — needed for Graviton2, works.
        machine_image=ecs.EcsOptimizedImage.amazon_linux2(
            ecs.AmiHardwareType.ARM),
        desired_capacity=1,
        task_drain_time=cdk.Duration.seconds(0))
    cluster = ecs.Cluster(
        self,
        'myWebApp-cluster',
        capacity=capacity_options,
        container_insights=True)

    # NOTE(review): zone id and name are hard-coded for one account — confirm
    # before reusing this stack elsewhere.
    hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
        self,
        'myTestZone',
        hosted_zone_id='Z10366642MKE2T8K2STDR',
        zone_name='livedata-dev-work.com')

    # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs_patterns/ApplicationLoadBalancedEc2Service.html
    load_balanced_service = ecs_patterns.ApplicationLoadBalancedEc2Service(
        self,
        'myWebApp-service',
        # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/memory-management.html
        memory_limit_mib=512,  # TODO
        cluster=cluster,
        desired_count=1,
        max_healthy_percent=100,  # https://github.com/aws/aws-cdk/issues/14107
        min_healthy_percent=0,  # TODO daemon question
        service_name='myWebApp-service',
        task_image_options={'image': asset},
        protocol=ApplicationProtocol.HTTPS,
        redirect_http=True,
        domain_name='alb.livedata-dev-work.com',
        domain_zone=hosted_zone,
        public_load_balancer=True)

    # Drop targets immediately on deregistration (no connection draining).
    load_balanced_service.target_group.set_attribute(
        'deregistration_delay.timeout_seconds', '0')
def __init__(self, scope: core.Construct, id: str, cluster_configuration, **kwargs) -> None:
    """VPC + ECS cluster + load-balanced service, Fargate or EC2 by config.

    cluster_configuration keys used: cluster_name, fargate_enabled,
    container_cpu, container_desired_count, container_image,
    container_port, container_mem.
    """
    super().__init__(scope, id, **kwargs)
    self.cluster_configuration = cluster_configuration

    cluster_vpc = aws_ec2.Vpc(
        self,
        "ClusterVPC",
        cidr="10.0.0.0/16",
        nat_gateways=1,
    )
    core.Tags.of(cluster_vpc).add(
        "Name", cluster_configuration['cluster_name'] + "VPC")

    cluster = aws_ecs.Cluster(
        self,
        "ECSCluster",
        cluster_name=cluster_configuration['cluster_name'],
        vpc=cluster_vpc)

    # Settings shared by both launch types — previously duplicated verbatim
    # in each branch.
    common_kwargs = dict(
        service_name=cluster_configuration['cluster_name'] + "Service",
        cluster=cluster,  # Required
        cpu=cluster_configuration["container_cpu"],  # Default is 256
        desired_count=cluster_configuration["container_desired_count"],
        task_image_options=aws_ecs_patterns.
        ApplicationLoadBalancedTaskImageOptions(
            image=aws_ecs.ContainerImage.from_registry(
                cluster_configuration["container_image"]),
            container_port=cluster_configuration["container_port"]),
        memory_limit_mib=cluster_configuration["container_mem"],  # Default is 512
        public_load_balancer=True)  # Default is False

    if self.cluster_configuration['fargate_enabled'] is True:
        aws_ecs_patterns.ApplicationLoadBalancedFargateService(
            self, "ECSFargateService", **common_kwargs)
    else:
        # NOTE(review): the construct id says "Fargate" but this is the EC2
        # branch; kept unchanged so deployed stacks' logical IDs stay stable.
        aws_ecs_patterns.ApplicationLoadBalancedEc2Service(
            self, "ECSFargateService", **common_kwargs)
def __init__(self, scope: core.Stack, id: str, **kwargs):
    # Purpose: EC2 capacity-provider demo service behind a public ALB,
    # with Spot-friendly target-group settings applied via CloudFormation
    # escape hatches.
    super().__init__(scope, id, **kwargs)

    self.base_platform = BasePlatform(self, self.stack_name)

    # Demo image, container port 5000, region passed through from the env.
    self.task_image = aws_ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
        image=aws_ecs.ContainerImage.from_registry("adam9098/ecsdemo-capacityproviders:latest"),
        container_port=5000,
        environment={
            'AWS_DEFAULT_REGION': getenv('AWS_DEFAULT_REGION')
        }
    )

    self.load_balanced_service = aws_ecs_patterns.ApplicationLoadBalancedEc2Service(
        self, "EC2CapacityProviderService",
        service_name='ecsdemo-capacityproviders-ec2',
        cluster=self.base_platform.ecs_cluster,
        cpu=256,
        memory_limit_mib=512,
        desired_count=1,
        #desired_count=10,
        public_load_balancer=True,
        task_image_options=self.task_image,
    )

    # Update Target group settings for Spot instances to adjust deregistration delay to less than 120 sec.
    # Adjust healthy threshold to 2 to reduce the time for a new task to be healthy in 1 minute
    # NOTE(review): walks the construct tree by child id (LB -> PublicListener
    # -> ECSGroup); breaks if the pattern renames its internal constructs.
    self.cfn_target_group = self.load_balanced_service.node.find_child('LB'
        ).node.find_child('PublicListener'
        ).node.find_child('ECSGroup'
        ).node.default_child
    # Raw CfnTargetGroup property overrides (no L2 API for these).
    self.cfn_target_group.target_group_attributes = [{
        "key": "deregistration_delay.timeout_seconds",
        "value": "90"
    }]
    self.cfn_target_group.healthy_threshold_count = 2

    # This should work, but the default child is not the service cfn, it's a list of cfn service and sec group
    #self.cfn_resource = self.load_balanced_service.service.node.default_child
    self.cfn_resource = self.load_balanced_service.service.node.children[0]
    # Remove LaunchType so the cluster's capacity provider strategy applies.
    self.cfn_resource.add_deletion_override("Properties.LaunchType")

    # Allow the task to list/inspect tasks in its own cluster.
    self.load_balanced_service.task_definition.add_to_task_role_policy(
        aws_iam.PolicyStatement(
            actions=[
                'ecs:ListTasks',
                'ecs:DescribeTasks'
            ],
            resources=['*']
        )
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """nginx on an EC2-backed ECS cluster behind an ALB, bridge networking."""
    super().__init__(scope, id, **kwargs)

    vpc = ec2.Vpc(self, "CDK-LB", max_azs=2)

    cluster = ecs.Cluster(self, "CDK-Cluster", vpc=vpc)
    cluster.add_capacity("DefaultAutoScalingGroup",
                         instance_type=ec2.InstanceType("t3.nano"))

    # Bridge-mode task definition with a single nginx container.
    task_definition = ecs.Ec2TaskDefinition(
        self,
        "nginx-awsvpc",
        network_mode=ecs.NetworkMode.BRIDGE,
    )
    web = task_definition.add_container(
        "nginx",
        image=ecs.ContainerImage.from_registry("nginx:latest"),
        cpu=100,
        memory_limit_mib=256,
        essential=True)
    web.add_port_mappings(
        ecs.PortMapping(container_port=80, protocol=ecs.Protocol.TCP))

    # ALB-fronted service listening on plain HTTP port 80.
    ecs_service = ecs_patterns.ApplicationLoadBalancedEc2Service(
        self,
        "EC2-Service",
        cluster=cluster,
        memory_limit_mib=512,
        task_definition=task_definition,
        listener_port=80)

    core.CfnOutput(
        self,
        "LoadBalancerDNS",
        value=ecs_service.load_balancer.load_balancer_dns_name)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """VPC + EC2-backed ECS cluster + ALB-fronted web service with a URL output."""
    super().__init__(scope, id, **kwargs)

    # Add your stack resources below):
    # Create VPC
    vpc = _ec2.Vpc(self, "microServiceVpc", max_azs=2, nat_gateways=1)

    # Create ECS Cluster
    micro_service_cluster = _ecs.Cluster(self, "webServiceCluster", vpc=vpc)

    # Define ECS Cluster Capacity
    micro_service_cluster.add_capacity(
        "microServiceAutoScalingGroup",
        instance_type=_ec2.InstanceType("t2.micro"))

    # Deploy Container in the micro Service & Attach a LoadBalancer
    load_balanced_web_service = _ecs_patterns.ApplicationLoadBalancedEc2Service(
        self,
        "webService",
        cluster=micro_service_cluster,
        memory_reservation_mib=512,  # Soft Limit
        task_image_options={
            "image": _ecs.ContainerImage.from_registry("mystique/web-server"),
            "environment": {
                # NOTE(review): "ENVIRONEMNT" is a typo of "ENVIRONMENT",
                # left unchanged in case the container image reads the
                # misspelled key — confirm against the image before fixing.
                "ENVIRONEMNT": "PROD"
            }
        })

    # Output Web Service Url. Fixed the "Acces" typo in the description and
    # dropped the redundant f-string and unused local binding.
    core.CfnOutput(
        self,
        "webServiceUrl",
        value=load_balanced_web_service.load_balancer.load_balancer_dns_name,
        description="Access the web service url from your browser")
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    # Purpose: run a Lighthouse eth2 beacon node on an EC2-backed ECS
    # cluster with a persistent shared Docker volume, behind a public ALB.
    super().__init__(scope, construct_id, **kwargs)

    vpc = ec2.Vpc(self, "PirateVpc", max_azs=1)

    cluster = ecs.Cluster(self, "PirateCluster", container_insights=True, vpc=vpc)
    # Large EBS root volume on the container hosts for chain data.
    cluster.add_capacity(
        'Shipyard',
        block_devices=[
            autoscaling.BlockDevice(
                device_name='/dev/xvda',
                volume=autoscaling.BlockDeviceVolume.ebs(
                    volume_size=1000))  # 1 TB
        ],
        instance_type=ec2.InstanceType('m4.4xlarge'))

    # Local-driver Docker volume, SHARED scope so data persists across tasks.
    task_definition = ecs.Ec2TaskDefinition(
        self,
        'PirateTask',
        family='eth2',
        volumes=[
            ecs.Volume(
                name='v',
                docker_volume_configuration=ecs.DockerVolumeConfiguration(
                    driver='local',
                    scope=ecs.Scope.SHARED,  # So it persists between beyond the lifetime of the task
                    autoprovision=True))
        ])

    container = task_definition.add_container(
        'barbosa',
        image=ecs.ContainerImage.from_registry(
            'sigp/lighthouse'),  # TODO: configurable
        # NOTE(review): each command element embeds spaces
        # ('--network pyrmont beacon'); confirm the image's entrypoint
        # tolerates this, otherwise split into separate list items.
        command=[
            '--network pyrmont beacon', '--http', '--http-address 0.0.0.0'
        ],
        cpu=4 * 1024,  # 4vCPU -> 8-30GB memory
        container_name='Pirate',
        logging=ecs.LogDrivers.aws_logs(stream_prefix='pirate'),
        memory_reservation_mib=16 * 1024,  # 16GB
        port_mappings=[
            ecs.PortMapping(container_port=9000, host_port=9000),  # protocol=TCP
            ecs.PortMapping(container_port=5052, host_port=5052),  # protocol=TCP
        ],
        secrets={
            # TODO: populate these with our keys
        },
        # NOTE(review): value appears redacted — supply the real user.
        user='******')

    service = ecs_patterns.ApplicationLoadBalancedEc2Service(
        self,
        "Pirateship",
        # certificate=???, # TODO: set up the public domain
        cluster=cluster,
        desired_count=1,
        # domain_name='ethpirates.com',
        # domain_zone=???, # TODO: set up the public domain
        public_load_balancer=True,
        task_definition=task_definition)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # The code that defines your stack goes here #vpc = ec2.Vpc.from_lookup(self, 'VPC', is_default=True) vpc = ec2.Vpc(self, "MyVpc", max_azs=2) rdsInst = rds.DatabaseInstance( self, 'SpringPetclinicDB', engine=rds.DatabaseInstanceEngine.MYSQL, instance_class=ec2.InstanceType('t2.medium'), master_username='******', database_name='petclinic', master_user_password=core.SecretValue('Welcome#123456'), vpc=vpc, deletion_protection=False, backup_retention=core.Duration.days(0), removal_policy=core.RemovalPolicy.DESTROY, #vpc_placement = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC) ) rdsInst.connections.allow_default_port_from_any_ipv4() cluster = ecs.Cluster(self, 'EcsCluster', vpc=vpc) asset = ecr_assets.DockerImageAsset( self, 'spring-petclinic', directory='./docker/', build_args={ 'JAR_FILE': 'spring-petclinic-2.1.0.BUILD-SNAPSHOT.jar' }) cluster.add_capacity( "DefaultAutoScalingGroup", instance_type=ec2.InstanceType('t2.large'), vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC), min_capacity=2) ecs_service = ecs_patterns.ApplicationLoadBalancedEc2Service( self, "Ec2Service", cluster=cluster, memory_limit_mib=1024, service_name='spring-petclinic', desired_count=2, task_image_options={ "image": ecs.ContainerImage.from_docker_image_asset(asset), "container_name": 'spring-petclinic', "container_port": 8080, "environment": { 'SPRING_DATASOURCE_PASSWORD': '******', 'SPRING_DATASOURCE_USERNAME': '******', 'SPRING_PROFILES_ACTIVE': 'mysql', 'SPRING_DATASOURCE_URL': 'jdbc:mysql://' + rdsInst.db_instance_endpoint_address + '/petclinic?useUnicode=true' } })
def __init__(self, scope: core.Construct, id: str, cluster: ecs.Cluster,
             hosted_zone_id: str, zone_name: str, sub_domain: str,
             host_port: int, file_system_id: str, mad_secret_arn: str,
             mad_domain_name: str, **kwargs) -> None:
    # Purpose: Windows IIS service on ECS with an FSx-backed task definition
    # created out-of-band (custom_fsx_task), wired to an HTTPS ALB with
    # Route53 DNS. Heavy use of CloudFormation escape hatches — statement
    # order matters; do not reorder.
    super().__init__(scope, id, **kwargs)

    # check context values
    for v in [sub_domain, hosted_zone_id, zone_name]:
        if v == '':
            raise Exception(
                "Please provide required parameters sub_domain, hosted_zone_id, zone_name via context variables"
            )

    # configure zone
    domain_zone = r53.PublicHostedZone.from_hosted_zone_attributes(
        self, "hosted_zone",
        hosted_zone_id=hosted_zone_id,
        zone_name=zone_name)
    domain_name = sub_domain + "." + zone_name

    # setup for pseudo parameters
    stack = core.Stack.of(self)

    ## Custom Resource - Task
    family = stack.stack_name + "_webserver"
    task_role = iam.Role(
        self, "TaskRole",
        role_name=family + '_task',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
    # Execution role: pull secrets for domain join + describe FSx, plus the
    # standard ECS task-execution managed policy.
    execution_role = iam.Role(
        self, "ExecutionRole",
        role_name=family + '_execution',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        inline_policies=[
            iam.PolicyDocument(statements=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=[
                                        "secretsmanager:GetSecretValue",
                                        "secretsmanager:DescribeSecret"
                                    ],
                                    resources=[mad_secret_arn]),
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=["fsx:DescribeFileSystems"],
                                    resources=["*"])
            ])
        ],
        managed_policies=[
            iam.ManagedPolicy.from_managed_policy_arn(
                self, "AmazonECSTaskExecutionRolePolicy",
                'arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy'
            )
        ])

    # Custom Task Definition (created outside CDK's type model).
    task_definition_arn = custom_fsx_task(self,
                                          host_port=host_port,
                                          family=family,
                                          file_system_id=file_system_id,
                                          mad_secret_arn=mad_secret_arn,
                                          mad_domain_name=mad_domain_name,
                                          task_role=task_role,
                                          execution_role=execution_role)
    # importing a task is broken https://github.com/aws/aws-cdk/issues/6240
    # task_definition = ecs.Ec2TaskDefinition.from_ec2_task_definition_arn(self, "TaskDef",
    #     ec2_task_definition_arn=task_definition_arn)

    # Task Definition - Work Around Part 1 (Create a temp task, this won't actually be used)
    task_definition = ecs.Ec2TaskDefinition(
        self, "TaskDef",
        #network_mode=ecs.NetworkMode.DEFAULT # Parameter not available yet, escape hatch required
    )
    # Edit Ec2TaskDefinition via an Escape Hatch to remove network_mode (required for windows) - This Task Definition is completely ignored for now...
    # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-taskdefinition.html#cfn-ecs-taskdefinition-networkmode
    cfn_task_definition = task_definition.node.default_child
    cfn_task_definition.add_property_deletion_override('NetworkMode')

    container = task_definition.add_container(
        "IISContainer",
        image=ecs.ContainerImage.from_registry('microsoft/iis'),
        memory_limit_mib=1028,
        cpu=512,
        entry_point=["powershell", "-Command"],
        command=["C:\\ServiceMonitor.exe w3svc"],
    )
    container.add_port_mappings(
        ecs.PortMapping(protocol=ecs.Protocol.TCP,
                        container_port=80,
                        host_port=host_port))
    # Task Definition - Work Around Part 1 End

    # ECS Service, ALB, Cert
    # NOTE(review): local variable shadows the pattern class name — works,
    # but rename if this block is ever restructured.
    ApplicationLoadBalancedEc2Service = ecs_patterns.ApplicationLoadBalancedEc2Service(
        self, "iis-service",
        cluster=cluster,
        task_definition=task_definition,
        desired_count=2,
        domain_name=domain_name,
        domain_zone=domain_zone,
        protocol=elbv2.ApplicationProtocol.HTTPS,
        redirect_http=True)

    # Task Definition - Work Around Part 2 (Override the temp task we created earlier that won't actually be used)
    cfn_service = ApplicationLoadBalancedEc2Service.node.find_child(
        'Service').node.find_child('Service')
    cfn_service.add_property_override('TaskDefinition', task_definition_arn)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) #create VPC self.vpc = ec2.Vpc( self, 'SonarVPC', max_azs=3 ) #DB Security Group with required ingress rules self.sg= ec2.SecurityGroup( self, "SonarQubeSG", vpc=self.vpc, allow_all_outbound=True, description="Aurora Security Group" ) self.sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(5432), "SonarDBAurora") pgroup = rds.ParameterGroup.from_parameter_group_name( self, "SonarDBParamGroup", parameter_group_name='default.aurora-postgresql11' ) #create RDS Cluster self.db= rds.DatabaseCluster(self, 'SonarDBCluster', engine= rds.DatabaseClusterEngine.aurora_postgres(version=rds.AuroraPostgresEngineVersion.VER_11_6), default_database_name= 'sonarqube', parameter_group= pgroup, master_user=rds.Login(username= "******"), instance_props= rds.InstanceProps( instance_type= ec2.InstanceType.of( ec2.InstanceClass.BURSTABLE3, ec2.InstanceSize.MEDIUM ), security_groups= [self.sg], vpc= self.vpc ) ) #create Cluster self.cluster= ecs.Cluster(self, "SonarCluster", capacity= ecs.AddCapacityOptions( instance_type= ec2.InstanceType('m5.large')), vpc= self.vpc ) asg= self.cluster.autoscaling_group user_data= asg.user_data user_data.add_commands('sysctl -qw vm.max_map_count=262144') user_data.add_commands('sysctl -w fs.file-max=65536') user_data.add_commands('ulimit -n 65536') user_data.add_commands('ulimit -u 4096') #Create iam Role for Task self.task_role = iam.Role( self, id= "SonarTaskRole", role_name= "SonarTaskRole", assumed_by= iam.ServicePrincipal(service= "ecs-tasks.amazonaws.com"), managed_policies= [ iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonECSTaskExecutionRolePolicy") ] ) #Grant permission for Task to read secret from SecretsManager self.db.secret.grant_read(self.task_role) url = 'jdbc:postgresql://{}/sonarqube'.format(self.db.cluster_endpoint.socket_address) #create task task= ecs_patterns.ApplicationLoadBalancedEc2Service(self, 
"SonarService", # if a cluster is provided use the same vpc cluster= self.cluster, cpu=512, desired_count=1, task_image_options= ecs_patterns.ApplicationLoadBalancedTaskImageOptions( image=ecs.ContainerImage.from_registry("sonarqube:8.2-community"), container_port=9000, secrets={ "sonar.jdbc.username": ecs.Secret.from_secrets_manager(self.db.secret, field="username"), "sonar.jdbc.password": ecs.Secret.from_secrets_manager(self.db.secret, field="password") }, environment={ 'sonar.jdbc.url': url }, task_role= self.task_role ), memory_limit_mib=2048, public_load_balancer=True ) container = task.task_definition.default_container container.add_ulimits( ecs.Ulimit( name=ecs.UlimitName.NOFILE, soft_limit=65536, hard_limit=65536 ) )
def __init__(self, scope: core.Construct, id: str, cluster_configuration, autoscaling_spec, **kwargs) -> None:
    """VPC + ECS cluster with EC2 capacity + load-balanced service, with
    optional CPU/memory task autoscaling.

    cluster_configuration keys used: cluster_name, fargate_enabled,
    node_max, node_desired, container_cpu, container_image,
    container_port, container_mem.
    autoscaling_spec keys used: instance_type, task_desired, enabled,
    max, cpu, mem.
    """
    super().__init__(scope, id, **kwargs)
    self.cluster_configuration = cluster_configuration
    self.autoscaling_spec = autoscaling_spec

    cluster_vpc = aws_ec2.Vpc(
        self,
        "ClusterVPC",
        cidr="10.0.0.0/16",
        nat_gateways=1,
    )
    core.Tags.of(cluster_vpc).add(
        "Name", cluster_configuration['cluster_name'] + "VPC")

    # Cluster with its EC2 capacity (also created for the Fargate path,
    # matching the original behavior).
    cluster = aws_ecs.Cluster(
        self,
        "ECSCluster",
        cluster_name=cluster_configuration['cluster_name'],
        vpc=cluster_vpc,
        capacity=aws_ecs.AddCapacityOptions(
            instance_type=aws_ec2.InstanceType(
                instance_type_identifier=self.autoscaling_spec['instance_type']),
            min_capacity=1,
            max_capacity=self.cluster_configuration['node_max'],
            desired_capacity=self.cluster_configuration["node_desired"],
            auto_scaling_group_name=cluster_configuration['cluster_name'] +
            "ASG"))

    # Settings shared by both launch types — previously duplicated verbatim
    # in each branch.
    common_kwargs = dict(
        service_name=self.cluster_configuration['cluster_name'] + "Service",
        cluster=cluster,  # Required
        cpu=self.cluster_configuration["container_cpu"],  # Default is 256
        desired_count=self.autoscaling_spec["task_desired"],
        task_image_options=aws_ecs_patterns.
        ApplicationLoadBalancedTaskImageOptions(
            image=aws_ecs.ContainerImage.from_registry(
                cluster_configuration["container_image"]),
            container_port=cluster_configuration["container_port"]),
        memory_limit_mib=cluster_configuration["container_mem"],  # Default is 512
        public_load_balancer=True)  # Default is False

    if self.cluster_configuration['fargate_enabled'] is True:
        loadbalancedservice = aws_ecs_patterns.ApplicationLoadBalancedFargateService(
            self, "ECSFargateService", **common_kwargs)
    else:
        # NOTE(review): the construct id says "Fargate" but this is the EC2
        # branch; kept unchanged so deployed stacks' logical IDs stay stable.
        loadbalancedservice = aws_ecs_patterns.ApplicationLoadBalancedEc2Service(
            self, "ECSFargateService", **common_kwargs)

    # Optional task-count autoscaling on CPU and memory utilization.
    # (`== True` kept deliberately — it matches the original truth test.)
    if self.autoscaling_spec['enabled'] == True:
        scalable_target = loadbalancedservice.service.auto_scale_task_count(
            max_capacity=self.autoscaling_spec['max'])
        scalable_target.scale_on_cpu_utilization(
            'CpuScaling',
            target_utilization_percent=self.autoscaling_spec['cpu'],
        )
        scalable_target.scale_on_memory_utilization(
            'MemoryScaling',
            target_utilization_percent=self.autoscaling_spec['mem'],
        )