def airflow_web_service(self, environment):
    service_name = get_webserver_service_name(self.deploy_env)
    family = get_webserver_taskdef_family_name(self.deploy_env)
    task_def = ecs.FargateTaskDefinition(self, family,
                                         cpu=512,
                                         memory_limit_mib=1024,
                                         family=family)
    task_def.add_container(f"WebWorker-{self.deploy_env}",
                           image=self.image,
                           environment=environment,
                           secrets=self.secrets,
                           logging=ecs.LogDrivers.aws_logs(
                               stream_prefix=family,
                               log_retention=RetentionDays.ONE_DAY))
    task_def.default_container.add_port_mappings(
        ecs.PortMapping(container_port=8080,
                        host_port=8080,
                        protocol=ecs.Protocol.TCP))
    # We want only one instance of the web server, so max_healthy_percent=100:
    # when a new version is deployed, the current task has to be stopped before
    # the new one starts (handled by the deploy task).
    service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self, service_name,
        cluster=self.cluster,  # Required
        service_name=service_name,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        cpu=512,  # Default is 256
        desired_count=1,  # Default is 1
        task_definition=task_def,
        memory_limit_mib=2048,  # Default is 512
        public_load_balancer=True,
        max_healthy_percent=100)
    service.target_group.configure_health_check(path="/health")
    return service
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
             repository: ecr.Repository, shared_context: Dict[str, Any],
             **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    self.vpc = vpc
    self.model_bucket = s3.Bucket.from_bucket_name(
        scope=self, id=f'{id}-model-bucket',
        bucket_name=shared_context['model_bucket_name'])
    self.ecs_cluster = ecs.Cluster(self, id=f'{id}-ecs',
                                   cluster_name='serving-ecs',
                                   vpc=self.vpc,
                                   container_insights=True)
    self.task_definition = ecs.FargateTaskDefinition(
        self, id=f'{id}-ecs-task-definition',
        memory_limit_mib=shared_context['fargate_memory_limit_mb'],
        cpu=shared_context['fargate_cpu_units'])
    self.task_definition.add_to_task_role_policy(iam.PolicyStatement(
        actions=['s3:GetObject'],
        effect=iam.Effect.ALLOW,
        resources=[self.model_bucket.bucket_arn,
                   self.model_bucket.bucket_arn + '/*']
    ))
    image = ecs.ContainerImage.from_ecr_repository(repository, 'latest')
    log_driver = ecs.AwsLogDriver(
        stream_prefix=id,
        log_retention=logs.RetentionDays.FIVE_DAYS
    )
    environment = {
        'MODEL_BUCKET_NAME': shared_context['model_bucket_name']
    }
    app_container = self.task_definition.add_container(
        id=f'{id}-container',
        image=image,
        logging=log_driver,
        environment=environment)
    app_container.add_port_mappings(
        PortMapping(container_port=shared_context['port'],
                    host_port=shared_context['port']))
    self.service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self, id=f'{id}-fargate-service',
        assign_public_ip=True,
        cluster=self.ecs_cluster,
        desired_count=1,
        task_definition=self.task_definition,
        open_listener=True,
        listener_port=shared_context['port'],
        target_protocol=ApplicationProtocol.HTTP)
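# Note: instead of the hand-written S3 policy statement above, the bucket's
# grant helper produces an equivalent read-only grant -- a minimal sketch,
# assuming the same construct attributes as the snippet above:
#
#     self.model_bucket.grant_read(self.task_definition.task_role)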
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # The code that defines your stack goes here vpc = ec2.Vpc(self, "MyVpc", max_azs=2) cluster = ecs.Cluster(self, "MyCluster", vpc=vpc) task_def = ecs.FargateTaskDefinition(self, "TaskDef") app_container = task_def.add_container( "AppContainer", # image=ecs.ContainerImage.from_asset( # "myapp") image=ecs.ContainerImage.from_registry( "mystique/predict-attire-for-weather")) app_container.add_port_mappings( ecs.PortMapping(container_port=80, protocol=ecs.Protocol.TCP)) myService = ecs_patterns.ApplicationLoadBalancedFargateService( self, "myService", cluster=cluster, desired_count=2, task_definition=task_def)
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) vpc = ec2.Vpc(self, "EcsVpc", max_azs=2, nat_gateways=0) vpc.add_s3_endpoint('S3Endpoint') vpc.add_interface_endpoint( 'EcrDockerEndpoint', service=ec2.InterfaceVpcEndpointAwsService.ECR_DOCKER) vpc.add_interface_endpoint( 'EcrEndpoint', service=ec2.InterfaceVpcEndpointAwsService.ECR) vpc.add_interface_endpoint( 'CloudWatchLogsEndpoint', service=ec2.InterfaceVpcEndpointAwsService.CLOUDWATCH_LOGS) cluster = ecs.Cluster(self, "EcsCluster", vpc=vpc) task_definition = ecs.FargateTaskDefinition(self, "DemoServiceTask", family="DemoServiceTask") image = ecs.ContainerImage.from_asset("service") container = task_definition.add_container("app", image=image) container.add_port_mappings(ecs.PortMapping(container_port=8080)) ecs_patterns.ApplicationLoadBalancedFargateService( self, "DemoService", cluster=cluster, desired_count=2, task_definition=task_definition)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) vpc = ec2.Vpc(self, "CDKFargateVpc", max_azs=2) cluster = ecs.Cluster(self, "CDKFargateCluster", vpc=vpc) role = iam.Role.from_role_arn(self, "CDKFargateECSTaskRole", ROLE_ARN) image = ecs.ContainerImage.from_registry(ECR_REGISOTRY) task_definition = ecs.FargateTaskDefinition(scope=self, id="CDKFargateECSTask", execution_role=role, task_role=role) port_mapping = ecs.PortMapping(container_port=8080, host_port=8080) task_definition.add_container( id="CDKFargateContainer", image=image).add_port_mappings(port_mapping) fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService( self, "CDKFargateService", cluster=cluster, task_definition=task_definition, ) core.CfnOutput( self, "CDKFargateLoadBalancerDNS", value=fargate_service.load_balancer.load_balancer_dns_name, )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    self.backend_task = ecs.FargateTaskDefinition(self, "BackendTask")
    self.backend_task.add_container(
        "BackendContainer",
        image=scope.image,
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix="BackendContainer",
            log_retention=logs.RetentionDays.ONE_WEEK,
        ),
        environment=scope.variables.regular_variables,
        secrets=scope.variables.secret_variables,
        command=["/start_prod.sh"],
    )
    scope.backend_assets_bucket.grant_read_write(self.backend_task.task_role)
    for secret in [scope.variables.django_secret_key, scope.rds.db_secret]:
        secret.grant_read(self.backend_task.task_role)
    port_mapping = ecs.PortMapping(container_port=8000,
                                   protocol=ecs.Protocol.TCP)
    self.backend_task.default_container.add_port_mappings(port_mapping)
    self.backend_service = ecs.FargateService(
        self, "BackendService",
        task_definition=self.backend_task,
        assign_public_ip=True,
        cluster=scope.ecs.cluster,
        security_group=ec2.SecurityGroup.from_security_group_id(
            self, "BackendServiceSecurityGroup",
            security_group_id=scope.vpc.vpc_default_security_group,
        ),
    )
    scope.https_listener.add_targets(
        "BackendTarget",
        port=80,
        targets=[self.backend_service],
        priority=2,
        path_patterns=["*"],
        health_check=elbv2.HealthCheck(
            healthy_http_codes="200-299",
            path="/api/health-check/",
        ),
    )
def __init__(self, scope: core.Construct, id: str, vpcId: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    cluster = aws_ecs.Cluster(self, 'Cluster',
                              vpc=scope.node.try_find_child(vpcId))
    task = aws_ecs.FargateTaskDefinition(
        self, 'Task',
        memory_limit_mib=512,
        cpu=256,
    )
    task.add_container(
        'Nginx',
        image=aws_ecs.ContainerImage.from_registry('nginx')
    ).add_port_mappings(aws_ecs.PortMapping(container_port=80))
    svc = aws_ecs_patterns.ApplicationLoadBalancedFargateService(
        self, 'FargateService',
        cluster=cluster,
        task_definition=task)
    # Use the DNS name (not the "full name") so the output is a usable URL
    core.CfnOutput(self, 'ServiceURL',
                   value='http://{}/'.format(
                       svc.load_balancer.load_balancer_dns_name))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    # Import the default VPC
    vpc = aws_ec2.Vpc.from_lookup(self, 'VPC', is_default=True)
    # ECS cluster
    cluster = aws_ecs.Cluster(self, 'Cluster', vpc=vpc)
    task = aws_ecs.FargateTaskDefinition(
        self, 'Task',
        cpu=256,
        memory_limit_mib=512,
    )
    task.add_container(
        'flask',
        image=aws_ecs.ContainerImage.from_asset('flask-docker-app'),
        environment={
            'PLATFORM': 'AWS Fargate :-)'
        }).add_port_mappings(aws_ecs.PortMapping(container_port=5000))
    svc = aws_ecs_patterns.ApplicationLoadBalancedFargateService(
        self, 'svc',
        cluster=cluster,
        task_definition=task)
    core.CfnOutput(self, 'ServiceURL',
                   value="http://{}".format(
                       svc.load_balancer.load_balancer_dns_name))
def __init__(self, scope: Construct, id: str, *, vpc: aws_ec2.IVpc,
             url: str, tps: int):
    """
    Defines an instance of the traffic generator.

    :param scope: construct scope
    :param id: construct id
    :param vpc: the VPC in which to host the traffic generator
    :param url: the URL to hit
    :param tps: the number of transactions per second
    """
    super().__init__(scope, id)
    ## Define an ECS Cluster hosted within the requested VPC
    cluster = aws_ecs.Cluster(self, 'cluster', vpc=vpc)
    ## Define our ECS Task Definition with a single Container.
    ## The image is built & published from a local asset directory
    task_definition = aws_ecs.FargateTaskDefinition(self, 'PingTask')
    task_definition.add_container(
        'Pinger',
        image=aws_ecs.ContainerImage.from_asset("pinger"),
        environment={'URL': url})
    ## Define our Fargate Service. TPS determines how many Instances we
    ## want from our Task (each Task produces a single TPS)
    aws_ecs.FargateService(self, 'service',
                           cluster=cluster,
                           task_definition=task_definition,
                           desired_count=tps)
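# A minimal usage sketch for the traffic-generator construct above; the
# `TrafficGenerator` class name, app, and stack here are hypothetical
# stand-ins for however the construct is actually wired up:
#
#     app = core.App()
#     stack = core.Stack(app, "LoadTestStack")
#     vpc = aws_ec2.Vpc(stack, "Vpc", max_azs=2)
#     # One Fargate task per requested TPS
#     TrafficGenerator(stack, "traffic", vpc=vpc,
#                      url="http://example.com/ping", tps=5)
#     app.synth()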
def __init__(self, scope: core.Construct, id: str, vpc: aws_ec2.IVpc,
             **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    e2e_cluster = aws_ecs.Cluster(self, 'e2e-cluster',
                                  vpc=vpc,
                                  cluster_name='e2e-cluster')
    e2e_image = aws_ecr_assets.DockerImageAsset(self, 'e2e-image',
                                                directory='test/e2e')
    e2e_task = aws_ecs.FargateTaskDefinition(self, 'e2e-task',
                                             family='e2e-task')
    e2e_task.add_container(
        'e2e-test-kafka',
        image=aws_ecs.ContainerImage.from_docker_image_asset(e2e_image),
        logging=aws_ecs.AwsLogDriver(stream_prefix='e2e'))
    e2e_security_group = aws_ec2.SecurityGroup(self, 'e2e', vpc=vpc)
    # Expose the security group so it can be given access to Kafka
    self.e2e_security_group = e2e_security_group
    core.CfnOutput(self, "subnets",
                   value=','.join([subnet.subnet_id
                                   for subnet in vpc.private_subnets]))
    core.CfnOutput(self, "securitygroup",
                   value=e2e_security_group.security_group_id)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    # Initialize all the required parameters
    vpc_id = '[fill in VPC ID]'
    security_group_id = '[fill in sg id]'
    # Existing subnet IDs for the two availability zones in use
    subnet_id_1a = '[fill in subnetid]'
    subnet_id_1b = '[fill in subnetid]'
    # Name of the existing ECS cluster where the Fargate service is deployed
    ecs_cluster_name = '[fill in name for cluster]'
    # Get an instance of the existing VPC
    vpc = ec2.Vpc.from_lookup(self, 'vpc', vpc_id=vpc_id)
    # Get a handle for the existing security group
    security_group = ec2.SecurityGroup.from_security_group_id(
        self, "SG", security_group_id, mutable=True)
    # Get handles for the existing subnets
    subnet1a = ec2.Subnet.from_subnet_attributes(
        self, 'subnet1a',
        availability_zone='us-east-1a',
        subnet_id=subnet_id_1a)
    subnet1b = ec2.Subnet.from_subnet_attributes(
        self, 'subnet1b',
        availability_zone='us-east-1b',
        subnet_id=subnet_id_1b)
    vpc_subnets_selection = ec2.SubnetSelection(subnets=[subnet1a, subnet1b])
    # Get a handle for the existing ECS cluster
    cluster = ecs.Cluster.from_cluster_attributes(
        self, 'test-cluster',
        cluster_name=ecs_cluster_name,
        vpc=vpc,
        security_groups=[security_group])
    # Create the Fargate task definition
    task_definition = ecs.FargateTaskDefinition(self, "test-with-cdk",
                                                cpu=256,
                                                memory_limit_mib=512)
    # Add a container to the task definition using a sample image
    container = task_definition.add_container(
        "test-with-cdk",
        image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
        memory_limit_mib=256)
    # Expose the required port on the container
    port_mapping = ecs.PortMapping(container_port=80,
                                   protocol=ecs.Protocol.TCP)
    container.add_port_mappings(port_mapping)
    # Create the Fargate service with all required inputs
    fargate_service = ecs.FargateService(self, "test-with-cdk-service",
                                         cluster=cluster,
                                         task_definition=task_definition,
                                         vpc_subnets=vpc_subnets_selection,
                                         security_group=security_group)
def __init__(self, scope: core.Construct, id: str, vpc, redis, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    self.ecr = _ecr.Repository(self, "ecrRepo")
    self.ecs_cluster = _ecs.Cluster(self, "ecsCluster",
                                    container_insights=True,
                                    vpc=vpc)
    self.task_definition = _ecs.FargateTaskDefinition(self, "taskDefinition",
                                                      memory_limit_mib=512,
                                                      cpu=256)
    self.docker_image = _ecr_assets.DockerImageAsset(self, "dockerImage",
                                                     directory="./code")
    self.container = self.task_definition.add_container(
        "testContainer",
        image=_ecs.ContainerImage.from_docker_image_asset(self.docker_image),
        logging=_ecs.LogDriver.aws_logs(stream_prefix="containerlogs"),
        environment={
            "STAGE": "dev",
            "REDIS_ENDPOINT": redis.attr_configuration_end_point_address
        },
    )
    self.container.add_port_mappings(
        _ecs.PortMapping(container_port=5000, protocol=_ecs.Protocol.TCP))
    self.service = _ecs.FargateService(
        self, "fargateService",
        cluster=self.ecs_cluster,
        task_definition=self.task_definition,
        desired_count=3,
        vpc_subnets=_ec2.SubnetSelection(subnets=vpc.private_subnets),
        security_groups=[vpc.sg])
    self.lb = _elbv2.ApplicationLoadBalancer(self, "alb",
                                             vpc=vpc,
                                             security_group=vpc.sg,
                                             internet_facing=True)
    listener = self.lb.add_listener("listener", port=80)
    self.target_group = listener.add_targets("fargateTarget",
                                             port=80,
                                             targets=[self.service])
    core.CfnOutput(self, "albDnsName", value=self.lb.load_balancer_dns_name)
def __init__(self, scope: core.Construct, id: str, custom_vpc, **kwargs) -> None: super().__init__(scope, id, **kwargs) # The code that defines your stack goes here fargate_cluster = _ecs.Cluster(self, "fargateClusterId", vpc=custom_vpc) # Export resource name. You can import in another stack if required core.CfnOutput(self, "ClusterNameOutput", value=f"{fargate_cluster.cluster_name}", export_name="ClusterName") weather_svc_task_def = _ecs.FargateTaskDefinition( self, "weatherTaskDefId") weather_container = weather_svc_task_def.add_container( "weatherContainer", environment={'PLATFORM': 'Mystikal Fargate World :-)'}, image=_ecs.ContainerImage.from_registry( "mystique/predict-attire-for-weather"), memory_limit_mib=256, cpu=256, entry_point=[ "gunicorn", "--bind", "0.0.0.0:80", "--bind", "0.0.0.0:443", "wsgi:application", "--access-logfile", "-", "--error-logfile", "-", "--capture-output", "--enable-stdio-inheritance" ], logging=_ecs.LogDrivers.aws_logs(stream_prefix="Mystique")) weather_container.add_port_mappings( _ecs.PortMapping(container_port=80)) weather_container.add_port_mappings( _ecs.PortMapping(container_port=443)) weather_service = _ecs_patterns.ApplicationLoadBalancedFargateService( self, "weatherServiceId", cluster=fargate_cluster, task_definition=weather_svc_task_def, assign_public_ip=False, public_load_balancer=True, listener_port=80, desired_count=2, cpu=256, memory_limit_mib=512, service_name="weatherService", ) # Export resource name. You can import in another stack if required core.CfnOutput( self, "WeatherServiceUrl", value= f"http://{weather_service.load_balancer.load_balancer_dns_name}") """
def _create_mlflow_server(self):
    """
    Create a Fargate task for the MLflow server
    """
    cluster = ecs.Cluster(scope=self, id="CLUSTER",
                          cluster_name=self.cluster_name,
                          vpc=self.vpc)
    task_id = f"{self.stack_name}-{self.component_id}-MLflow"
    task_definition = ecs.FargateTaskDefinition(
        scope=self,
        id=task_id,
        task_role=self.role,
    )
    container_id = f"{self.stack_name}-{self.component_id}-container"
    container = task_definition.add_container(
        id=container_id,
        image=ecs.ContainerImage.from_asset(
            directory="cdk_ml_cicd_pipeline/resources/visualization/mlflow/container",
        ),
        environment={
            "BUCKET": f"s3://{self.artifact_bucket.bucket_name}",
            "HOST": self.database.db_instance_endpoint_address,
            "PORT": str(self.port),
            "DATABASE": self.dbname,
            "USERNAME": self.username,
        },
        secrets={"PASSWORD": ecs.Secret.from_secrets_manager(self.db_password_secret)},
        logging=ecs.LogDriver.aws_logs(stream_prefix='mlflow')
    )
    port_mapping = ecs.PortMapping(container_port=5000,
                                   host_port=5000,
                                   protocol=ecs.Protocol.TCP)
    container.add_port_mappings(port_mapping)
    fargate_service_id = f"{self.stack_name}-{self.component_id}-mlflow-fargate"
    self.fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
        scope=self,
        id=fargate_service_id,
        service_name=self.service_name,
        cluster=cluster,
        task_definition=task_definition,
    )
    # Set up the security group: allow inbound from the VPC on the MLflow port
    self.fargate_service.service.connections.security_groups[0].add_ingress_rule(
        peer=ec2.Peer.ipv4(self.vpc.vpc_cidr_block),
        connection=ec2.Port.tcp(5000),
        description="Allow inbound from VPC for mlflow",
    )
    # Set up the autoscaling policy
    autoscaling_policy_id = f"{self.stack_name}-{self.component_id}-autoscaling-policy"
    scaling = self.fargate_service.service.auto_scale_task_count(max_capacity=2)
    scaling.scale_on_cpu_utilization(
        id=autoscaling_policy_id,
        target_utilization_percent=70,
        scale_in_cooldown=Duration.seconds(60),
        scale_out_cooldown=Duration.seconds(60),
    )
def airflow_web_service(self, environment):
    service_name = get_webserver_service_name(self.deploy_env)
    family = get_webserver_taskdef_family_name(self.deploy_env)
    task_def = ecs.FargateTaskDefinition(self, family,
                                         cpu=512,
                                         memory_limit_mib=1024,
                                         family=family)
    task_def.add_container(f"WebWorker-{self.deploy_env}",
                           image=self.image,
                           environment=environment,
                           secrets=self.secrets,
                           logging=ecs.LogDrivers.aws_logs(
                               stream_prefix=family,
                               log_retention=RetentionDays.ONE_DAY))
    task_def.default_container.add_port_mappings(
        ecs.PortMapping(container_port=8080,
                        host_port=8080,
                        protocol=ecs.Protocol.TCP))
    # We want only one instance of the web server, so max_healthy_percent=100:
    # when a new version is deployed, the current task has to be stopped before
    # the new one starts (handled by the deploy task).
    lb_security_group = ec2.SecurityGroup(
        self, f"lb-sec-group-{self.deploy_env}", vpc=self.vpc)
    service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self, service_name,
        cluster=self.cluster,  # Required
        service_name=service_name,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        cpu=512,  # Default is 256
        desired_count=1,  # Default is 1
        task_definition=task_def,
        memory_limit_mib=2048,  # Default is 512
        public_load_balancer=True,
        security_groups=[lb_security_group],
        certificate=Certificate.from_certificate_arn(
            self, f"lb-cert-{self.deploy_env}",
            certificate_arn=self.config["lb_certificate_arn"]),
        max_healthy_percent=100)
    service.target_group.configure_health_check(path="/health")
    # Restrict access to the load balancer to the VPN only
    lb_security_group.connections.allow_from(
        ec2.Peer.ipv4(self.config["lb_vpn_addresses"]),
        ec2.Port.tcp(443))
    # Configure a DNS alias for the load balancer
    route53.ARecord(
        self, f"lb-record-{self.deploy_env}",
        zone=route53.HostedZone.from_hosted_zone_attributes(
            self, f"Zone-{self.deploy_env}",
            zone_name=f"Zone-{self.deploy_env}",
            hosted_zone_id=self.config["route53_zone_id"]),
        record_name=self.config["lb_dns_name"],
        target=route53.RecordTarget.from_alias(
            targets.LoadBalancerTarget(service.load_balancer)))
    return service
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)
    # Create the VPC and ECS cluster
    # TODO: even when max_azs is specified, only 2 AZs are deployed
    vpc = ec2.Vpc(self, "ECSVPC", cidr='10.0.0.0/16')
    cluster = ecs.Cluster(self, "ECSCluster", vpc=vpc)
    # Create the task definition
    task_definition = ecs.FargateTaskDefinition(
        self, "ECSDemoTaskDefinition",
        task_role=iam.Role.from_role_arn(
            self, "fargate_task_role",
            "arn:aws-cn:iam::402202783068:role/ECS-Task-Role-Firelens"),
        execution_role=iam.Role.from_role_arn(
            self, "fargate_task_execution_role",
            "arn:aws-cn:iam::402202783068:role/ecsTaskExecutionRole"))
    task_definition.add_volume(name="data")
    # App container
    app_container = task_definition.add_container(
        "AppContainer",
        image=ecs.ContainerImage.from_ecr_repository(
            ecr.Repository.from_repository_name(
                self, id="app-file-image", repository_name="app-file")),
        logging=ecs.FireLensLogDriver())
    app_container.add_mount_points(
        ecs.MountPoint(container_path="/data/logs",
                       read_only=False,
                       source_volume="data"))
    # app_container.add_port_mappings(ecs.PortMapping(container_port=80))
    # Log router
    fluentbit_container = ecs.FirelensLogRouter(
        self, "fluentbit_container",
        firelens_config=ecs.FirelensConfig(
            type=ecs.FirelensLogRouterType.FLUENTBIT,
            options=ecs.FirelensOptions(config_file_value="/extra.conf")),
        task_definition=task_definition,
        image=ecs.ContainerImage.from_ecr_repository(
            ecr.Repository.from_repository_name(
                self, id="log-router", repository_name="firelens-file")),
        logging=ecs.AwsLogDriver(
            stream_prefix="/ecs/firelens-fluentbit-demo/"))
    fluentbit_container.add_mount_points(
        ecs.MountPoint(container_path="/data/logs",
                       read_only=False,
                       source_volume="data"))
def __init__(self, scope: core.Construct, id: str, name_extension: str,
             stage: str, tags: list, vpc_name: str, region: str,
             ecs_conf: dict, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    self.base_platform = bp.BasePlatform(self, id, name_extension, stage,
                                         vpc_name)
    self.objects_list = []
    self.ecr = _ecr.Repository.from_repository_name(
        self, "nginx-ecr", repository_name="nginx")
    self.fargate_task_def = _ecs.FargateTaskDefinition(
        self, "lz-nginx-ecr-td",
        family=ecs_conf["task_name"],
        cpu=256,
        memory_limit_mib=512,
    )
    self.container = self.fargate_task_def.add_container(
        "lz-nginx-ecr-container",
        image=_ecs.ContainerImage.from_ecr_repository(self.ecr, "latest"),
        memory_reservation_mib=512,
        logging=_ecs.LogDriver.aws_logs(
            stream_prefix=name_extension + "-nginx-container"
        ),
        environment={
            "REGION": region
        },
    )
    self.container.add_port_mappings(
        _ecs.PortMapping(
            container_port=80
        )
    )
    self.fargate_service = _ecs.FargateService(
        self, "lz-nginx-ecr-service",
        task_definition=self.fargate_task_def,
        cluster=self.base_platform.ecs_cluster,
        security_group=self.base_platform.services_sec_grp,
        desired_count=1,
        cloud_map_options=_ecs.CloudMapOptions(
            cloud_map_namespace=self.base_platform.sd_namespace,
            name=ecs_conf["dns_name"]
        ),
        service_name=ecs_conf["service_name"],
        platform_version=_ecs.FargatePlatformVersion.VERSION1_4,
    )
    self.objects_list.append(self.ecr)
    self.objects_list.append(self.fargate_service)
    self.objects_list.append(self.container)
    self.objects_list.append(self.fargate_task_def)
    self.tags_creation(tags)
def create_task(self, name, guild, operators, type):
    # Define an ECS task backed by an EFS volume
    volume = self.create_efs_volume(name)
    task = ecs.FargateTaskDefinition(self, name,
                                     cpu=1024,
                                     memory_limit_mib=4096,
                                     volumes=[volume])
    core.Tags.of(task).add("guild", guild)
    self.create_container(name, task, operators, volume, type)
    return task
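# `create_efs_volume` above is defined elsewhere in the class; a minimal
# sketch of what such a helper might look like, assuming it provisions one
# EFS file system per task (`self.vpc` and the `aws_efs` module imported as
# `efs` are assumptions, not taken from the original):
def create_efs_volume(self, name):
    # One file system per task, wired into the task definition as an ECS volume
    file_system = efs.FileSystem(self, f"{name}-fs", vpc=self.vpc)
    return ecs.Volume(
        name=name,
        efs_volume_configuration=ecs.EfsVolumeConfiguration(
            file_system_id=file_system.file_system_id))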
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # Create the ECR Repository ecr_repository = ecr.Repository(self, "ecs-devops-sandbox-repository", repository_name="ecs-devops-sandbox-repository") # Create the ECS Cluster (and VPC) vpc = ec2.Vpc(self, "ecs-devops-sandbox-vpc", max_azs=3) cluster = ecs.Cluster(self, "ecs-devops-sandbox-cluster", cluster_name="ecs-devops-sandbox-cluster", vpc=vpc) # Create the ECS Task Definition with placeholder container (and named Task Execution IAM Role) execution_role = iam.Role(self, "ecs-devops-sandbox-execution-role", assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"), role_name="ecs-devops-sandbox-execution-role") execution_role.add_to_policy(iam.PolicyStatement( effect=iam.Effect.ALLOW, resources=["*"], actions=[ "ecr:GetAuthorizationToken", "ecr:BatchCheckLayerAvailability", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage", "logs:CreateLogStream", "logs:PutLogEvents" ] )) task_definition = ecs.FargateTaskDefinition(self, "ecs-devops-sandbox-task-definition", execution_role=execution_role, family="ecs-devops-sandbox-task-definition") container = task_definition.add_container( "ecs-devops-sandbox", image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample") ) port_mapping = ecs.PortMapping(container_port=5000) container.add_port_mappings(port_mapping) # Create a load balanced ECS Service # Athough this block works, I was not able to load_balanced_service = ecs_patterns.ApplicationLoadBalancedFargateService(self, "ecs-devops-sandbox-service", cluster=cluster, task_definition=task_definition, service_name="ecs-devops-sandbox-service")
def __create_pull_service(self, service_name: str, ctx: object):
    ctx_srv = getattr(ctx.inbound.services.pull, service_name)
    ecs_task_role = self.__create_default_task_role(service_name)
    log_driver = ecs.LogDriver.aws_logs(log_group=self.log_group,
                                        stream_prefix=service_name)
    # Create a Fargate task definition
    task_definition = ecs.FargateTaskDefinition(
        scope=self,
        id=f"{service_name}_task_definition",
        cpu=ctx_srv.size.cpu,
        memory_limit_mib=ctx_srv.size.ram,
        execution_role=self.ecs_exec_role,
        task_role=ecs_task_role,
    )
    # Create a container definition and associate it with the Fargate task
    container_vars = self.__get_container_vars(service_name, ctx, ctx_srv)
    container = ecs.ContainerDefinition(
        scope=self,
        id=f"{service_name}_container_definition",
        task_definition=task_definition,
        image=ecs.ContainerImage.from_ecr_repository(
            self.ecr_repository, "latest"),
        logging=log_driver,
        **container_vars)
    security_group = ec2.SecurityGroup(scope=self,
                                       id=f"{service_name}_sg",
                                       vpc=self.vpc)
    service = ecs.FargateService(
        scope=self,
        id=f"{service_name}_service",
        task_definition=task_definition,
        cluster=self.cluster,
        desired_count=getattr(ctx_srv, "desired_count",
                              ctx.default_desired_count),
        service_name=service_name,
        security_group=security_group)
    scaling = service.auto_scale_task_count(
        max_capacity=ctx_srv.scaling.max_capacity,
        min_capacity=ctx_srv.scaling.min_capacity)
    scaling.scale_on_cpu_utilization(
        id="cpu_scaling",
        target_utilization_percent=ctx_srv.scaling.target_utilization_percent,
        scale_in_cooldown=core.Duration.seconds(
            ctx_srv.scaling.scale_in_cooldown_seconds),
        scale_out_cooldown=core.Duration.seconds(
            ctx_srv.scaling.scale_out_cooldown_seconds))
def create_fargate_NLB_autoscaling(self, vpc):
    cluster = ecs.Cluster(self, 'fargate-service-autoscaling', vpc=vpc)
    # Configure the IAM role: import an existing task execution role
    ecs_base_role = iam.Role(
        self, "ecs_service_role",
        assumed_by=iam.ServicePrincipal("ecs.amazonaws.com"))
    ecs_role = iam.Role.from_role_arn(
        self, 'gw-ecr-role-test',
        role_arn='arn:aws:iam::002224604296:role/ecsTaskExecutionRole')
    # Create the Fargate task definition
    fargate_task = ecs.FargateTaskDefinition(
        self, "graph-inference-task-definition",
        execution_role=ecs_role,
        task_role=ecs_role,
        cpu=2048,
        memory_limit_mib=4096)
    ecr_repo = ecr.Repository.from_repository_name(
        self, id="graph-inference-docker",
        repository_name="sagemaker-recsys-graph-inference")
    # On Fargate (awsvpc networking) the host port must equal the container port
    port_mapping = ecs.PortMapping(container_port=8080,
                                   host_port=8080,
                                   protocol=ecs.Protocol.TCP)
    ecs_log = ecs.LogDrivers.aws_logs(stream_prefix='gw-inference-test')
    fargate_container = fargate_task.add_container(
        "graph-inference",
        image=ecs.ContainerImage.from_ecr_repository(ecr_repo),
        logging=ecs_log)
    fargate_container.add_port_mappings(port_mapping)
    # Use the NLB pattern so the service actually has a load balancer
    # whose DNS name can be returned below
    fargate_service = ecs_patterns.NetworkLoadBalancedFargateService(
        self, 'graph-inference-service',
        cluster=cluster,
        task_definition=fargate_task,
        assign_public_ip=True)
    fargate_service.service.connections.security_groups[0].add_ingress_rule(
        peer=ec2.Peer.ipv4('0.0.0.0/0'),
        connection=ec2.Port.tcp(8080),
        description="Allow http inbound from anywhere")
    return fargate_service.load_balancer.load_balancer_dns_name
def __init__(self, scope: core.Construct, id: str, vpc,
             security_group: ec2.SecurityGroup, rds: rds.DatabaseInstance,
             config: dict, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    self.cluster = ecs.Cluster(
        self, "VdbEcsStackCluster",
        cluster_name="VdbCluster",
        vpc=vpc
    )
    self.cluster.add_capacity(
        "DefaultAutoScalingGroup",
        instance_type=ec2.InstanceType("t2.xlarge"),
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
        max_capacity=1)
    execute_task_definition = ecs.FargateTaskDefinition(
        self, "ExecuteTaskDef",
        cpu=1024,
        memory_limit_mib=2048
    )
    execute_container = execute_task_definition.add_container(
        "execute",
        image=ecs.ContainerImage.from_registry(name=config['VDB_EXECUTE_IMAGE']),
        entry_point=["sh", "-c"],
        command=["./startup_script.sh"],
        logging=ecs.LogDrivers.aws_logs(stream_prefix="VdbExecuteLogs"),
        environment={
            "DATABASE_USER": config['DATABASE_USER'],
            "DATABASE_PASSWORD": config['DATABASE_PASSWORD'],
            "DATABASE_NAME": config['DATABASE_NAME'],
            "DATABASE_PORT": rds.db_instance_endpoint_port,
            "DATABASE_HOSTNAME": rds.db_instance_endpoint_address,
            "STORAGEDIFFS_SOURCE": "geth",
            "CLIENT_IPCPATH": config['CLIENT_IPCPATH']
        }
    )
    ecs.FargateService(
        self, "VdbExecuteService",
        cluster=self.cluster,
        task_definition=execute_task_definition,
        service_name="VdbExecuteService",
        security_group=security_group
    )
def create_master_task(self):
    """
    Create the master task
    """
    task = ecs.FargateTaskDefinition(self, "task",
                                     memory_limit_mib=512,
                                     cpu=256)
    task.add_to_task_role_policy(self.create_dynamodb_access_policy())
    task.add_to_task_role_policy(self.create_xray_access_policy())
    return task
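# The two policy helpers above are defined elsewhere in the class; a minimal
# sketch of what they might look like (the action lists and the `self.table`
# attribute are assumptions, not taken from the original):
def create_dynamodb_access_policy(self):
    # Read/write access scoped to the task's DynamoDB table
    return iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=["dynamodb:GetItem", "dynamodb:PutItem",
                 "dynamodb:UpdateItem", "dynamodb:Query"],
        resources=[self.table.table_arn])

def create_xray_access_policy(self):
    # X-Ray trace uploads are not resource-scoped, hence the wildcard
    return iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=["xray:PutTraceSegments", "xray:PutTelemetryRecords"],
        resources=["*"])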
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)
    ecs_vpc = aws_ec2.Vpc(
        self, "EcsVPC",
        max_azs=2)  # minimum number of AZs is 2 for this stack to deploy
    ecs_vpc.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
    ecs_cluster = aws_ecs.Cluster(self, "EcsCluster", vpc=ecs_vpc)
    ecs_taskdefinition = aws_ecs.FargateTaskDefinition(
        self, "EcsTaskDefinition")
    ecs_taskdefinition.add_container(
        "ecsContainer",
        image=aws_ecs.ContainerImage.from_registry(
            'coderaiser/cloudcmd:latest'),  # change image here
        cpu=256,
        memory_limit_mib=512,
        # container port used for the image
        port_mappings=[aws_ecs.PortMapping(container_port=8000)]
    )
    ecs_fargate_service = aws_ecs.FargateService(
        self, "FargateService",
        cluster=ecs_cluster,
        task_definition=ecs_taskdefinition)
    ecs_application_lb = aws_elasticloadbalancingv2.ApplicationLoadBalancer(
        self, "EcsAlb",
        vpc=ecs_vpc,
        internet_facing=True)
    alb_listener = ecs_application_lb.add_listener("AlbListener", port=80)
    ecs_tg = alb_listener.add_targets(
        "EcsTG",
        port=80,
        targets=[
            ecs_fargate_service.load_balancer_target(
                container_name="ecsContainer",
                container_port=8000  # container port used for the image
            )
        ])
    cdk.CfnOutput(
        self, "WebUrl",
        description="URL from the load balancer",
        value=f"http://{ecs_application_lb.load_balancer_dns_name}/")
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)
    # The code that defines your stack goes here
    ecr_repository = ecr.Repository(
        self, "ecs-devops-sandbox-repository",
        repository_name="ecs-devops-sandbox-repository")
    vpc = ec2.Vpc(self, "ecs-devops-sandbox-vpc", max_azs=3)
    cluster = ecs.Cluster(self, "ecs-devops-sandbox-cluster",
                          cluster_name="ecs-devops-sandbox-cluster",
                          vpc=vpc)
    execution_role = iam.Role(
        self, "ecs-devops-sandbox-execution-role",
        assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
        role_name="ecs-devops-sandbox-execution-role")
    execution_role.add_to_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            resources=["*"],
                            actions=[
                                'ecr:GetAuthorizationToken',
                                'ecr:BatchCheckLayerAvailability',
                                'ecr:GetDownloadUrlForLayer',
                                'ecr:BatchGetImage',
                                'logs:CreateLogStream',
                                'logs:PutLogEvents'
                            ]))
    task_definition = ecs.FargateTaskDefinition(
        self, "ecs-devops-sandbox-task-definition",
        execution_role=execution_role,
        family="ecs-devops-sandbox-task-definition")
    container = task_definition.add_container(
        "ecs-devops-sandbox",
        image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"))
    service = ecs.FargateService(self, "ecs-devops-sandbox-service",
                                 cluster=cluster,
                                 task_definition=task_definition,
                                 service_name="ecs-devops-sandbox-service")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # Create the ECR Repository ecr_repository = ecr.Repository( self, "zd-add-cust-card-to-stripe-repository", repository_name="zd-add-cust-card-to-stripe-repository") # Create the ECS Cluster (and VPC) vpc = ec2.Vpc(self, "zd-add-cust-card-to-stripe-vpc", max_azs=3) cluster = ecs.Cluster( self, "zd-add-cust-card-to-stripe-cluster", cluster_name="zd-add-cust-card-to-stripe-cluster", vpc=vpc) # Create the ECS Task Definition with placeholder container (and named Task Execution IAM Role) execution_role = iam.Role( self, "zd-add-cust-card-to-stripe-execution-role", assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"), role_name="zd-add-cust-card-to-stripe-execution-role") execution_role.add_to_policy( iam.PolicyStatement(effect=iam.Effect.ALLOW, resources=["*"], actions=[ "ecr:GetAuthorizationToken", "ecr:BatchCheckLayerAvailability", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage", "logs:CreateLogStream", "logs:PutLogEvents" ])) task_definition = ecs.FargateTaskDefinition( self, "zd-add-cust-card-to-stripe-task-definition", execution_role=execution_role, family="zd-add-cust-card-to-stripe-task-definition") container = task_definition.add_container( "zd-add-cust-card-to-stripe", image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample")) # Create the ECS Service service = ecs.FargateService( self, "zd-add-cust-card-to-stripe-service", cluster=cluster, task_definition=task_definition, service_name="zd-add-cust-card-to-stripe-service")
def __init__(self, scope: core.Stack, id: str, cluster: ecs.ICluster,
             vpc, private_subnets, sec_group, desired_service_count,
             **kwargs):
    super().__init__(scope, id, **kwargs)
    self.cluster = cluster
    self.vpc = vpc
    self.private_subnets = private_subnets
    self.sec_group = sec_group
    self.service_discovery = cluster.default_cloud_map_namespace
    self.desired_service_count = desired_service_count
    self.task_definition = ecs.FargateTaskDefinition(
        self, "BackendNodeServiceTaskDef",
        cpu=256,
        memory_limit_mib=512,
    )
    self.task_definition.add_container(
        "BackendNodeServiceContainer",
        image=ecs.ContainerImage.from_registry("brentley/ecsdemo-nodejs"),
        logging=ecs.LogDrivers.firelens(
            options={
                "Name": "cloudwatch",
                "log_key": "log",
                "region": "us-west-2",
                "delivery_stream": "my-stream",
                "log_group_name": "firelens-fluent-bit",
                "auto_create_group": "true",
                "log_stream_prefix": "from-fluent-bit"
            }
        )
    )
    self.fargate_service = ecs.FargateService(
        self, "BackendNodeFargateService",
        service_name="Fargate-Backend-NodeJS",
        task_definition=self.task_definition,
        cluster=self.cluster,
        max_healthy_percent=100,
        min_healthy_percent=0,
        vpc_subnets={"subnet_name": "Private"},
        desired_count=self.desired_service_count,
        security_group=self.sec_group,
        cloud_map_options={"name": "ecsdemo-nodejs"},
    )
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)
    # VPC
    chat_app_vpc = aws_ec2.Vpc(self, "ChatAppVpc",
                               max_azs=2,
                               nat_gateways=1)
    # Fargate cluster (placed in the VPC above; otherwise the cluster
    # would create its own VPC and chat_app_vpc would go unused)
    chat_app_cluster = aws_ecs.Cluster(self, "ChatAppCluster",
                                       vpc=chat_app_vpc)
    # Fargate task definition
    chat_app_fg_def = aws_ecs.FargateTaskDefinition(
        self, "ChatAppTaskDefinition")
    # Container definition
    chat_app_container = chat_app_fg_def.add_container(
        "ChatAppContainer",
        image=aws_ecs.ContainerImage.from_registry("manuja/chat-app:latest"),
        environment={"github": "https://github.com/manujakau"})
    # Port mapping to the container
    chat_app_container.add_port_mappings(
        aws_ecs.PortMapping(container_port=3000,
                            protocol=aws_ecs.Protocol.TCP))
    # Attach a load balancer
    chat_app_alb = aws_ecs_patterns.ApplicationLoadBalancedFargateService(
        self, "ChatAppALB",
        cluster=chat_app_cluster,
        task_definition=chat_app_fg_def,
        assign_public_ip=False,
        public_load_balancer=True,
        listener_port=80,
        desired_count=1,
        service_name="ServerlessChatApp")
    # Output
    chat_app_output = core.CfnOutput(
        self, "chatappoutput",
        value=f"http://{chat_app_alb.load_balancer.load_balancer_dns_name}",
        description="Chat app url")
def __init__(self, scope: core.Construct, id: str, custom_vpc, **kwargs) -> None: super().__init__(scope, id, **kwargs) # The code that defines your stack goes here fargate_cluster = _ecs.Cluster(self, "fargateClusterId", vpc=custom_vpc) core.CfnOutput(self, "ClusterNameOutput", value=f"{fargate_cluster.cluster_name}", export_name="ClusterName") """ Service running chat service """ chat_app_task_def = _ecs.FargateTaskDefinition(self, "chatAppTaskDef") chat_app_container = chat_app_task_def.add_container( "chatAppContainer", environment={'github': 'https://github.com/miztiik'}, image=_ecs.ContainerImage.from_registry( "mystique/fargate-chat-app:latest"), logging=_ecs.LogDrivers.aws_logs(stream_prefix="Mystique")) chat_app_container.add_port_mappings( _ecs.PortMapping(container_port=3000, protocol=_ecs.Protocol.TCP)) chat_app_service = _ecs_patterns.ApplicationLoadBalancedFargateService( self, "chatAppServiceId", cluster=fargate_cluster, task_definition=chat_app_task_def, assign_public_ip=False, public_load_balancer=True, listener_port=80, desired_count=1, # cpu=1024, # memory_limit_mib=2048, # service_name="chatAppService", ) core.CfnOutput( self, "chatAppServiceUrl", value= f"http://{chat_app_service.load_balancer.load_balancer_dns_name}")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    # Create the VPC for the honeypot(s); the default is all AZs in the region
    vpc = ec2.Vpc(self, "HoneypotVpc", max_azs=3)
    # Create the ECS cluster where Fargate can deploy the Docker containers
    cluster = ecs.Cluster(self, "HoneypotCluster", vpc=vpc)
    # Define the task definition for the Fargate service
    task_definition = ecs.FargateTaskDefinition(self, "HoneypotTasks",
                                                cpu=256,
                                                memory_limit_mib=512)
    # Container definition
    container_definition = ecs.ContainerDefinition(
        self, "HoneypotContainerDefinition",
        image=ecs.ContainerImage.from_registry("statixs/cowrie"),
        # image=ecs.ContainerImage.from_asset(directory="docker"),
        task_definition=task_definition,
        stop_timeout=core.Duration.seconds(2),
        logging=ecs.AwsLogDriver(
            stream_prefix="cowrie",
            log_retention=logs.RetentionDays.ONE_WEEK,
        ),
    )
    # ECS security group definition
    sg_ssh = ec2.SecurityGroup(self, "honeypot-sg-ssh",
                               vpc=vpc,
                               description="Allow SSH to the honeypot")
    sg_ssh.add_ingress_rule(ec2.Peer.ipv4("0.0.0.0/0"), ec2.Port.tcp(22))
    # Fargate service definition
    fargate_service = ecs.FargateService(
        self, "HoneypotFargate",
        cluster=cluster,
        assign_public_ip=True,
        desired_count=1,
        security_group=sg_ssh,
        task_definition=task_definition,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4)