def create_ghost_ecs_service(self, cluster, website_url):
    """Create an EC2-backed ECS service running the Ghost blogging container.

    cluster: the ecs.Cluster the service is placed in.
    website_url: public URL for the blog; when falsy no environment is set.
    Returns the ecs.Ec2Service construct.
    """
    # TODO: Set up persistent storage with EFS once CDK supports this
    # https://github.com/aws/aws-cdk/issues/6918
    ghost_task = ecs.Ec2TaskDefinition(
        self,
        'GhostTaskDef',
    )
    if website_url:
        env = {'url': website_url}
    else:
        env = None
    ghost_container = ghost_task.add_container(
        'GhostContainer',
        # Change this container version to update Ghost version
        image=ecs.ContainerImage.from_registry('ghost:3.16'),
        memory_limit_mib=256,
        environment=env,
    )
    # Ghost listens on 2368 inside the container; expose it on host port 80.
    ghost_container.add_port_mappings(
        ecs.PortMapping(
            container_port=2368,
            host_port=80,
            protocol=ecs.Protocol.TCP,
        ))
    return ecs.Ec2Service(
        self,
        'GhostService',
        cluster=cluster,
        task_definition=ghost_task,
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision an EC2-backed ECS cluster and run a single web service.

    Creates a 2-AZ VPC, a cluster with one t2.micro container instance, a
    task definition pointing at the micfin ECR image, and a service for it.
    """
    # BUG FIX: kwargs must be double-splatted; the original passed *kwargs,
    # which forwards the dict's keys as positional arguments.
    super().__init__(scope, id, **kwargs)

    # Create a cluster with a single t2.micro, replaced in place on update.
    vpc = ec2.Vpc(self, "MicFinVpc", max_azs=2)
    cluster = ecs.Cluster(self, 'EcsCluster', vpc=vpc)
    cluster.add_capacity(
        "DefaultAutoScalingGroup",
        instance_type=ec2.InstanceType("t2.micro"),
        machine_image=ecs.EcsOptimizedAmi(),
        update_type=autoscaling.UpdateType.REPLACING_UPDATE,
        desired_capacity=1)

    # Create Task Definition
    task_definition = ecs.Ec2TaskDefinition(self, "TaskDef")
    container = task_definition.add_container(
        "web",
        image=ecs.ContainerImage.from_registry(
            "210525354699.dkr.ecr.ap-southeast-1.amazonaws.com/micfin-repo"
        ),
        memory_limit_mib=512,
        # BUG FIX: the Python binding is LogDrivers.aws_logs(stream_prefix=...);
        # `awslogs({streamPrefix: ...})` is invalid Python (streamPrefix is an
        # undefined name) and not a CDK API.
        logging=ecs.LogDrivers.aws_logs(stream_prefix='EventDemo'))
    port_mapping = ecs.PortMapping(container_port=80,
                                   host_port=8080,
                                   protocol=ecs.Protocol.TCP)
    container.add_port_mappings(port_mapping)

    # Create Service
    service = ecs.Ec2Service(self, "Service",
                             cluster=cluster,
                             task_definition=task_definition)
def __init__(self, scope: core.Construct, id: str, ecs: aws_ecs.Cluster,
             registry: aws_ecr.Repository, **kwargs) -> None:
    """Run the hello-docker image from *registry* as a service on *ecs*."""
    super().__init__(scope, id, **kwargs)

    # The container image is pulled from the supplied ECR repository.
    image = aws_ecs.ContainerImage.from_ecr_repository(registry)

    self.task_definition = aws_ecs.Ec2TaskDefinition(self, "TaskDef")
    self.task_definition.add_container(
        "hello-docker",
        image=image,
        memory_limit_mib=128)

    # Kept on self so callers can reference the service construct.
    self.tweet_ingest_service = aws_ecs.Ec2Service(
        self,
        "Service",
        cluster=ecs,
        task_definition=self.task_definition)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Stand up an nginx service behind an ALB on an EC2-backed cluster."""
    super().__init__(scope, id, **kwargs)

    # Networking plus cluster capacity: one ASG of t3.nano instances.
    vpc = ec2.Vpc(self, "CDK-LB", max_azs=2)
    cluster = ecs.Cluster(self, "CDK-Cluster", vpc=vpc)
    cluster.add_capacity("DefaultAutoScalingGroup",
                         instance_type=ec2.InstanceType("t3.nano"))

    # NOTE(review): despite the "nginx-awsvpc" id this task uses BRIDGE
    # networking -- confirm which mode is intended.
    task_definition = ecs.Ec2TaskDefinition(
        self,
        "nginx-awsvpc",
        network_mode=ecs.NetworkMode.BRIDGE,
    )
    web = task_definition.add_container(
        "nginx",
        image=ecs.ContainerImage.from_registry("nginx:latest"),
        cpu=100,
        memory_limit_mib=256,
        essential=True)
    web.add_port_mappings(
        ecs.PortMapping(container_port=80, protocol=ecs.Protocol.TCP))

    # ALB-fronted EC2 service, listening on port 80.
    ecs_service = ecs_patterns.ApplicationLoadBalancedEc2Service(
        self,
        "EC2-Service",
        cluster=cluster,
        memory_limit_mib=512,
        task_definition=task_definition,
        listener_port=80)

    core.CfnOutput(self, "LoadBalancerDNS",
                   value=ecs_service.load_balancer.load_balancer_dns_name)
def __init__(self, scope: core.Construct, name: str, **kwargs) -> None:
    """Build a tutorial ECS cluster backed by a hand-rolled auto scaling
    group and register a CPU/memory stress task on it."""
    super().__init__(scope, name, **kwargs)

    # step 0/1 - VPC and the ECS cluster that owns the instances
    vpc = ec2.Vpc(self, "CdkTutorial_Vpc", max_azs=2)
    cluster = ecs.Cluster(self, "CdkTutorial_Cluster", vpc=vpc)

    # step 2 - explicit ASG (0..10 t2.micro); instances join the cluster by
    # writing ECS_CLUSTER into the agent config via user data
    scaling_group = autoscaling.AutoScalingGroup(
        self,
        "CdkTutorial_ASG",
        instance_type=ec2.InstanceType('t2.micro'),
        machine_image=ecs.EcsOptimizedImage.amazon_linux2(),
        vpc=vpc,
        max_capacity=10,
        min_capacity=0,
    )
    scaling_group.add_user_data(
        f"echo ECS_CLUSTER={cluster.cluster_name} >> /etc/ecs/ecs.config")
    cluster.add_auto_scaling_group(scaling_group)

    # step 3 - task definition; the container image is built from the local
    # ./docker directory and simulates load (1 CPU, 128MB RAM, 300s)
    task_definition = ecs.Ec2TaskDefinition(
        self,
        "CdkTutorial_TaskDefinition",
        network_mode=ecs.NetworkMode.BRIDGE,
    )
    stress_container = task_definition.add_container(
        "CdkTutorialContainer",
        image=ecs.ContainerImage.from_asset(
            os.path.join(os.path.dirname(__file__), "docker")),
        command=["--cpu", "1", "--vm-bytes", "128M", "--timeout", "300s"],
        memory_reservation_mib=256,
    )
def __init__(self, scope: core.Construct, id: str, cluster: ecs.Cluster,
             hosted_zone_id: str, zone_name: str, sub_domain: str,
             host_port: int, file_system_id: str, mad_secret_arn: str,
             mad_domain_name: str, **kwargs) -> None:
    """Deploy an IIS service on ECS behind an HTTPS ALB, backed by a custom
    FSx-aware task definition created outside of CDK.

    The CDK task definition created below is a placeholder only: the
    service's CloudFormation TaskDefinition property is overridden at the
    end with the ARN returned by custom_fsx_task, because importing an
    existing task definition is not supported (aws/aws-cdk#6240).
    """
    super().__init__(scope, id, **kwargs)
    # check context values -- all three are required to build the DNS name
    for v in [sub_domain, hosted_zone_id, zone_name]:
        if v == '':
            raise Exception(
                "Please provide required parameters sub_domain, hosted_zone_id, zone_name via context variables"
            )
    # configure zone: look up the existing public hosted zone and derive the
    # fully-qualified domain name for the service
    domain_zone = r53.PublicHostedZone.from_hosted_zone_attributes(
        self,
        "hosted_zone",
        hosted_zone_id=hosted_zone_id,
        zone_name=zone_name)
    domain_name = sub_domain + "." + zone_name
    # setup for pseudo parameters
    stack = core.Stack.of(self)

    ## Custom Resource - Task
    # Task and execution roles for the custom task definition.  The
    # execution role may read the Managed AD secret and describe FSx file
    # systems, in addition to the standard ECS execution-role policy.
    family = stack.stack_name + "_webserver"
    task_role = iam.Role(
        self,
        "TaskRole",
        role_name=family + '_task',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))
    execution_role = iam.Role(
        self,
        "ExecutionRole",
        role_name=family + '_execution',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        inline_policies=[
            iam.PolicyDocument(statements=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=[
                                        "secretsmanager:GetSecretValue",
                                        "secretsmanager:DescribeSecret"
                                    ],
                                    resources=[mad_secret_arn]),
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=["fsx:DescribeFileSystems"],
                                    resources=["*"])
            ])
        ],
        managed_policies=[
            iam.ManagedPolicy.from_managed_policy_arn(
                self, "AmazonECSTaskExecutionRolePolicy",
                'arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy'
            )
        ])
    # Custom Task Definition -- built out-of-band (presumably via a custom
    # resource helper); returns the ARN the service will actually use.
    task_definition_arn = custom_fsx_task(self,
                                          host_port=host_port,
                                          family=family,
                                          file_system_id=file_system_id,
                                          mad_secret_arn=mad_secret_arn,
                                          mad_domain_name=mad_domain_name,
                                          task_role=task_role,
                                          execution_role=execution_role)
    # importing a task is broken https://github.com/aws/aws-cdk/issues/6240
    # task_definition = ecs.Ec2TaskDefinition.from_ec2_task_definition_arn(self, "TaskDef",
    #     ec2_task_definition_arn=task_definition_arn)

    # Task Definition - Work Around Part 1 (Create a temp task, this won't actually be used)
    task_definition = ecs.Ec2TaskDefinition(
        self,
        "TaskDef",
        #network_mode=ecs.NetworkMode.DEFAULT # Parameter not available yet, escape hatch required
    )
    # Edit Ec2TaskDefinition via an Escape Hatch to remove network_mode (required for windows) - This Task Definition is completely ignored for now...
    # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-taskdefinition.html#cfn-ecs-taskdefinition-networkmode
    cfn_task_definition = task_definition.node.default_child
    cfn_task_definition.add_property_deletion_override('NetworkMode')
    container = task_definition.add_container(
        "IISContainer",
        image=ecs.ContainerImage.from_registry('microsoft/iis'),
        memory_limit_mib=1028,
        cpu=512,
        entry_point=["powershell", "-Command"],
        command=["C:\\ServiceMonitor.exe w3svc"],
    )
    container.add_port_mappings(
        ecs.PortMapping(protocol=ecs.Protocol.TCP,
                        container_port=80,
                        host_port=host_port))
    # Task Definition - Work Around Part 1 End

    # ECS Service, ALB, Cert: HTTPS listener with HTTP->HTTPS redirect and a
    # DNS record in the hosted zone looked up above.
    ApplicationLoadBalancedEc2Service = ecs_patterns.ApplicationLoadBalancedEc2Service(
        self,
        "iis-service",
        cluster=cluster,
        task_definition=task_definition,
        desired_count=2,
        domain_name=domain_name,
        domain_zone=domain_zone,
        protocol=elbv2.ApplicationProtocol.HTTPS,
        redirect_http=True)
    # Task Definition - Work Around Part 2 (Override the temp task we created earlier that won't actually be used)
    cfn_service = ApplicationLoadBalancedEc2Service.node.find_child(
        'Service').node.find_child('Service')
    cfn_service.add_property_override('TaskDefinition', task_definition_arn)
# Cluster of c5.xlarge container instances for the onetest stack.
cluster = ecs.Cluster(
    stack, "wes-onetest-ecs",
    vpc=vpc
)
cluster.add_capacity("DefaultAutoScalingGroup",
                     instance_type=ec2.InstanceType("c5.xlarge"),
                     key_name='aws-eb', max_capacity=4,
                     machine_image=amitouse,
                     desired_capacity=2, min_capacity=2)
# Create a task definition with its own elastic network interface
# NOTE(review): this bare call constructs a principal and discards it -- it
# has no effect and looks like leftover debugging.
iam.ServicePrincipal('task')
task_definition_vistaweb = ecs.Ec2TaskDefinition(
    stack, "west-onetest-task-vistaweb",
    network_mode=ecs.NetworkMode.AWS_VPC,
    volumes=[docker_volume]
)
task_definition_varsleuth = ecs.Ec2TaskDefinition(
    stack, "west-onetest-task-varsleuth",
    network_mode=ecs.NetworkMode.AWS_VPC
)
task_definition_voncweb = ecs.Ec2TaskDefinition(
    stack, "west-onetest-task-voncweb",
    network_mode=ecs.NetworkMode.AWS_VPC,
)
task_definition_dispatcher = ecs.Ec2TaskDefinition(
    stack, "west-onetest-task-dispatcher",
)
# Debug output of the cluster's ASG type.
print('cluster sec group ', str(type(cluster.autoscaling_group)))
# NOTE(review): a second add_capacity with the same construct id
# "DefaultAutoScalingGroup" collides with the call above -- these look like
# two alternative snippets pasted together; confirm which one is live.
cluster.add_capacity("DefaultAutoScalingGroup",
                     instance_type=ec2.InstanceType("c5.xlarge"),
                     key_name='Vonc-Prod-Key', max_capacity=4,
                     machine_image=amitouse,
                     desired_capacity=2, min_capacity=2)
print('connections ', str(cluster.connections))
# Allow SSH (22/tcp) into the container instances from anywhere, and attach
# an imported application security group.
port = ec2.Port(protocol=ec2.Protocol.TCP,
                string_representation='inbound to container instances',
                from_port=22, to_port=22)
cluster.connections.add_security_group(app_security_group_import)
cluster.connections.allow_from_any_ipv4(port, 'in bound to container instances')
# Create a task definition with its own elastic network interface
task_definition_vistaweb = ecs.Ec2TaskDefinition(
    stack, "VONC_VISTA-task-vistaweb",
    network_mode=ecs.NetworkMode.AWS_VPC,
    volumes=[docker_volume],
    family='VONC_VISTA-Vista'
)
task_definition_vistaweb.add_to_execution_role_policy(custom_policy)
task_definition_vistaweb.add_to_task_role_policy(custom_policy)
#task_definition_vistaweb.task_role.add_managed_policy(ecr_read_only_policy)
#print ("testk role ",str(taskrole))
#secret_policy.attach_to_role(task_definition_vistaweb.task_role)
#secret_policy.attach_to_role(task_definition_vistaweb.role)
#secret_policy.attach_to_role(task_definition_vistaweb.role)
# NOTE(review): this call is truncated in the source (no closing paren).
task_definition_varsleuth = ecs.Ec2TaskDefinition(
    stack, "VONC_VISTA-task-varsleuth",
    network_mode=ecs.NetworkMode.AWS_VPC,
    family='VONC_VISTA-VarSleuth'
# Standalone CDK app: ECS-on-EC2 demo running an asset-built nginx image.
app = core.App()
stack = core.Stack(app, "lunchNlearn-cdk-ecs")

# Create a VPC spanning up to three AZs.
vpc = ec2.Vpc(stack, "lunchNlearn-cdk-vpc", max_azs=3, cidr="12.0.0.0/16")

# Create an ECS cluster
cluster = ecs.Cluster(stack, "Cluster", vpc=vpc)

# Add capacity to it: three t3.xlarge container instances.
cluster.add_capacity("DefaultAutoScalingGroupCapacity",
                     instance_type=ec2.InstanceType("t3.xlarge"),
                     desired_capacity=3)

# awsvpc networking gives each task its own ENI.
task_definition = ecs.Ec2TaskDefinition(stack, "TaskDef",
                                        network_mode=ecs.NetworkMode.AWS_VPC)

# The image is built from the local ./docker directory at synth time.
docker_image_asset = ecr_assets.DockerImageAsset(stack, "container-image",
                                                 directory="docker")
container = task_definition.add_container(
    "nginx-container",
    image=ecs.ContainerImage.from_docker_image_asset(docker_image_asset),
    memory_limit_mib=512)
port_mapping = ecs.PortMapping(container_port=80)
container.add_port_mappings(port_mapping)
def createResources(self, ns):
    """Create security-group rules, task definitions and EC2 services for
    the Bento backend (api) and frontend (ui) under namespace *ns*.

    ns: key into self.config holding the container-port settings.
    """
    # Security Group Updates
    # The ALB's and the ECS ASG's security groups created earlier.
    albsg = self.bentoALB.connections.security_groups[0]
    self.ecssg = self.bentoECS_ASG.connections.security_groups[0]
    # NOTE(review): this boto3 lookup runs at synth time and needs live AWS
    # credentials -- the 'bento-bastion-sg' group must already exist.
    botoec2 = boto3.client('ec2')
    group_name = 'bento-bastion-sg'
    response = botoec2.describe_security_groups(
        Filters=[dict(Name='group-name', Values=[group_name])])
    bastion_group_id = response['SecurityGroups'][0]['GroupId']
    self.bastionsg = ec2.SecurityGroup.from_security_group_id(
        self, 'bastion-security-group', security_group_id=bastion_group_id)
    # Allow the ALB to reach both container ports, and the bastion to SSH in.
    self.ecssg.add_ingress_rule(
        albsg, ec2.Port.tcp(int(self.config[ns]['backend_container_port'])))
    self.ecssg.add_ingress_rule(
        albsg, ec2.Port.tcp(int(self.config[ns]['frontend_container_port'])))
    self.ecssg.add_ingress_rule(self.bastionsg, ec2.Port.tcp(22))

    # Backend Task Definition
    backendECSTask = ecs.Ec2TaskDefinition(
        self, "bento-ecs-backend", network_mode=ecs.NetworkMode.AWS_VPC)
    backendECSContainer = backendECSTask.add_container(
        'api',
        image=ecs.ContainerImage.from_registry(
            "cbiitssrepo/bento-backend:latest"),
        memory_reservation_mib=1024,
        cpu=512)
    # Host and container ports match, as required by awsvpc networking.
    backend_port_mapping = ecs.PortMapping(
        container_port=int(self.config[ns]['backend_container_port']),
        host_port=int(self.config[ns]['backend_container_port']),
        protocol=ecs.Protocol.TCP)
    backendECSContainer.add_port_mappings(backend_port_mapping)

    # Backend Service
    self.backendService = ecs.Ec2Service(
        self,
        "{}-backend".format(ns),
        service_name="{}-backend".format(ns),
        task_definition=backendECSTask,
        cluster=self.bentoECS)

    # Frontend Task Definition
    frontendECSTask = ecs.Ec2TaskDefinition(
        self, "bento-ecs-frontend", network_mode=ecs.NetworkMode.AWS_VPC)
    frontendECSContainer = frontendECSTask.add_container(
        'ui',
        image=ecs.ContainerImage.from_registry(
            "cbiitssrepo/bento-frontend:latest"),
        memory_reservation_mib=1024,
        cpu=512)
    frontend_port_mapping = ecs.PortMapping(
        container_port=int(self.config[ns]['frontend_container_port']),
        host_port=int(self.config[ns]['frontend_container_port']),
        protocol=ecs.Protocol.TCP)
    frontendECSContainer.add_port_mappings(frontend_port_mapping)

    # Frontend Service
    self.frontendService = ecs.Ec2Service(
        self,
        "{}-frontend".format(ns),
        service_name="{}-frontend".format(ns),
        task_definition=frontendECSTask,
        cluster=self.bentoECS)
# Two-AZ VPC plus a single t2.micro of cluster capacity.
vpc = ec2.Vpc(
    stack, "Vpc",
    max_azs=2  # BUG FIX: the Vpc prop is max_azs (was max_a_zs)
)

cluster = ecs.Cluster(
    stack, "EcsCluster",
    vpc=vpc
)
cluster.add_capacity("DefaultAutoScalingGroup",
                     instance_type=ec2.InstanceType("t2.micro"))

# Create a task definition with placement constraints: never co-locate two
# copies of this task on the same container instance.
task_definition = ecs.Ec2TaskDefinition(
    stack, "TaskDef",
    placement_constraints=[
        ecs.PlacementConstraint.distinct_instances()
    ]
)

container = task_definition.add_container(
    "web",
    image=ecs.ContainerImage.from_registry("nginx:latest"),
    memory_limit_mib=256,  # BUG FIX: keyword is memory_limit_mib (was memory_limit_mi_b)
)
# BUG FIX: add_port_mappings takes ecs.PortMapping objects (not bare
# keyword args), and the enum member is Protocol.TCP, not Protocol.Tcp.
container.add_port_mappings(
    ecs.PortMapping(container_port=80,
                    host_port=8080,
                    protocol=ecs.Protocol.TCP)
)

# Create Service
def __init__(self, scope: core.Construct, id: str, vpc, ecs_cluster, role,
             target_url: str, number_of_tasks=1, **kwargs) -> None:
    """Deploy a Locust load-generator node as an ECS service.

    role: "master", "worker" or "standalone" -- selects the Locust
        environment variables, the exposed ports, and whether an ALB for
        the web UI is created.
    target_url: system under test, passed to Locust via TARGET_URL.
    number_of_tasks: desired count for the service.
    """
    super().__init__(scope, id, **kwargs)
    name = id
    task_def = ecs.Ec2TaskDefinition(self, name,
                                     network_mode=ecs.NetworkMode.AWS_VPC)

    # Locust configuration via environment variables (Locust >= 1.x names).
    container_env = {}
    container_env["TARGET_URL"] = target_url
    if role == "worker":
        # Workers locate the master through its Cloud Map name.
        container_env["LOCUST_MASTER_NODE_HOST"] = "master.loadgen"
        container_env["LOCUST_MODE_WORKER"] = "True"
    elif role == "master":
        container_env["LOCUST_MODE_MASTER"] = "True"

    locust_container = task_def.add_container(
        name + "container",
        # Create an image we using the dockerfile in ./locust
        image=ecs.ContainerImage.from_asset("locust"),
        memory_reservation_mib=512,
        essential=True,
        logging=ecs.LogDrivers.aws_logs(stream_prefix=name),
        environment=container_env)
    # Raise the open-file limit; load generators hold many sockets.
    locust_container.add_ulimits(
        ecs.Ulimit(name=ecs.UlimitName.NOFILE,
                   soft_limit=65536,
                   hard_limit=65536))

    # 8089 = web UI; 5557/5558 = master<->worker coordination ports.
    web_port_mapping = ecs.PortMapping(container_port=8089)
    if role != "standalone":
        worker1_port_mapping = ecs.PortMapping(container_port=5557)
        worker2_port_mapping = ecs.PortMapping(container_port=5558)
        locust_container.add_port_mappings(web_port_mapping,
                                           worker1_port_mapping,
                                           worker2_port_mapping)
    else:
        locust_container.add_port_mappings(web_port_mapping)

    security_group = ec2.SecurityGroup(self, "Locust", vpc=vpc,
                                       allow_all_outbound=True)
    security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8089))
    if role != "standalone":
        security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                        ec2.Port.tcp(5557))
        security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                        ec2.Port.tcp(5558))

    # Create the ecs service; register it in Cloud Map under the role name
    # so workers can resolve master.loadgen.
    locust_service = ecs.Ec2Service(self, name + "service",
                                    cluster=ecs_cluster,
                                    task_definition=task_def,
                                    security_group=security_group,
                                    desired_count=number_of_tasks)
    locust_service.enable_cloud_map(name=role)

    # Create the ALB to present the Locust UI (not needed on pure workers)
    if role != "worker":
        self.lb = elbv2.ApplicationLoadBalancer(self, "LoustLB", vpc=vpc,
                                                internet_facing=True)
        # Forward port 80 to port 8089
        listener = self.lb.add_listener("Listener", port=80)
        listener.add_targets("ECS1",
                             port=8089,
                             protocol=elbv2.ApplicationProtocol.HTTP,
                             targets=[locust_service])
        core.CfnOutput(self, "lburl",
                       description="URL for ALB fronting locust master",
                       value="http://{}".format(
                           self.lb.load_balancer_dns_name))
# BUG FIX: DeploymentController must be instantiated.  The original assigned
# the class itself and then set `type` on it, which mutates a class
# attribute shared by every other use of DeploymentController in the process.
deployment_mode = ecs.DeploymentController(
    type=ecs.DeploymentControllerType.CODE_DEPLOY)

# ACM certificate used by the HTTPS listener elsewhere in this stack.
certificate_arn_sema4 = 'arn:aws:acm:us-east-1:417302553802:certificate/b46a1c06-bc3b-4012-8f47-b2735ceccbc5'

cluster = ecs.Cluster(stack, "wes-onetest-ecs", vpc=vpc)
cluster.add_capacity("DefaultAutoScalingGroup",
                     instance_type=ec2.InstanceType("t2.medium"),
                     key_name='aws-eb',
                     max_capacity=3,
                     machine_image=amitouse)

# Create a task definition with its own elastic network interface
task_definition_vistaweb = ecs.Ec2TaskDefinition(
    stack, "west-onetest-task-vistaweb"
    #network_mode=ecs.NetworkMode.AWS_VPC,
)
vistaweb_container = task_definition_vistaweb.add_container(
    "west-onetest-vistaweb",
    image=ecs.ContainerImage.from_registry(ecr_repo_vistaweb),
    cpu=16,
    memory_limit_mib=256,
    essential=True,
    environment={
        'USE': 'me',
        'LAGI': 'ddd'
    })
port_mapping = ecs.PortMapping(container_port=80,
                               host_port=8080,
                               protocol=ecs.Protocol.TCP)
)
# NOTE(review): assigning the DeploymentController *class* and then setting
# .type mutates a class attribute globally.  This should be
# ecs.DeploymentController(type=ecs.DeploymentControllerType.CODE_DEPLOY).
deployment_mode = ecs.DeploymentController
deployment_mode.type = ecs.DeploymentControllerType.CODE_DEPLOY
# ACM certificate used by the HTTPS listener elsewhere in this stack.
certificate_arn_sema4 = 'arn:aws:acm:us-east-1:417302553802:certificate/b46a1c06-bc3b-4012-8f47-b2735ceccbc5'
cluster = ecs.Cluster(stack, "wes-onetest-ecs", vpc=vpc)
cluster.add_capacity("DefaultAutoScalingGroup",
                     instance_type=ec2.InstanceType("t2.medium"),
                     key_name='aws-eb',
                     max_capacity=3,
                     machine_image=amitouse)
# Create a task definition with its own elastic network interface
# (network_mode intentionally left commented out on all three).
task_definition_voncweb = ecs.Ec2TaskDefinition(
    stack, "west-onetest-task-voncweb"
    #network_mode=ecs.NetworkMode.AWS_VPC,
)
task_definition_dispatcher = ecs.Ec2TaskDefinition(
    stack, "west-onetest-task-dispatcher"
    #network_mode=ecs.NetworkMode.AWS_VPC,
)
task_definition_vistaweb = ecs.Ec2TaskDefinition(
    stack, "west-onetest-task-vistaweb"
    #network_mode=ecs.NetworkMode.AWS_VPC,
)
# NOTE(review): this call is truncated in the source (continues elsewhere).
voncweb_container = task_definition_voncweb.add_container(
    "west-onetest-voncwebserver",
    image=ecs.ContainerImage.from_registry(ecr_repo_voncweb),
# BUG FIX: the construct is ec2.Vpc with max_azs; VpcNetwork/max_a_zs come
# from a pre-1.0 CDK and no longer exist.  (Matches the corrected snippet
# elsewhere in this file.)
vpc = ec2.Vpc(
    stack, "Vpc",
    max_azs=2
)

cluster = ecs.Cluster(
    stack, "EcsCluster",
    vpc=vpc
)
cluster.add_capacity("DefaultAutoScalingGroup",
                     instance_type=ec2.InstanceType("t2.micro"))

# Create a task definition with placement constraints.
# BUG FIX: constraints are built with
# ecs.PlacementConstraint.distinct_instances(), not a raw dict with the old
# PlacementConstraintType.DistinctInstance enum.
task_definition = ecs.Ec2TaskDefinition(
    stack, "TaskDef",
    placement_constraints=[
        ecs.PlacementConstraint.distinct_instances()
    ]
)

container = task_definition.add_container(
    "web",
    image=ecs.ContainerImage.from_registry("nginx:latest"),
    memory_limit_mib=256,  # BUG FIX: keyword is memory_limit_mib
)
# BUG FIX: add_port_mappings takes ecs.PortMapping objects and the enum
# member is Protocol.TCP.
container.add_port_mappings(
    ecs.PortMapping(container_port=80,
                    host_port=8080,
                    protocol=ecs.Protocol.TCP)
)

# Create Service
def __init__(self, scope: core.Construct, id: str, vpc, ecs_cluster, role,
             target_url: str, number_of_tasks=1, **kwargs) -> None:
    """Deploy a Locust (pre-1.0 LOCUST_MODE scheme) node as an ECS service.

    role: "master", "slave" or "standalone" -- selects LOCUST_MODE, the
        exposed ports, and whether an ALB for the web UI is created.
    target_url: system under test, passed to Locust via TARGET_URL.
    number_of_tasks: desired count for the service.
    """
    super().__init__(scope, id, **kwargs)
    name = id
    task_def = ecs.Ec2TaskDefinition(self, name,
                                     network_mode=ecs.NetworkMode.AWS_VPC
                                     )
    if role == "slave":
        container_env = {"TARGET_URL": target_url,
                         "LOCUST_MODE": role,
                         # Need to update to pull the name from Cloudmap
                         "LOCUST_MASTER_HOST": "master.loadgen"
                         }
    else:
        container_env = {"TARGET_URL": target_url,
                         "LOCUST_MODE": role
                         }
    locust_container = task_def.add_container(
        name + "container",
        # Use Locust image from DockerHub
        # Or not. we'll use an image we create using the dockerfile in ./locust
        image=ecs.ContainerImage.from_asset("locust"),
        memory_reservation_mib=512,
        essential=True,
        logging=ecs.LogDrivers.aws_logs(stream_prefix=name),
        environment=container_env
    )
    # 8089 = web UI; 5557/5558 = master<->slave coordination ports.
    web_port_mapping = ecs.PortMapping(container_port=8089)
    if role != "standalone":
        slave1_port_mapping = ecs.PortMapping(container_port=5557)
        slave2_port_mapping = ecs.PortMapping(container_port=5558)
        locust_container.add_port_mappings(web_port_mapping,
                                           slave1_port_mapping,
                                           slave2_port_mapping)
    else:
        locust_container.add_port_mappings(web_port_mapping)
    security_group = ec2.SecurityGroup(
        self, "Locust",
        vpc=vpc,
        allow_all_outbound=True
    )
    security_group.add_ingress_rule(
        ec2.Peer.any_ipv4(),
        ec2.Port.tcp(8089)
    )
    if role != "standalone":
        security_group.add_ingress_rule(
            ec2.Peer.any_ipv4(),
            ec2.Port.tcp(5557)
        )
        security_group.add_ingress_rule(
            ec2.Peer.any_ipv4(),
            ec2.Port.tcp(5558)
        )
    # Create the ecs service, registered in Cloud Map under the role name
    # so slaves can resolve master.loadgen.
    locust_service = ecs.Ec2Service(
        self, name + "service",
        cluster=ecs_cluster,
        task_definition=task_def,
        security_group=security_group,
        desired_count=number_of_tasks
    )
    locust_service.enable_cloud_map(name=role)
    # Create the ALB to present the Locust UI
    # NOTE(review): the listener forwards to target port 80, but the Locust
    # UI listens on 8089 -- confirm this target port is intended.
    if role != "slave":
        self.lb = elbv2.ApplicationLoadBalancer(self, "LoustLB", vpc=vpc,
                                                internet_facing=True)
        listener = self.lb.add_listener("Listener", port=80)
        listener.add_targets("ECS1",
                             port=80,
                             targets=[locust_service]
                             )
        core.CfnOutput(
            self, "lburl",
            description="URL for ALB fronting locust master",
            value=self.lb.load_balancer_dns_name
        )
def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
    """Run a container (image URI from SSM) on a Graviton2 ECS cluster
    behind an internet-facing ALB.

    The container logs through FireLens/Fluent Bit to CloudWatch and is
    exposed on host port 8080; the ALB listens on port 80.
    """
    super().__init__(scope, id, **kwargs)

    cluster = ecs.Cluster(
        self, 'EKSGraviton2',
        vpc=vpc,
        container_insights=True
    )

    task_definition = ecs.Ec2TaskDefinition(
        self, "TaskDef")

    # Image URI is resolved from SSM Parameter Store at synth time.
    container_uri = ssm.StringParameter.value_for_string_parameter(
        self, "graviton_lab_container_uri")

    # ARM-flavoured ECS-optimized AMI for the Graviton2 (m6g) instances.
    ecs_ami = ecs.EcsOptimizedAmi(
        generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
        hardware_type=ecs.AmiHardwareType.ARM)

    # FIX: the returned AutoScalingGroup was bound to an unused local
    # (asg_ecs); call add_capacity for its side effect only.
    cluster.add_capacity("G2AutoScalingGroup",
                         instance_type=ec2.InstanceType("m6g.2xlarge"),
                         machine_image=ecs_ami
                         )

    container = task_definition.add_container(
        "web",
        image=ecs.ContainerImage.from_registry(container_uri),
        memory_limit_mib=512,
        logging=ecs.LogDrivers.firelens(
            options={
                "Name": "cloudwatch",
                "log_key": "log",
                "region": "us-east-1",
                "delivery_stream": "my-stream",
                "log_group_name": "firelens-fluent-bit",
                "auto_create_group": "true",
                "log_stream_prefix": "from-fluent-bit"}
        )
    )
    # App listens on 3000 in the container; exposed as 8080 on the host.
    port_mapping = ecs.PortMapping(
        container_port=3000,
        host_port=8080,
        protocol=ecs.Protocol.TCP
    )
    container.add_port_mappings(port_mapping)

    # Create Service
    service = ecs.Ec2Service(
        self, "Service",
        cluster=cluster,
        task_definition=task_definition
    )

    # Create ALB
    lb = elbv2.ApplicationLoadBalancer(
        self, "LB",
        vpc=vpc,
        internet_facing=True
    )
    listener = lb.add_listener(
        "PublicListener",
        port=80,
        open=True
    )

    # Attach ALB to ECS Service
    listener.add_targets(
        "ECS",
        port=80,
        targets=[service]
    )

    core.CfnOutput(
        self, "LoadBalancerDNS",
        value=lb.load_balancer_dns_name
    )
min_capacity=2)
# Debug output of the cluster connections object.
print('connections ', str(cluster.connections))
# Allow SSH (22/tcp) into the container instances from anywhere, and attach
# an imported application security group.
port = ec2.Port(protocol=ec2.Protocol.TCP,
                string_representation='inbound to container instances',
                from_port=22,
                to_port=22)
cluster.connections.add_security_group(app_security_group_import)
cluster.connections.allow_from_any_ipv4(port,
                                        'in bound to container instances')
# Create a task definition with its own elastic network interface
task_definition_vistaweb = ecs.Ec2TaskDefinition(
    stack, "VONC_VISTA-task-vistaweb",
    network_mode=ecs.NetworkMode.AWS_VPC,
    family='VONC_VISTA-Vista')
# Both the execution role and the task role get the same custom policy.
task_definition_vistaweb.add_to_execution_role_policy(custom_policy)
task_definition_vistaweb.add_to_task_role_policy(custom_policy)
#task_definition_vistaweb.task_role.add_managed_policy(ecr_read_only_policy)
#print ("testk role ",str(taskrole))
#secret_policy.attach_to_role(task_definition_vistaweb.task_role)
#secret_policy.attach_to_role(task_definition_vistaweb.role)
#secret_policy.attach_to_role(task_definition_vistaweb.role)
# NOTE(review): this call is truncated in the source (continues elsewhere).
vistaweb_container = task_definition_vistaweb.add_container(
    "VONC_VISTA-vistaweb-container",
    image=ecs.ContainerImage.from_registry(ecr_repo_vistaweb),
    cpu=16,
    memory_limit_mib=256,
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Deploy the Spring Petclinic microservices (customers/vets/visits/
    static) on an EC2-backed ECS cluster with a shared MySQL RDS instance
    and a single ALB doing path-based routing.
    """
    super().__init__(scope, id, **kwargs)

    # The code that defines your stack goes here
    #vpc = ec2.Vpc.from_lookup(self, 'VPC', is_default=True)
    vpc = ec2.Vpc(
        self, "MyVpc",
        max_azs=2
    )

    # Shared MySQL instance.  NOTE(review): the '******' username and the
    # inline password look like sanitized placeholders -- restore real
    # values via Secrets Manager, not literals, before deploying.
    rdsInst = rds.DatabaseInstance(
        self, 'SpringPetclinicDB',
        engine=rds.DatabaseInstanceEngine.MYSQL,
        engine_version='5.7.31',
        instance_class=ec2.InstanceType('t2.medium'),
        master_username='******',
        database_name='petclinic',
        master_user_password=core.SecretValue('Welcome#123456'),
        vpc=vpc,
        deletion_protection=False,
        backup_retention=core.Duration.days(0),
        removal_policy=core.RemovalPolicy.DESTROY,
        #vpc_placement = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)
    )
    # NOTE(review): opens the DB port to any IPv4 address -- lab-only.
    rdsInst.connections.allow_default_port_from_any_ipv4()

    cluster = ecs.Cluster(
        self, 'EcsCluster',
        vpc=vpc
    )
    cluster.add_capacity(
        "DefaultAutoScalingGroup",
        instance_type=ec2.InstanceType('t2.large'),
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
        min_capacity=6)

    # ALB with a fixed-response 404 default action.  The default_child
    # assignment is an escape hatch that rewrites the listener's default
    # action at the CloudFormation level.
    alb = elbv2.ApplicationLoadBalancer(self, 'EcsLb', vpc=vpc,
                                        internet_facing=True)
    listener = alb.add_listener('EcsListener', port=80)
    listener.add_fixed_response('Default-Fix', status_code='404')
    listener.node.default_child.default_action = [{
        "type": "fixed-response",
        "fixedResponseConfig": {"statusCode": "404"}
    }]

    # One image asset, task, service and listener rule per microservice.
    for s in ['customers', 'vets', 'visits', 'static']:
        asset = ecr_assets.DockerImageAsset(
            self, 'spring-petclinic-' + s,
            directory='./work/build/spring-petclinic-' + s + '-service',
            build_args={
                'JAR_FILE': 'spring-petclinic-' + s + '-service-2.1.4.jar'
            })
        ecs_task = ecs.Ec2TaskDefinition(self, 'TaskDef-' + s)
        env = {}
        if s != 'static':
            # NOTE(review): '******' credentials are sanitized placeholders.
            env = {
                'SPRING_DATASOURCE_PASSWORD': '******',
                'SPRING_DATASOURCE_USERNAME': '******',
                'SPRING_PROFILES_ACTIVE': 'mysql',
                'SPRING_DATASOURCE_URL': 'jdbc:mysql://' + rdsInst.db_instance_endpoint_address + '/petclinic?useUnicode=true',
                'SERVER_SERVLET_CONTEXT_PATH': '/api/' + s.rstrip('s')
            }
        ecs_container = ecs_task.add_container(
            'Container-' + s,
            memory_limit_mib=512,
            image=ecs.ContainerImage.from_docker_image_asset(asset),
            logging=ecs.LogDriver.aws_logs(stream_prefix=s),
            environment=env
        )
        ecs_container.add_port_mappings(ecs.PortMapping(container_port=8080))
        ecs_service = ecs.Ec2Service(
            self, 'Ec2Service-' + s,
            cluster=cluster,
            service_name='spring-petclinic-' + s,
            desired_count=2,
            task_definition=ecs_task
        )
        # Path-based routing: static catches '/*' at low priority; API
        # services get random priorities.  NOTE(review): randint can
        # collide across services and changes on each synth -- consider
        # deterministic priorities.
        if s == 'static':
            parttern = '/*'
            priority = 1100
            check = {'path': '/'}
        else:
            parttern = '/api/' + s.rstrip('s') + '/*'
            priority = randint(1, 1000)
            check = {'path': '/api/' + s.rstrip('s') + '/manage'}
        target = listener.add_targets(
            'ECS-' + s,
            path_pattern=parttern,
            priority=priority,
            port=80,
            targets=[ecs_service],
            health_check=check
        )

    core.CfnOutput(self, "LoadBalancer", export_name="LoadBalancer",
                   value=alb.load_balancer_dns_name)
def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
    """Run a Lighthouse eth2 beacon node on ECS behind a public ALB."""
    super().__init__(scope, construct_id, **kwargs)

    vpc = ec2.Vpc(self, "PirateVpc", max_azs=1)

    cluster = ecs.Cluster(self, "PirateCluster",
                          container_insights=True,
                          vpc=vpc)
    cluster.add_capacity(
        'Shipyard',
        block_devices=[
            autoscaling.BlockDevice(
                device_name='/dev/xvda',
                volume=autoscaling.BlockDeviceVolume.ebs(
                    volume_size=1000))  # 1 TB
        ],
        instance_type=ec2.InstanceType('m4.4xlarge'))

    # Shared docker volume so chain data outlives any single task.
    task_definition = ecs.Ec2TaskDefinition(
        self, 'PirateTask',
        family='eth2',
        volumes=[
            ecs.Volume(
                name='v',
                docker_volume_configuration=ecs.DockerVolumeConfiguration(
                    driver='local',
                    scope=ecs.Scope.SHARED,  # So it persists between beyond the lifetime of the task
                    autoprovision=True))
        ])

    # NOTE(review): each command entry contains several space-separated
    # words (e.g. '--network pyrmont beacon'); verify the image's
    # entrypoint shell-splits these, otherwise they must be separate list
    # items.
    container = task_definition.add_container(
        'barbosa',
        image=ecs.ContainerImage.from_registry(
            'sigp/lighthouse'),  # TODO: configurable
        command=[
            '--network pyrmont beacon', '--http', '--http-address 0.0.0.0'
        ],
        cpu=4 * 1024,  # 4vCPU -> 8-30GB memory
        container_name='Pirate',
        logging=ecs.LogDrivers.aws_logs(stream_prefix='pirate'),
        memory_reservation_mib=16 * 1024,  # 16GB
        port_mappings=[
            ecs.PortMapping(container_port=9000,
                            host_port=9000),  # protocol=TCP
            ecs.PortMapping(container_port=5052,
                            host_port=5052),  # protocol=TCP
        ],
        secrets={
            # TODO: populate these with our keys
        },
        user='******')  # NOTE(review): looks like a sanitized placeholder -- confirm

    service = ecs_patterns.ApplicationLoadBalancedEc2Service(
        self,
        "Pirateship",
        # certificate=???,  # TODO: set up the public domain
        cluster=cluster,
        desired_count=1,
        # domain_name='ethpirates.com',
        # domain_zone=???,  # TODO: set up the public domain
        public_load_balancer=True,
        task_definition=task_definition)
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
             cluster: ecs.Cluster, **kwargs) -> None:
    """Run a two-node Elasticsearch 6.8 cluster as a single EC2 task in
    BRIDGE mode: a master-eligible container plus a data-only container
    linked to it, exposed through an internet-facing ALB.
    """
    super().__init__(scope, id, **kwargs)
    elastic_cluster_task_def = ecs.Ec2TaskDefinition(
        scope=self,
        id="ES-TASK-DEF",
        network_mode=ecs.NetworkMode.BRIDGE,
    )
    # Master-eligible data node.
    elastic = ecs.ContainerDefinition(
        scope=self,
        id=constants.ES_CONTAINER_NAME,
        start_timeout=core.Duration.seconds(amount=30),
        task_definition=elastic_cluster_task_def,
        memory_limit_mib=4024,
        essential=True,
        image=ecs.ContainerImage.from_registry(
            name="docker.elastic.co/elasticsearch/elasticsearch:6.8.6"),
        environment={
            "cluster.name": constants.ES_CLUSTER_NAME,
            "bootstrap.memory_lock": "true",
            # "discovery.zen.ping.unicast.hosts": "elasticsearch",
            "node.name": constants.ES_CONTAINER_NAME,
            "node.master": "true",
            "node.data": "true",
            "ES_JAVA_OPTS": "-Xms2g -Xmx2g",
        },
        logging=ecs.AwsLogDriver(
            stream_prefix="ES",
            log_retention=logs.RetentionDays.ONE_DAY,
        ),
    )
    # Elasticsearch needs a high file-descriptor ceiling and unlimited
    # locked memory when bootstrap.memory_lock=true.
    elastic.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.NOFILE,
        hard_limit=65535,
        soft_limit=65535))
    elastic.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.MEMLOCK,
        hard_limit=-1,
        soft_limit=-1))
    # 9200 = HTTP API, 9300 = node-to-node transport.
    elastic.add_port_mappings(ecs.PortMapping(container_port=9200))
    elastic.add_port_mappings(ecs.PortMapping(container_port=9300))

    #####################################################
    # Data-only node; joins the master via zen unicast discovery.
    node = ecs.ContainerDefinition(
        scope=self,
        id=constants.ES_NODE_CONTAINER_NAME,
        start_timeout=core.Duration.seconds(amount=40),
        task_definition=elastic_cluster_task_def,
        memory_limit_mib=4024,
        essential=True,
        image=ecs.ContainerImage.from_registry(
            name="docker.elastic.co/elasticsearch/elasticsearch:6.8.6"),
        environment={
            "cluster.name": constants.ES_CLUSTER_NAME,
            "bootstrap.memory_lock": "true",
            "discovery.zen.ping.unicast.hosts": constants.ES_CONTAINER_NAME,
            "node.name": constants.ES_NODE_CONTAINER_NAME,
            "node.master": "false",
            "node.data": "true",
            "ES_JAVA_OPTS": "-Xms2g -Xmx2g",
        },
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix="NODE",
            log_retention=logs.RetentionDays.ONE_DAY,
        ))
    node.add_port_mappings(ecs.PortMapping(container_port=9200))
    node.add_port_mappings(ecs.PortMapping(container_port=9300))
    # NOTE(review): ulimits differ slightly from the master container
    # (65536 vs 65535) -- probably unintentional, though harmless.
    node.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.NOFILE,
        hard_limit=65536,
        soft_limit=65536))
    node.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.MEMLOCK,
        hard_limit=-1,
        soft_limit=-1))
    # Bridge-mode Docker link so the node resolves the master by name.
    node.add_link(container=elastic, alias=constants.ES_CONTAINER_NAME)

    #####################################################
    ecs_service = ecs.Ec2Service(
        scope=self,
        id="ES-SERVICE",
        cluster=cluster,
        task_definition=elastic_cluster_task_def,
        desired_count=1,
        service_name=constants.ECS_ES_SERVICE,
    )
    lb = elbv2.ApplicationLoadBalancer(
        scope=self,
        id="ELB",
        vpc=vpc,
        internet_facing=True,
    )
    listener = lb.add_listener(
        id="LISTENER",
        port=80,
    )
    # Route ALB traffic to the master container's default port mapping.
    ecs_service.register_load_balancer_targets(
        ecs.EcsTarget(
            new_target_group_id="TARGET-GRP",
            container_name=elastic.container_name,
            # container_port=9200,
            listener=ecs.ListenerConfig.application_listener(
                listener=listener,
                protocol=elbv2.ApplicationProtocol.HTTP),
        ))
    core.CfnOutput(
        scope=self,
        id="DNS-NAME",
        value=lb.load_balancer_dns_name,
    )
def __init__(
    self,
    scope: core.Construct,
    id: str,
    **kwargs,
) -> None:
    """Attach an SSH-reachable bastion-host service to the parent scope's
    ECS cluster, backed by a single public t2.micro instance."""
    super().__init__(scope, id, **kwargs)

    # add ingress rule on port 22 for SSH against the VPC default group
    default_sg_for_ingress = ec2.SecurityGroup.from_security_group_id(
        self,
        "DefaultSecurityGroupForIngress",
        scope.vpc.vpc_default_security_group,
    )
    default_sg_for_ingress.add_ingress_rule(
        ec2.Peer.any_ipv4(),
        ec2.Port.tcp(22),
    )

    # One public instance, replaced in place on update; SSH key name comes
    # from the KEY_NAME environment variable.
    self.asg = autoscaling.AutoScalingGroup(
        self,
        "AutoScalingGroup",
        instance_type=ec2.InstanceType("t2.micro"),
        machine_image=ecs.EcsOptimizedAmi(),
        security_group=ec2.SecurityGroup.from_security_group_id(
            self,
            "DefaultSecurityGroupId",
            scope.vpc.vpc_default_security_group,
        ),
        associate_public_ip_address=True,
        update_type=autoscaling.UpdateType.REPLACING_UPDATE,
        desired_capacity=1,
        vpc=scope.vpc,
        key_name=os.environ.get("KEY_NAME"),
        vpc_subnets={'subnet_type': ec2.SubnetType.PUBLIC},
    )
    self.cluster = scope.cluster
    self.cluster.add_auto_scaling_group(self.asg)

    # Bastion task runs the project image with the regular (non-secret)
    # environment variables.
    self.bastion_host_task = ecs.Ec2TaskDefinition(self, "BastionHostTask")
    self.bastion_host_task.add_container(
        "BastionHostContainer",
        image=scope.image,
        command=["/start_prod.sh"],
        environment=scope.variables.regular_variables,
        memory_reservation_mib=128
        # secrets=scope.variables.secret_variables,
    )
    self.bastion_host_service = ecs.Ec2Service(
        self,
        "BastionHostService",
        task_definition=self.bastion_host_task,
        cluster=self.cluster,
    )
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
             cluster: ecs.Cluster, repository: ecr.Repository,
             **kwargs) -> None:
    """Two-node Elasticsearch deployment on an EC2-backed ECS cluster.

    Creates a master container ("elastic") and a data node that discovers
    the master through Cloud Map DNS ("elastic.private"). Each task gets
    its own internet-facing ALB on port 80, and both share one security
    group opening 9200/9300.

    :param vpc: VPC hosting the namespace, security group and ALBs.
    :param cluster: existing ECS cluster the services run on.
    :param repository: ECR repository holding the Elasticsearch image
        (tag "latest").
    """
    super().__init__(scope, id, **kwargs)
    # Private DNS namespace; enable_cloud_map() below registers the master
    # as "elastic" in it, yielding the "elastic.private" hostname the data
    # node uses for unicast discovery.
    namespace = servicediscovery.PrivateDnsNamespace(
        scope=self,
        id="PRIVATE-DNS",
        vpc=vpc,
        name="private",
        description="a private dns"
    )
    # NOTE(review): ingress is open to any IPv4 on 9200/9300 — confirm
    # this should not be restricted to the VPC CIDR.
    sg = ec2.SecurityGroup(
        scope=self,
        id="SG",
        vpc=vpc,
        allow_all_outbound=True,
        description="open 9200 and 9300 ports",
        security_group_name="es-group"
    )
    sg.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port.tcp(port=9200),
    )
    sg.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port.tcp(port=9300),
    )
    ##################################################### master node
    # Host-path volume so index data survives container restarts on the
    # same instance (not across instances).
    elastic_task_def = ecs.Ec2TaskDefinition(
        scope=self,
        id="ES-TASK-DEF",
        network_mode=ecs.NetworkMode.AWS_VPC,
        volumes=[ecs.Volume(
            name="esdata",
            host=ecs.Host(source_path="/usr/share/elasticsearch/data"),
        )],
    )
    elastic = ecs.ContainerDefinition(
        scope=self,
        id=constants.ES_CONTAINER_NAME,
        start_timeout=core.Duration.seconds(amount=30),
        task_definition=elastic_task_def,
        # 4500 MiB container limit with a 4 GiB JVM heap (ES_JAVA_OPTS).
        memory_limit_mib=4500,
        essential=True,
        image=ecs.ContainerImage.from_ecr_repository(
            repository=repository, tag='latest'),
        environment={
            "cluster.name": constants.ES_CLUSTER_NAME,
            "bootstrap.memory_lock": "true",
            # "discovery.zen.ping.unicast.hosts": "elasticsearch",
            "node.name": constants.ES_CONTAINER_NAME,
            "node.master": "true",
            "node.data": "true",
            "ES_JAVA_OPTS": "-Xms4g -Xmx4g",
        },
        logging=ecs.AwsLogDriver(
            stream_prefix="ES",
            log_retention=logs.RetentionDays.ONE_DAY,
        ),
    )
    # NOTE(review): master uses NOFILE 65535 while the data node below
    # uses 65536 — presumably meant to match; confirm intended value.
    elastic.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.NOFILE,
        hard_limit=65535,
        soft_limit=65535))
    # Unlimited MEMLOCK so bootstrap.memory_lock=true can pin the heap.
    elastic.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.MEMLOCK,
        hard_limit=-1,
        soft_limit=-1))
    elastic.add_port_mappings(ecs.PortMapping(container_port=9200))
    elastic.add_port_mappings(ecs.PortMapping(container_port=9300))
    elastic.add_mount_points(ecs.MountPoint(
        container_path="/usr/share/elasticsearch/data",
        source_volume="esdata",
        read_only=False,
    ))
    # elastic.add_volumes_from(ecs.VolumeFrom(
    #     source_container="esdata",
    #     read_only=False,
    # ))
    es_service = ecs.Ec2Service(
        scope=self,
        id="ES-SERVICE",
        cluster=cluster,
        task_definition=elastic_task_def,
        desired_count=1,
        service_name="ES",
        security_group=sg,
    )
    es_lb = elbv2.ApplicationLoadBalancer(
        scope=self,
        id="ES-ELB",
        vpc=vpc,
        internet_facing=True,
    )
    es_listener = es_lb.add_listener(
        id="ES-LISTENER",
        port=80,
    )
    # ALB:80 -> container:9200 (first mapped port of the container).
    es_service.register_load_balancer_targets(
        ecs.EcsTarget(
            new_target_group_id="ES-GRP",
            container_name=elastic.container_name,
            listener=ecs.ListenerConfig.application_listener(
                listener=es_listener,
                protocol=elbv2.ApplicationProtocol.HTTP),
        ))
    # Registers the master as "elastic" in the "private" namespace.
    service = es_service.enable_cloud_map(
        cloud_map_namespace=namespace,
        dns_record_type=servicediscovery.DnsRecordType.A,
        # dns_ttl=core.Duration.seconds(amount=30),
        failure_threshold=1,
        name="elastic",
    )
    core.CfnOutput(
        scope=self,
        id="DNS-ES",
        value=es_lb.load_balancer_dns_name,
    )
    ##################################################### data node
    node_task_def = ecs.Ec2TaskDefinition(
        scope=self,
        id="NODE-TASK-DEF",
        network_mode=ecs.NetworkMode.AWS_VPC,
        volumes=[ecs.Volume(
            name="esdata",
            host=ecs.Host(source_path="/usr/share/elasticsearch/data"),
        )],
    )
    node = ecs.ContainerDefinition(
        scope=self,
        id=constants.ES_NODE_CONTAINER_NAME,
        start_timeout=core.Duration.seconds(amount=40),
        task_definition=node_task_def,
        memory_limit_mib=4500,
        essential=True,
        image=ecs.ContainerImage.from_ecr_repository(
            repository=repository, tag='latest'),
        environment={
            "cluster.name": constants.ES_CLUSTER_NAME,
            "bootstrap.memory_lock": "true",
            # Joins the cluster via the master's Cloud Map DNS name.
            "discovery.zen.ping.unicast.hosts": "elastic.private",
            "node.name": constants.ES_NODE_CONTAINER_NAME,
            "node.master": "false",
            "node.data": "true",
            "ES_JAVA_OPTS": "-Xms4g -Xmx4g",
        },
        logging=ecs.LogDrivers.aws_logs(
            stream_prefix="NODE",
            log_retention=logs.RetentionDays.ONE_DAY,
        ))
    node.add_port_mappings(ecs.PortMapping(container_port=9200))
    node.add_port_mappings(ecs.PortMapping(container_port=9300))
    node.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.NOFILE,
        hard_limit=65536,
        soft_limit=65536))
    node.add_ulimits(ecs.Ulimit(
        name=ecs.UlimitName.MEMLOCK,
        hard_limit=-1,
        soft_limit=-1))
    # NOTE(review): both tasks mount the same host path; if master and
    # data node land on the same instance they share a data directory —
    # confirm placement assumptions.
    node.add_mount_points(ecs.MountPoint(
        container_path="/usr/share/elasticsearch/data",
        source_volume="esdata",
        read_only=False,
    ))
    node_service = ecs.Ec2Service(
        scope=self,
        id="ES-NODE-SERVICE",
        cluster=cluster,
        task_definition=node_task_def,
        desired_count=1,
        service_name="NODE",
        security_group=sg,
    )
    node_lb = elbv2.ApplicationLoadBalancer(
        scope=self,
        id="NODE-ELB",
        vpc=vpc,
        internet_facing=True,
    )
    node_listener = node_lb.add_listener(
        id="NODE-LISTENER",
        port=80,
    )
    node_service.register_load_balancer_targets(
        ecs.EcsTarget(
            new_target_group_id="NODE-GRP",
            container_name=node.container_name,
            listener=ecs.ListenerConfig.application_listener(
                listener=node_listener,
                protocol=elbv2.ApplicationProtocol.HTTP),
        ))
    core.CfnOutput(
        scope=self,
        id="DNS-NODE",
        value=node_lb.load_balancer_dns_name,
    )
], public_subnet_ids=[ core.Fn.import_value('devops-rnd-vpc-PublicSubnet1ID'), core.Fn.import_value('devops-rnd-vpc-PublicSubnet2ID') ]) cluster = ecs.Cluster(stack, "wes-onetest-ecs", vpc=vpc) cluster.add_capacity("DefaultAutoScalingGroup", instance_type=ec2.InstanceType("t2.medium"), key_name='aws-eb', max_capacity=2) # Create a task definition with its own elastic network interface task_definition = ecs.Ec2TaskDefinition( stack, "west-onetest-task", #network_mode=ecs.NetworkMode.AWS_VPC, ) voncweb_container = task_definition.add_container( "west-onetest-voncwebserver", image=ecs.ContainerImage.from_registry(ecr_repo_voncweb), cpu=100, memory_limit_mib=256, essential=True, environment={ 'USE': 'me', 'LAGI': 'ddd' }) port_mapping = ecs.PortMapping(container_port=3000, host_port=443,
def __init__(self, parent, name, **kwargs):
    """Three-tier ECS demo: public "greeter" frontend plus internal
    "name" and "greeting" backends.

    The greeter reaches the backends through an internal ALB that routes
    by path (/name*, /greeting*); an internet-facing ALB exposes the
    greeter on port 80. DNS names of both ALBs are exported as stack
    outputs.
    """
    super().__init__(parent, name, **kwargs)
    vpc = ec2.Vpc(self, 'GreetingVpc', max_azs=2)
    # create an ECS cluster
    cluster = ecs.Cluster(self, "Cluster", vpc=vpc)
    # add capacity to the cluster: fixed fleet of three t2.micro instances
    cluster.add_capacity('greeter-capacity',
                         instance_type=ec2.InstanceType('t2.micro'),
                         min_capacity=3,
                         max_capacity=3
                         )
    # Name service
    name_task_definition = ecs.Ec2TaskDefinition(self, "name-task-definition")
    name_container = name_task_definition.add_container(
        'name',
        image=ecs.ContainerImage.from_registry('nathanpeck/name'),
        memory_limit_mib=128
    )
    name_container.add_port_mappings(ecs.PortMapping(
        container_port=3000
    ))
    name_service = ecs.Ec2Service(self, "name-service",
                                  cluster=cluster,
                                  desired_count=2,
                                  task_definition=name_task_definition
                                  )
    # Greeting service
    greeting_task_definition = ecs.Ec2TaskDefinition(self, "greeting-task-definition")
    greeting_container = greeting_task_definition.add_container(
        'greeting',
        image=ecs.ContainerImage.from_registry('nathanpeck/greeting'),
        memory_limit_mib=128
    )
    greeting_container.add_port_mappings(ecs.PortMapping(
        container_port=3000
    ))
    greeting_service = ecs.Ec2Service(self, "greeting-service",
                                      cluster=cluster,
                                      desired_count=1,
                                      task_definition=greeting_task_definition
                                      )
    # Internal load balancer for the backend services
    internal_lb = elbv2.ApplicationLoadBalancer(self, "internal",
                                                vpc=vpc,
                                                internet_facing=False
                                                )
    internal_listener = internal_lb.add_listener('PublicListener',
                                                 port=80,
                                                 open=True
                                                 )
    # Default target group so unmatched paths get an empty group rather
    # than a listener without a default action.
    internal_listener.add_target_groups('default',
                                        target_groups=[elbv2.ApplicationTargetGroup(
                                            self, 'default',
                                            vpc=vpc,
                                            protocol=elbv2.ApplicationProtocol.HTTP,
                                            port=80
                                        )]
                                        )
    # Path-based routing to the two backends (priority 1 then 2).
    internal_listener.add_targets('name',
                                  port=80,
                                  path_pattern='/name*',
                                  priority=1,
                                  targets=[name_service]
                                  )
    internal_listener.add_targets('greeting',
                                  port=80,
                                  path_pattern='/greeting*',
                                  priority=2,
                                  targets=[greeting_service]
                                  )
    # Greeter service: frontend that calls the backends via the internal
    # ALB's DNS name, injected through environment variables.
    greeter_task_definition = ecs.Ec2TaskDefinition(self, "greeter-task-definition")
    greeter_container = greeter_task_definition.add_container(
        'greeter',
        image=ecs.ContainerImage.from_registry('nathanpeck/greeter'),
        memory_limit_mib=128,
        environment={
            "GREETING_URL": 'http://' + internal_lb.load_balancer_dns_name + '/greeting',
            "NAME_URL": 'http://' + internal_lb.load_balancer_dns_name + '/name'
        }
    )
    greeter_container.add_port_mappings(ecs.PortMapping(
        container_port=3000
    ))
    greeter_service = ecs.Ec2Service(self, "greeter-service",
                                     cluster=cluster,
                                     desired_count=2,
                                     task_definition=greeter_task_definition
                                     )
    # Internet-facing load balancer for the frontend service
    external_lb = elbv2.ApplicationLoadBalancer(self, 'external',
                                                vpc=vpc,
                                                internet_facing=True
                                                )
    external_listener = external_lb.add_listener('PublicListener',
                                                 port=80,
                                                 open=True
                                                 )
    external_listener.add_targets('greeter',
                                  port=80,
                                  targets=[greeter_service]
                                  )
    # output dns addresses of both load balancers
    self.internal_dns = core.CfnOutput(self, 'InternalDNS',
                                       export_name='greeter-app-internal',
                                       value=internal_lb.load_balancer_dns_name
                                       )
    self.external_dns = core.CfnOutput(self, 'ExternalDNS',
                                       export_name='ExternalDNS',
                                       value=external_lb.load_balancer_dns_name
                                       )
# Minimal EC2-backed ECS app: one nginx task behind an ALB, in a fresh
# two-AZ VPC.
from aws_cdk import (
    aws_ec2 as ec2,
    aws_ecs as ecs,
    aws_elasticloadbalancingv2 as elbv2,
    core,
)

app = core.App()
stack = core.Stack(app, "aws-ec2-integ-ecs")

# Create a cluster
vpc = ec2.Vpc(stack, "MyVpc", max_azs=2)

cluster = ecs.Cluster(stack, 'EcsCluster', vpc=vpc)
cluster.add_capacity("DefaultAutoScalingGroup",
                     instance_type=ec2.InstanceType("t2.micro"))

# Create Task Definition
task_definition = ecs.Ec2TaskDefinition(stack, "TaskDef")
container = task_definition.add_container(
    "web",
    image=ecs.ContainerImage.from_registry("nginx:latest"),
    memory_limit_mib=256)
# Container port 80 is published on the instance's port 8080.
port_mapping = ecs.PortMapping(
    container_port=80,
    host_port=8080,
    protocol=ecs.Protocol.TCP
)
container.add_port_mappings(port_mapping)

# Create Service
service = ecs.Ec2Service(stack, "Service",
                         cluster=cluster,
                         task_definition=task_definition)

# Create ALB
lb = elbv2.ApplicationLoadBalancer(stack, "LB",
                                   vpc=vpc,
                                   internet_facing=True)
# NOTE(review): no targets are registered on this listener and no
# app.synth() appears in this span — presumably the script continues
# elsewhere; verify before treating this as a complete app.
listener = lb.add_listener("PublicListener", port=80, open=True)
def __init__(self, scope: core.Stack, id: str, cluster, vpc, worker,
             **kwargs) -> None:
    """Jenkins master service, deployable on Fargate or EC2 per config.

    ``cluster``, ``vpc`` and ``worker`` are sibling stacks exposing
    (from usage below) ``cluster.cluster``, ``vpc.vpc`` and the worker's
    security group, roles and log group/stream. The global ``config``
    (ConfigParser-style) selects the launch type and sizing.
    """
    super().__init__(scope, id, **kwargs)
    self.cluster = cluster
    self.vpc = vpc
    self.worker = worker

    # Building a custom image for jenkins master.
    self.container_image = ecr.DockerImageAsset(
        self, "JenkinsMasterDockerImage", directory='./docker/master/')

    # Fargate is the default: used when explicitly enabled OR when EC2 is
    # not explicitly enabled.
    if config['DEFAULT']['fargate_enabled'] == "yes" or not config[
            'DEFAULT']['ec2_enabled'] == "yes":
        # Task definition details to define the Jenkins master container
        self.jenkins_task = ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
            # image=ecs.ContainerImage.from_ecr_repository(self.container_image.repository),
            image=ecs.ContainerImage.from_docker_image_asset(
                self.container_image),
            container_port=8080,
            enable_logging=True,
            # These variables feed Jenkins Configuration-as-Code and the
            # ECS plugin (so the master can launch Fargate workers).
            environment={
                # https://github.com/jenkinsci/docker/blob/master/README.md#passing-jvm-parameters
                'JAVA_OPTS': '-Djenkins.install.runSetupWizard=false',
                # https://github.com/jenkinsci/configuration-as-code-plugin/blob/master/README.md#getting-started
                'CASC_JENKINS_CONFIG': '/config-as-code.yaml',
                'network_stack': self.vpc.stack_name,
                'cluster_stack': self.cluster.stack_name,
                'worker_stack': self.worker.stack_name,
                'cluster_arn': self.cluster.cluster.cluster_arn,
                'aws_region': config['DEFAULT']['region'],
                'jenkins_url': config['DEFAULT']['jenkins_url'],
                'subnet_ids': ",".join(
                    [x.subnet_id for x in self.vpc.vpc.private_subnets]),
                'security_group_ids': self.worker.worker_security_group.security_group_id,
                'execution_role_arn': self.worker.worker_execution_role.role_arn,
                'task_role_arn': self.worker.worker_task_role.role_arn,
                'worker_log_group': self.worker.worker_logs_group.log_group_name,
                'worker_log_stream_prefix': self.worker.worker_log_stream.log_stream_name
            },
        )
        # Create the Jenkins master service (ALB + Fargate service pattern)
        self.jenkins_master_service_main = ecs_patterns.ApplicationLoadBalancedFargateService(
            self, "JenkinsMasterService",
            cpu=int(config['DEFAULT']['fargate_cpu']),
            memory_limit_mib=int(
                config['DEFAULT']['fargate_memory_limit_mib']),
            cluster=self.cluster.cluster,
            desired_count=1,
            enable_ecs_managed_tags=True,
            task_image_options=self.jenkins_task,
            cloud_map_options=ecs.CloudMapOptions(
                name="master", dns_record_type=sd.DnsRecordType('A')))
        # Normalize to the same attribute names the EC2 branch uses, so
        # the shared wiring below works for either launch type.
        self.jenkins_master_service = self.jenkins_master_service_main.service
        self.jenkins_master_task = self.jenkins_master_service.task_definition

    if config['DEFAULT']['ec2_enabled'] == "yes":
        self.jenkins_load_balancer = elb.ApplicationLoadBalancer(
            self, "JenkinsMasterELB",
            vpc=self.vpc.vpc,
            internet_facing=True,
        )
        self.listener = self.jenkins_load_balancer.add_listener("Listener",
                                                                port=80)
        # Jenkins home lives on an EFS mount present on the host at
        # /mnt/efs (provisioned outside this stack).
        self.jenkins_master_task = ecs.Ec2TaskDefinition(
            self, "JenkinsMasterTaskDef",
            network_mode=ecs.NetworkMode.AWS_VPC,
            volumes=[
                ecs.Volume(name="efs_mount",
                           host=ecs.Host(source_path='/mnt/efs'))
            ],
        )
        self.jenkins_master_task.add_container(
            "JenkinsMasterContainer",
            image=ecs.ContainerImage.from_ecr_repository(
                self.container_image.repository),
            cpu=int(config['DEFAULT']['ec2_cpu']),
            memory_limit_mib=int(
                config['DEFAULT']['ec2_memory_limit_mib']),
            environment={
                # https://github.com/jenkinsci/docker/blob/master/README.md#passing-jvm-parameters
                'JAVA_OPTS': '-Djenkins.install.runSetupWizard=false',
                'CASC_JENKINS_CONFIG': '/config-as-code.yaml',
                'network_stack': self.vpc.stack_name,
                'cluster_stack': self.cluster.stack_name,
                'worker_stack': self.worker.stack_name,
                'cluster_arn': self.cluster.cluster.cluster_arn,
                'aws_region': config['DEFAULT']['region'],
                'jenkins_url': config['DEFAULT']['jenkins_url'],
                'subnet_ids': ",".join(
                    [x.subnet_id for x in self.vpc.vpc.private_subnets]),
                'security_group_ids': self.worker.worker_security_group.security_group_id,
                'execution_role_arn': self.worker.worker_execution_role.role_arn,
                'task_role_arn': self.worker.worker_task_role.role_arn,
                'worker_log_group': self.worker.worker_logs_group.log_group_name,
                'worker_log_stream_prefix': self.worker.worker_log_stream.log_stream_name
            },
            logging=ecs.LogDriver.aws_logs(
                stream_prefix="JenkinsMaster",
                log_retention=logs.RetentionDays.ONE_WEEK),
        )
        # Persist /var/jenkins_home on the EFS-backed host volume.
        self.jenkins_master_task.default_container.add_mount_points(
            ecs.MountPoint(container_path='/var/jenkins_home',
                           source_volume="efs_mount",
                           read_only=False))
        self.jenkins_master_task.default_container.add_port_mappings(
            ecs.PortMapping(container_port=8080, host_port=8080))
        # min_healthy_percent=0 allows the single task to be replaced
        # in place (EFS state makes this safe for Jenkins home).
        self.jenkins_master_service = ecs.Ec2Service(
            self, "EC2MasterService",
            task_definition=self.jenkins_master_task,
            cloud_map_options=ecs.CloudMapOptions(
                name="master", dns_record_type=sd.DnsRecordType('A')),
            desired_count=1,
            min_healthy_percent=0,
            max_healthy_percent=100,
            enable_ecs_managed_tags=True,
            cluster=self.cluster.cluster,
        )
        self.target_group = self.listener.add_targets(
            "JenkinsMasterTarget",
            port=80,
            targets=[
                self.jenkins_master_service.load_balancer_target(
                    container_name=self.jenkins_master_task.
                    default_container.container_name,
                    container_port=8080,
                )
            ],
            deregistration_delay=core.Duration.seconds(10))

    # Opening port 5000 for master <--> worker communications
    # (JNLP agent port 50000).
    self.jenkins_master_service.task_definition.default_container.add_port_mappings(
        ecs.PortMapping(container_port=50000, host_port=50000))

    # Enable connection between Master and Worker
    self.jenkins_master_service.connections.allow_from(
        other=self.worker.worker_security_group,
        port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                            string_representation='Master to Worker 50000',
                            from_port=50000,
                            to_port=50000))

    # Enable connection between Master and Worker on 8080
    self.jenkins_master_service.connections.allow_from(
        other=self.worker.worker_security_group,
        port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                            string_representation='Master to Worker 8080',
                            from_port=8080,
                            to_port=8080))

    # IAM Statements to allow jenkins ecs plugin to talk to ECS as well as the Jenkins cluster #
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(
            actions=[
                "ecs:RegisterTaskDefinition",
                "ecs:DeregisterTaskDefinition", "ecs:ListClusters",
                "ecs:DescribeContainerInstances",
                "ecs:ListTaskDefinitions", "ecs:DescribeTaskDefinition",
                "ecs:DescribeTasks"
            ],
            resources=["*"],
        ))
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(actions=["ecs:ListContainerInstances"],
                            resources=[self.cluster.cluster.cluster_arn]))
    # RunTask restricted to worker task definitions by ARN pattern.
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(
            actions=["ecs:RunTask"],
            resources=[
                "arn:aws:ecs:{0}:{1}:task-definition/fargate-workers*".
                format(
                    self.region,
                    self.account,
                )
            ]))
    # StopTask allowed on any task, but only within this cluster.
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(actions=["ecs:StopTask"],
                            resources=[
                                "arn:aws:ecs:{0}:{1}:task/*".format(
                                    self.region, self.account)
                            ],
                            conditions={
                                "ForAnyValue:ArnEquals": {
                                    "ecs:cluster":
                                    self.cluster.cluster.cluster_arn
                                }
                            }))
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(actions=["iam:PassRole"],
                            resources=[
                                self.worker.worker_task_role.role_arn,
                                self.worker.worker_execution_role.role_arn
                            ]))
    # END OF JENKINS ECS PLUGIN IAM POLICIES #
    # NOTE(review): actions=["*"] is very broad even though scoped to the
    # worker log group — presumably only logs:* is needed; confirm and
    # narrow.
    self.jenkins_master_task.add_to_task_role_policy(
        iam.PolicyStatement(
            actions=["*"],
            resources=[self.worker.worker_logs_group.log_group_arn]))
asg = autoscaling.AutoScalingGroup( stack, "DefaultAutoScalingGroup", instance_type=ec2.InstanceType("t2.micro"), machine_image=ecs.EcsOptimizedImage.amazon_linux2(), vpc=vpc, ) capacity_provider = ecs.AsgCapacityProvider(stack, "AsgCapacityProvider", auto_scaling_group=asg) cluster.add_asg_capacity_provider(capacity_provider) # Create a task definition with its own elastic network interface task_definition = ecs.Ec2TaskDefinition( stack, "nginx-awsvpc", network_mode=ecs.NetworkMode.AWS_VPC, ) web_container = task_definition.add_container( "nginx", image=ecs.ContainerImage.from_registry("nginx:latest"), cpu=100, memory_limit_mib=256, essential=True) port_mapping = ecs.PortMapping(container_port=80, protocol=ecs.Protocol.TCP) web_container.add_port_mappings(port_mapping) # Create a security group that allows HTTP traffic on port 80 for our # containers without modifying the security group on the instance security_group = ec2.SecurityGroup(stack,
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None: super().__init__(scope, construct_id, **kwargs) # TODO should this stuff be passed as inputs to the stack ? source_code_directory = "/opt/python" # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Cluster.html#aws_cdk.aws_ecs.Cluster.add_capacity asg_parameters = { "instance_type": ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO), "machine_image": ecs.EcsOptimizedImage.amazon_linux2(), "desired_capacity": 0, "max_capacity": 5, "min_capacity": 0, } # https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ecs/Ec2TaskDefinition.html#aws_cdk.aws_ecs.Ec2TaskDefinition.add_container container_settings = { "memory_limit_mib": 300, "logging": ecs.AwsLogDriver(stream_prefix="ecslogs"), } input_bucket_name = "cdkdemoinput" output_bucket_name = "cdkdemooutput" # Create an Docker image from an given directory, which will be later published to Amazon ECR # TODO can this be cleanup on destroy as well ? container_image = ecs.ContainerImage.from_asset( directory=source_code_directory) # Create an Amazon ECS cluster cluster = ecs.Cluster(self, "ecscluster") cluster.apply_removal_policy(cdk.RemovalPolicy.DESTROY) # Create an auto scaling group for the ECS cluster asg = cluster.add_capacity("ecsautoscalinggroup", **asg_parameters) # TODO check if needed asg.apply_removal_policy(cdk.RemovalPolicy.DESTROY) # Create a capacity provider for the ECS cluster based on the auto scaling group capacity_provider = ecs.CfnCapacityProvider( self, "ecscapacityprovider", # Name can't start with ecs... name="capacityproviderecs", auto_scaling_group_provider=ecs.CfnCapacityProvider. 
AutoScalingGroupProviderProperty( auto_scaling_group_arn=asg.auto_scaling_group_name, managed_scaling=ecs.CfnCapacityProvider.ManagedScalingProperty( status="ENABLED"), # TODO investigate this better managed_termination_protection="DISABLED", ), ) capacity_provider.apply_removal_policy(cdk.RemovalPolicy.DESTROY) # Currently the CDK checks if the string is FARGATE or FARGATE_SPOT and errors out # cluster.add_capacity_provider(capacity_provider.name) lame_hack = cr.AwsCustomResource( self, "lamehack", on_create={ "service": "ECS", "action": "putClusterCapacityProviders", "parameters": { "cluster": cluster.cluster_arn, "capacityProviders": [capacity_provider.name], "defaultCapacityProviderStrategy": [], }, "physical_resource_id": cr.PhysicalResourceId.of(str(int(time.time()))), }, on_delete={ "service": "ECS", "action": "putClusterCapacityProviders", "parameters": { "cluster": cluster.cluster_arn, "capacityProviders": [], "defaultCapacityProviderStrategy": [], }, }, # TODO lower this permissions policy=cr.AwsCustomResourcePolicy.from_sdk_calls( resources=cr.AwsCustomResourcePolicy.ANY_RESOURCE), ) lame_hack.node.add_dependency(capacity_provider) lame_hack.node.add_dependency(cluster) # Create an ECS task definition with our Docker image task_definition = ecs.Ec2TaskDefinition(self, "ecstaskdefinition") container_definition = task_definition.add_container( "ecscontainer", image=container_image, **container_settings) # TODO lower this permissions task_definition.task_role.add_managed_policy( iam.ManagedPolicy.from_aws_managed_policy_name( "AmazonS3FullAccess")) task_definition.apply_removal_policy(cdk.RemovalPolicy.DESTROY) # Create the Amazon S3 input and output buckets input_bucket = s3.Bucket( self, "bucketinput", bucket_name=input_bucket_name, versioned=False, removal_policy=cdk.RemovalPolicy.DESTROY, auto_delete_objects=True, ) output_bucket = s3.Bucket( self, "bucketoutput", bucket_name=output_bucket_name, versioned=False, removal_policy=cdk.RemovalPolicy.DESTROY, 
auto_delete_objects=True, ) # Create the Amazon Lambda function for transforming the input from bucket information to the container inputs function = lambda_.Function( self, "inputlambda", code=lambda_.Code.from_inline(lambda_function_code), handler="index.lambda_handler", runtime=lambda_.Runtime.PYTHON_3_8, environment={ "CAPACITY_PROVIDER_NAME": capacity_provider.name, "CLUSTER_NAME": cluster.cluster_arn, "CONTAINER_NAME": container_definition.container_name, "REGION_NAME": self.region, # TODO flaky, why can't we pass the ARN directly ? "OUTPUT_BUCKET_NAME": output_bucket.bucket_name, "TASK_DEFINITION": task_definition.task_definition_arn, }, ) # Add an S3 object creation trigger for the function function.add_event_source( lambda_event_sources.S3EventSource( input_bucket, events=[s3.EventType.OBJECT_CREATED])) # TODO fix this for less permissions function.role.add_managed_policy( iam.ManagedPolicy.from_aws_managed_policy_name( "AmazonECS_FullAccess")) function.apply_removal_policy(cdk.RemovalPolicy.DESTROY)