def register_lambda_target_group_with_cognito_auth_rule(
        scope: core.Construct,
        fn: aws_lambda.IFunction,
        vpc: aws_ec2.IVpc,
        listener: elbv2.ApplicationListener,
        user_pool: aws_cognito.CfnUserPool,
        user_pool_app_client: aws_cognito.CfnUserPoolClient,
        user_pool_domain: aws_cognito.CfnUserPoolDomain,
        path_pattern_values: list,
        target_group_id: str = 'lambdatg',
        listener_rule_id: str = 'lambdaalblrule',
        priority: int = 1000,
        session_timeout: int = 3600) -> None:
    """Register *fn* as an ALB target behind a Cognito authentication rule.

    Creates a lambda target group and a listener rule that first
    authenticates callers against the given Cognito user pool and then
    forwards matching requests to the lambda.  The lambda's security
    group is opened to connections from the ALB listener on port 443.

    :param path_pattern_values: path patterns for the forwarding rule,
        e.g. ['/mylambda', '/mylambda/*'].
    :param target_group_id: override when the default id would collide
        with another construct id in the stack.
    :param listener_rule_id: likewise for the listener-rule construct id.
    :param priority: listener-rule priority; must be unique per listener.
    :param session_timeout: Cognito session duration in seconds.
    """
    target = elvb2_targets.LambdaTarget(fn)
    target_group = elbv2.ApplicationTargetGroup(
        scope=scope,
        id=target_group_id,
        targets=[target],
        vpc=vpc,
        target_type=elbv2.TargetType.LAMBda if False else elbv2.TargetType.LAMBDA)
    # Work around a CDK dependency-ordering bug: the generated
    # CfnTargetGroup must explicitly depend on the lambda function.
    # TODO: add reference to the CDK issue.
    target_group.node.default_child.node.add_dependency(fn)
    elbv2.CfnListenerRule(
        scope=scope,
        id=listener_rule_id,
        actions=[{
            'type': 'authenticate-cognito',
            'authenticateCognitoConfig':
                elbv2.CfnListenerRule.AuthenticateCognitoConfigProperty(
                    user_pool_arn=user_pool.attr_arn,
                    user_pool_client_id=user_pool_app_client.ref,
                    user_pool_domain=user_pool_domain.ref,
                    scope='openid',
                    session_timeout=session_timeout,
                    on_unauthenticated_request='authenticate'),
            'order': 10
        }, {
            'type': 'forward',
            'order': 20,
            'targetGroupArn': target_group.target_group_arn
        }],
        conditions=[{
            'field': 'path-pattern',
            'values': path_pattern_values
        }],
        listener_arn=listener.listener_arn,
        priority=priority)
    fn.connections.allow_from(listener, aws_ec2.Port.tcp(443))
def alb_tg(self, vpc, alb_tg_port=None, targets=None, adl_tg_name=None):
    """Create an application target group on this construct.

    :param vpc: mapping whose 'vpc' entry holds the VPC construct.
    :param alb_tg_port: traffic port for the target group.
    :param targets: optional list of load-balancer targets.
    :param adl_tg_name: target-group name; also suffixes the construct id.
    :return: the new ApplicationTargetGroup.
    """
    # Previously a missing name crashed with TypeError when concatenated
    # into the construct id; treat None as an empty suffix instead.
    id_suffix = adl_tg_name or ''
    alb_tg = elbv2.ApplicationTargetGroup(
        self,
        "alb-tg" + id_suffix,
        port=alb_tg_port,
        target_group_name=adl_tg_name,
        vpc=vpc['vpc'],
        targets=targets)
    return alb_tg
def create_target_group(self, vpc, tg_name):
    """Build an IP-targeted HTTP target group with sticky sessions.

    The group health-checks '/login' on port 80 and keeps the
    load-balancer stickiness cookie alive for 30 minutes.
    """
    group = _elbv2.ApplicationTargetGroup(
        self,
        tg_name,
        vpc=vpc,
        port=80,
        target_group_name=tg_name,
        target_type=_elbv2.TargetType.IP,
        health_check=_elbv2.HealthCheck(path='/login'))
    group.enable_cookie_stickiness(core.Duration.seconds(1800))
    return group
def alb_listener(self, alb, vpc, alb_port, alb_cert=None):
    """Attach a listener on *alb_port* to *alb*, forwarding to a fresh
    default target group on the same port.

    :param vpc: mapping whose 'vpc' entry holds the VPC construct.
    :param alb_cert: optional certificate ARNs for TLS termination.
    """
    # Build the default group inline so every listener gets its own
    # target group on the listener's port.
    default_group = elbv2.ApplicationTargetGroup(
        self, "alb-default-tg", port=alb_port, vpc=vpc['vpc'])
    listener = elbv2.ApplicationListener(
        self,
        "alb-listener",
        load_balancer=alb,
        port=alb_port,
        open=True,
        certificate_arns=alb_cert,
        default_target_groups=[default_group])
    return listener
def __create_application_target_group(self, asg: autoscaling.AutoScalingGroup, vpc: ec2.Vpc):
    """Create the HTTP target group fronting the Imagizer auto-scaling group.

    The health check polls /health every 10 seconds and requires two
    consecutive successes; cluster tags are applied before returning.
    """
    probe = elbv2.HealthCheck(
        path="/health",
        healthy_threshold_count=2,
        interval=core.Duration.seconds(10))
    group = elbv2.ApplicationTargetGroup(
        self,
        id=common.generate_id("ImagizerTargetGroup"),
        vpc=vpc,
        port=variables.PUBLIC_PORT,
        protocol=elbv2.ApplicationProtocol.HTTP,
        targets=[asg],
        health_check=probe)
    common.add_tags(self, group, variables.IMAGIZER_CLUSTER_TAGS)
    return group
def register_ec2_as_alb_target(
        scope: core.Construct,
        ec2: aws_ec2.Instance,
        listener: elbv2.ApplicationListener,
        vpc: aws_ec2.IVpc,
        path_pattern_values: list,
        port: int,
        protocol: elbv2.ApplicationProtocol = elbv2.ApplicationProtocol.HTTPS,
        listener_rule_id: str = 'ec2alblrule',
        target_group_id: str = 'ec2tg',
        priority: int = 2000,
):
    """Register a given EC2 instance as an ALB listener target.

    Creates an instance target group and a path-pattern forwarding rule
    on *listener*, and opens the instance's security group to
    connections from the ALB listener on *port*.

    :param path_pattern_values: paths for the forwarding rule, e.g.
        ['/myec2', '/myec2/*'].
    :param listener_rule_id: override when the default construct id
        would collide within the stack.
    :param target_group_id: likewise for the target-group construct id.
    :param priority: listener-rule priority; must be unique per listener.
    """
    target = elvb2_targets.InstanceTarget(ec2)
    target_group = elbv2.ApplicationTargetGroup(
        scope=scope,
        id=target_group_id,
        vpc=vpc,
        port=port,
        targets=[target],
        target_type=elbv2.TargetType.INSTANCE,
        protocol=protocol)
    elbv2.CfnListenerRule(
        scope=scope,
        id=listener_rule_id,
        actions=[{
            'type': 'forward',
            'order': 20,
            'targetGroupArn': target_group.target_group_arn
        }],
        conditions=[{
            'field': 'path-pattern',
            'values': path_pattern_values
        }],
        listener_arn=listener.listener_arn,
        priority=priority)
    ec2.connections.allow_from(listener, aws_ec2.Port.tcp(port))
    # NOTE(review): this attaches the group to the listener a second
    # time, in addition to the CfnListenerRule above — confirm both
    # registrations are intended before removing either.
    listener.add_target_groups(id=target_group_id + 'tg',
                               target_groups=[target_group])
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Public ALB with HTTP (80) and HTTPS (443) listeners, both
    forwarding to one shared IP-type default target group.

    Expects *scope* to expose ``vpc`` and ``certificate`` attributes.
    """
    super().__init__(scope, id, **kwargs)
    self.alb = elbv2.ApplicationLoadBalancer(
        self, "ALB", internet_facing=True, vpc=scope.vpc)
    # Open both web ports to the world at the security-group level.
    self.alb.connections.allow_from_any_ipv4(
        ec2.Port.tcp(80), "Internet access ALB 80")
    self.alb.connections.allow_from_any_ipv4(
        ec2.Port.tcp(443), "Internet access ALB 443")
    self.listener = self.alb.add_listener("ALBListener", port=80, open=True)
    self.https_listener = self.alb.add_listener(
        "HTTPSListener",
        port=443,
        certificates=[scope.certificate],
        open=True,
    )
    # NOTE(review): HTTP traffic is served directly rather than
    # redirected to HTTPS; a redirect action on port 80 could be added.
    default_tg = elbv2.ApplicationTargetGroup(
        self,
        "DefaultTargetGroup",
        port=80,
        protocol=elbv2.ApplicationProtocol.HTTP,
        vpc=scope.vpc,
        target_type=elbv2.TargetType.IP,
    )
    self.default_target_group = default_tg
    # Wire the same group into both listeners as their default action.
    self.listener.add_target_groups(
        "DefaultTargetGroup", target_groups=[default_tg])
    self.https_listener.add_target_groups(
        "HTTPSDefaultTargetGroup", target_groups=[default_tg])
def __init__(self, scope: core.Construct, construct_id: str, vpc: ec2.Vpc, **kwargs) -> None:
    """Provision the web-facing security group, the public ALB, and the
    'blue' target group (IP targets, port 8224, sticky for 30 days)."""
    super().__init__(scope, construct_id, **kwargs)
    # World-open HTTP/HTTPS security group for the load balancer.
    self.elb_security_group = ec2.SecurityGroup(
        self,
        "ELBSG",
        vpc=vpc,
        allow_all_outbound=True,
        description="Group for the ELB")
    for port_number, note in ((80, "Allow HTTP access"),
                              (443, "Allow HTTPS access")):
        self.elb_security_group.add_ingress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port.tcp(port_number),
            description=note)
    load_balancer = elbv2.ApplicationLoadBalancer(
        self,
        "ELB",
        vpc=vpc,
        internet_facing=True,
        vpc_subnets=ec2.SubnetSelection(subnets=vpc.public_subnets))
    # Health probe: /pi must return 200 three times in a row, checked
    # every 15 seconds with a 10-second timeout.
    probe = elbv2.HealthCheck(
        enabled=True,
        healthy_http_codes="200",
        healthy_threshold_count=3,
        interval=core.Duration.seconds(15),
        path="/pi",
        timeout=core.Duration.seconds(10),
        unhealthy_threshold_count=3)
    blue_tg = elbv2.ApplicationTargetGroup(
        self,
        "BlueTargetGroup",
        port=8224,
        protocol=elbv2.ApplicationProtocol.HTTP,
        stickiness_cookie_duration=core.Duration.days(30),
        health_check=probe,
        target_type=elbv2.TargetType.IP,
        vpc=vpc)
def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc, instances: list, certificate_arn: str, **kwargs):
    """HTTPS-terminating public ALB forwarding to the given EC2
    instances on port 8080; emits the resulting URL as a stack output."""
    super().__init__(scope, id, **kwargs)
    probe = elbv2.HealthCheck(path="/", healthy_http_codes="200-399")
    ide_target_group = elbv2.ApplicationTargetGroup(
        self, "PublicTG", port=8080, vpc=vpc, health_check=probe)
    # Register each instance by its id rather than the construct itself.
    for inst in instances:
        ide_target_group.add_target(
            elbv2.InstanceTarget(inst.instance_id, port=8080))
    self._public_security_group = ec2.SecurityGroup(self, "PublicLBSG", vpc=vpc)
    self._public_security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                                 ec2.Port.tcp(443))
    public_subnets = vpc.select_subnets(
        subnet_type=ec2.SubnetType.PUBLIC).subnets
    self._public_lb = elbv2.ApplicationLoadBalancer(
        self,
        "PublicLB",
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnets=public_subnets),
        internet_facing=True,
        security_group=self._public_security_group)
    self._public_lb.add_listener(
        "PublicLBListener",
        certificates=[elbv2.ListenerCertificate(certificate_arn)],
        port=443,
        default_target_groups=[ide_target_group])
    core.CfnOutput(
        self,
        "CloudIDE URL",
        value="https://{}".format(self._public_lb.load_balancer_dns_name))
def configure_load_balancers(self, vpc: ec2.Vpc, publoadbal: elbv2.ApplicationLoadBalancer):
    """Create a target group, listener and listener rule on *publoadbal*
    for each demo service (grafana / prometheus / colorgateway).

    :return: dict mapping service name -> its ApplicationTargetGroup.
    """
    tgroups = {}
    # Dict-style HealthCheck props (pre-1.0 CDK API).  The same dict is
    # reused for every group with only healthyHttpCodes rewritten per
    # service — NOTE(review): confirm the construct copies these props
    # at creation time, otherwise all groups share the last values set.
    hc = elbv2.HealthCheck()
    hc['intervalSecs'] = 10
    hc['protocol'] = elbv2.ApplicationProtocol.Http
    hc['healthyThresholdCount'] = 10
    hc['unhealthyThresholdCount'] = 10
    hc['timeoutSeconds'] = 5
    hc['path'] = '/'
    # Per-service config: expected healthy HTTP code and traffic port.
    targetgroups = [
        {'name': 'grafana', 'httpcode': '302', 'port': 3000},
        {'name': 'prometheus', 'httpcode': '405', 'port': 9090},
        {'name': 'colorgateway', 'httpcode': '200', 'port': 9080}]
    for tgs in targetgroups:
        tgname = tgs['name']
        code = tgs['httpcode']
        port = tgs['port']
        hc['healthyHttpCodes'] = code
        atg = elbv2.ApplicationTargetGroup(
            self, id=tgname + 'TargetGroup',
            protocol=elbv2.ApplicationProtocol.Http, port=port,
            deregistration_delay_sec=30, vpc=vpc,
            target_group_name='appmeshdemo-' + tgname + '-1',
            health_check=hc, target_type=elbv2.TargetType.Ip)
        # One listener per service on the service's own port.
        lbl = elbv2.ApplicationListener(
            self, tgname + 'LoadBalancerListener', port=port,
            protocol=elbv2.ApplicationProtocol.Http,
            default_target_groups=[atg], load_balancer=publoadbal)
        # Catch-all rule forwarding every path to the group.
        elbv2.ApplicationListenerRule(
            self, tgname + 'LoadBalancerRule', listener=lbl,
            target_groups=[atg], priority=1, path_pattern='*')
        tgroups[tgname] = atg
    return tgroups
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Webshop demo stack: VPC, public ALB (optionally TLS + custom
    DNS), Aurora serverless MySQL with its password in Secrets Manager,
    and an auto-scaled Fargate service attached to the ALB.

    Behaviour is driven by module-level ``vars`` (cidr, sslcert,
    sslcert_arn, customdomain, hosted_zone_id, zone_name, region).
    """
    super().__init__(scope, id, **kwargs)
    # Create a VPC
    myvpc = ec2.Vpc(self, "CDKVPC", cidr=vars.cidr)
    # SG for ELB creation: HTTP/HTTPS open to the world.
    websitefrontendSG = ec2.SecurityGroup(
        self,
        'websitefrontendSG',
        vpc=myvpc,
        security_group_name='websitefrontendSG')
    websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                       connection=ec2.Port.tcp(80))
    websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                       connection=ec2.Port.tcp(443))
    # Create ALB in VPC
    alb = elb.ApplicationLoadBalancer(
        self,
        'websitefrontend-public',
        vpc=myvpc,
        load_balancer_name='websitefrontend-public',
        security_group=websitefrontendSG,
        internet_facing=True)
    # Target group the Fargate service attaches to at the very end.
    catalogtargetgroup = elb.ApplicationTargetGroup(
        self,
        'CatalogTargetGroup',
        port=80,
        vpc=myvpc,
        target_type=elb.TargetType.IP)
    if not vars.sslcert:
        # No certificate: plain HTTP listener forwarding to the catalog.
        alblistenerhttp = elb.ApplicationListener(
            self,
            'alblistenerhttp',
            load_balancer=alb,
            default_target_groups=[catalogtargetgroup],
            port=80)
    if vars.sslcert:
        # Certificate available: HTTP listener only exists to redirect.
        alblistenerhttp = elb.ApplicationListener(self,
                                                  'alblistenerhttp',
                                                  load_balancer=alb,
                                                  port=80)
        elb.ApplicationListenerRule(self,
                                    'httpredirectionrule',
                                    listener=alblistenerhttp,
                                    redirect_response=elb.RedirectResponse(
                                        status_code='HTTP_301',
                                        port='443',
                                        protocol='HTTPS'))
        # OPTIONAL - Add https listener to ALB & attach certificate
        alblistenerhttps = elb.ApplicationListener(
            self,
            'alblistenerhttps',
            load_balancer=alb,
            default_target_groups=[catalogtargetgroup],
            port=443,
            certificate_arns=[vars.sslcert_arn])
        # OPTIONAL - Redirect HTTP to HTTPS
        alblistenerhttp.add_redirect_response(id='redirectionrule',
                                              port='443',
                                              status_code='HTTP_301',
                                              protocol='HTTPS')
    if vars.customdomain:
        # OPTIONAL - alias the apex record of an existing zone to the ALB.
        webshopxyz_zone = r53.HostedZone.from_hosted_zone_attributes(
            self,
            id='customdomain',
            hosted_zone_id=vars.hosted_zone_id,
            zone_name=vars.zone_name)
        webshop_root_record = r53.ARecord(
            self,
            'ALBAliasRecord',
            zone=webshopxyz_zone,
            target=r53.RecordTarget.from_alias(alias.LoadBalancerTarget(alb)))
    # SG for ECS creation: only reachable from the ALB's group.
    ECSSG = ec2.SecurityGroup(self,
                              'ECSSecurityGroup',
                              vpc=myvpc,
                              security_group_name='ECS')
    ECSSG.add_ingress_rule(peer=websitefrontendSG,
                           connection=ec2.Port.tcp(80))
    # SG for MySQL creation: only reachable from the ECS tasks.
    MySQLSG = ec2.SecurityGroup(self,
                                'DBSecurityGroup',
                                vpc=myvpc,
                                security_group_name='DB')
    MySQLSG.add_ingress_rule(peer=ECSSG, connection=ec2.Port.tcp(3306))
    # Create DB subnet group from every private subnet.
    subnetlist = []
    for subnet in myvpc.private_subnets:
        subnetlist.append(subnet.subnet_id)
    subnetgr = rds.CfnDBSubnetGroup(
        self,
        'democlustersubnetgroup',
        db_subnet_group_name='democlustersubnetgroup',
        db_subnet_group_description='DemoCluster',
        subnet_ids=subnetlist)
    # Generated DB password stored in Secrets Manager.
    # NOTE(review): the username in the template is the literal string
    # "******" — confirm that is intended and not a redaction artifact.
    secret = sm.SecretStringGenerator(
        exclude_characters="\"'@/\\",
        secret_string_template='{"username": "******"}',
        generate_string_key='password',
        password_length=40)
    dbpass = sm.Secret(self,
                       'democlusterpass',
                       secret_name='democlusterpass',
                       generate_secret_string=secret)
    # Create Aurora serverless MySQL instance (scales 1-4 ACUs,
    # auto-pauses after 5 minutes idle).
    dbcluster = rds.CfnDBCluster(
        self,
        'DemoCluster',
        engine='aurora',
        engine_mode='serverless',
        engine_version='5.6',
        db_cluster_identifier='DemoCluster',
        master_username=dbpass.secret_value_from_json('username').to_string(),
        master_user_password=dbpass.secret_value_from_json(
            'password').to_string(),
        storage_encrypted=True,
        port=3306,
        vpc_security_group_ids=[MySQLSG.security_group_id],
        scaling_configuration=rds.CfnDBCluster.ScalingConfigurationProperty(
            auto_pause=True,
            max_capacity=4,
            min_capacity=1,
            seconds_until_auto_pause=300),
        db_subnet_group_name=subnetgr.db_subnet_group_name)
    # Force the subnet group to exist before the cluster.
    dbcluster.add_override('DependsOn', 'democlustersubnetgroup')
    # Attach database to secret so rotation/lookup knows its target.
    attach = sm.CfnSecretTargetAttachment(self,
                                          'RDSAttachment',
                                          secret_id=dbpass.secret_arn,
                                          target_id=dbcluster.ref,
                                          target_type='AWS::RDS::DBCluster')
    # Build and upload the app image into an ECR repo.
    ecrdemoimage = ecra.DockerImageAsset(self,
                                         'ecrdemoimage',
                                         directory='../',
                                         repository_name='demorepo',
                                         exclude=['cdk.out'])
    # Create ECS fargate cluster
    ecscluster = ecs.Cluster(self, "ecsCluster", vpc=myvpc)
    # Task role limited to reading the DB secret.
    getsecretpolicystatement = iam.PolicyStatement(actions=[
        "secretsmanager:GetResourcePolicy", "secretsmanager:GetSecretValue",
        "secretsmanager:DescribeSecret", "secretsmanager:ListSecretVersionIds"
    ],
                                                   resources=[
                                                       dbpass.secret_arn
                                                   ],
                                                   effect=iam.Effect.ALLOW)
    getsecretpolicydocument = iam.PolicyDocument(
        statements=[getsecretpolicystatement])
    # NOTE(review): inline_policies is passed a list here; newer CDK
    # expects a name->PolicyDocument mapping — confirm against the CDK
    # version this file targets.
    taskrole = iam.Role(
        self,
        'TaskRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        role_name='TaskRoleforproductsCatalogTask',
        inline_policies=[getsecretpolicydocument])
    # Create task definition
    taskdefinition = ecs.FargateTaskDefinition(self,
                                               'productsCatalogTask',
                                               cpu=1024,
                                               memory_limit_mib=2048,
                                               task_role=taskrole)
    # Add container to task definition; the app resolves the secret at
    # runtime from the name passed in the environment.
    productscatalogcontainer = taskdefinition.add_container(
        'productscatalogcontainer',
        image=ecs.ContainerImage.from_docker_image_asset(asset=ecrdemoimage),
        environment={
            "region": vars.region,
            "secretname": "democlusterpass"
        })
    productscatalogcontainer.add_port_mappings(
        ecs.PortMapping(container_port=80, host_port=80))
    # Create service in the private subnets and associate it with the cluster.
    catalogservice = ecs.FargateService(
        self,
        'catalogservice',
        task_definition=taskdefinition,
        assign_public_ip=False,
        security_group=ECSSG,
        vpc_subnets=ec2.SubnetSelection(subnets=myvpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE).subnets),
        cluster=ecscluster,
        desired_count=2)
    # Add autoscaling to the service (1-20 tasks at 70% CPU).
    # NOTE(review): cooldowns of 1s/0s are effectively disabled —
    # confirm these near-zero values are intentional.
    scaling = catalogservice.auto_scale_task_count(max_capacity=20,
                                                   min_capacity=1)
    scaling.scale_on_cpu_utilization(
        'ScaleOnCPU',
        target_utilization_percent=70,
        scale_in_cooldown=core.Duration.seconds(amount=1),
        scale_out_cooldown=core.Duration.seconds(amount=0))
    # Associate the fargate service with load balancer targetgroup
    catalogservice.attach_to_application_target_group(catalogtargetgroup)
targets=[service_varsleuth], health_check=health_check, protocol=elbv2.ApplicationProtocol.HTTP) listener_spliceai.add_targets( "spliceai_target", target_group_name='Proj-VONC-VISTA-SpliceAI-Blue', port=80, targets=[service_spliceai], health_check=health_check, protocol=elbv2.ApplicationProtocol.HTTP) voncweb_target_green = elbv2.ApplicationTargetGroup( stack, "voncweb_target_green", target_group_name='Proj-VONC-VISTA-VoncWeb-Green', port=443, target_type=elbv2.TargetType.IP, health_check=health_check, vpc=vpc) listener_voncweb_green.add_target_groups("voncweb_green", target_groups=[voncweb_target_green]) dispatcher_target_green = elbv2.ApplicationTargetGroup( stack, "dispatcher_target_green", target_group_name='Proj-VONC-VISTA-Dispatcher-Green', port=3000, protocol=elbv2.ApplicationProtocol.HTTP, target_type=elbv2.TargetType.IP, health_check=dispatcher_health_check, vpc=vpc)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) #arn = elbv2.ApplicationListener.from_application_listener_attributes(self, "test", listener_arn=) # importing security Group from exesting resources mysg = ec2.SecurityGroup.from_security_group_id( self, "sg", security_group_id=data['security_grp']) # importting Vpc from exesting resources vpc = ec2.Vpc.from_lookup(self, "VPC", vpc_id=data['vpc_id']) # creating loadbalancer woth exesting resources lb = elbv2.ApplicationLoadBalancer( self, "LB", vpc=vpc, security_group=mysg, internet_facing=True, load_balancer_name="myloadbalancer", vpc_subnets=ec2.SubnetSelection( availability_zones=["ap-south-1a", "ap-south-1b"], one_per_az=True)) # creating Target Group1 mytarget_group = elbv2.ApplicationTargetGroup( self, "targetGroup", target_group_name="mytarget-group", protocol=elbv2.ApplicationProtocol.HTTP, target_type=elbv2.TargetType.INSTANCE, port=80, vpc=vpc, health_check=elbv2.HealthCheck(enabled=True, healthy_http_codes="200", path="/", port="80")) # creating target group 2 mytarget_group2 = elbv2.ApplicationTargetGroup( self, "targetGroup2", target_group_name="mytarget-group2", protocol=elbv2.ApplicationProtocol.HTTP, target_type=elbv2.TargetType.INSTANCE, port=80, vpc=vpc, health_check=elbv2.HealthCheck(enabled=True, healthy_http_codes="200", path="/home", port="80")) # adding a loadbalancer default listener listener = lb.add_listener("listener", port=80, default_target_groups=[mytarget_group]) # adding loadbalancer listener Rule if version == "versionone": slect_target = mytarget_group else: slect_target = mytarget_group2 listenerRule = elbv2.ApplicationListenerRule( self, "listenerRule", listener=listener, priority=1, path_pattern="/home", target_groups=[slect_target]) # Output the DNS name of loadbalancer output_1 = core.CfnOutput(self, "mybucketoutput1", value=lb.load_balancer_dns_name, export_name="mybucketoutput1") core.Tag.add(lb, "Name", "naresh")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Fargate web stack: a small VPC, one task running the app
    container plus a Datadog sidecar, and an internet-facing ALB that
    serves the service over HTTPS (443) and redirects HTTP (80).

    Relies on class-level helpers ``EcsStack.readConfig`` /
    ``EcsStack.commands`` to load container environment variables.
    """
    super().__init__(scope, id, **kwargs)
    # Load env vars for the app container into EcsStack.commands.
    EcsStack.readConfig(0)
    # /26 VPC split into /28 public + private subnets across 2 AZs.
    vpc = ec.Vpc(self,
                 "Main",
                 cidr="11.0.0.0/26",
                 max_azs=2,
                 nat_gateways=1,
                 subnet_configuration=[
                     ec.SubnetConfiguration(name="public",
                                            cidr_mask=28,
                                            subnet_type=ec.SubnetType.PUBLIC),
                     ec.SubnetConfiguration(name="private",
                                            cidr_mask=28,
                                            subnet_type=ec.SubnetType.PRIVATE)
                 ])
    cluster = ecs.Cluster(self, "TestingCluster", vpc=vpc)
    # Task role assumable by both ECS tasks and EC2, granted broad
    # managed policies plus an inline KMS/secrets policy.
    taskRole = iam.Role(
        self,
        id="taskRole",
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal(service='ecs-tasks.amazonaws.com'),
            iam.ServicePrincipal(service='ec2.amazonaws.com')),
        role_name="webmaintaskRole",
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonRDSFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSQSFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonS3FullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonDynamoDBFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonRedshiftFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonKinesisFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AmazonECSTaskExecutionRolePolicy"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSNSFullAccess"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "service-role/AWSLambdaRole"),
            # NOTE(review): "secretemanager:GetSecreteValue" looks
            # misspelled — confirm "secretsmanager:GetSecretValue" was
            # intended before changing the deployed policy.
            iam.ManagedPolicy(self,
                              id="ManagedPolicy",
                              managed_policy_name="Grant_dev",
                              statements=[
                                  iam.PolicyStatement(actions=[
                                      "kms:Decrypt",
                                      "secretemanager:GetSecreteValue"
                                  ],
                                                      resources=["*"])
                              ])
        ])
    # taskRole.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonRDSFullAccess"))
    # WebApp Main task Definition & Service
    webmain_task_definition = ecs.FargateTaskDefinition(
        self,
        "WebAppMain",
        memory_limit_mib=512,
        cpu=256,
        task_role=taskRole,
        execution_role=taskRole)
    # App container with Datadog autodiscovery labels.
    # NOTE(review): environment is handed the shared EcsStack.commands
    # mapping which is cleared a few lines below — confirm
    # add_container() copies the dict, otherwise this env ends up empty.
    webmain_container = webmain_task_definition.add_container(
        "webapp-mainContainer",
        image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
        environment=EcsStack.commands,
        docker_labels={
            "com.datadoghq.ad.instances":
            "[{\"host\": \"%%host%%\", \"port\": 80}]",
            "com.datadoghq.ad.check_names": "[\"ecs_fargate\"]",
            "com.datadoghq.ad.init_configs": "[{}]"
        },
        logging=ecs.LogDriver.aws_logs(stream_prefix="awslogs"))
    # Clear the shared env map, then reload it for the Datadog sidecar.
    EcsStack.commands.clear()
    EcsStack.readConfig(1)
    webmain_datadog_container = webmain_task_definition.add_container(
        "webapp-main_datadog_Container",
        image=ecs.ContainerImage.from_registry("amazon/amazon-ecs-sample"),
        environment=EcsStack.commands)
    # App on 80; 8126/8125 on the sidecar (presumably Datadog APM and
    # DogStatsD ports — confirm).
    webmain_port_mapping = ecs.PortMapping(container_port=80,
                                           host_port=80,
                                           protocol=ecs.Protocol.TCP)
    datadog_port_mapping1 = ecs.PortMapping(container_port=8126,
                                            host_port=8126,
                                            protocol=ecs.Protocol.TCP)
    datadog_port_mapping2 = ecs.PortMapping(container_port=8125,
                                            host_port=8125,
                                            protocol=ecs.Protocol.TCP)
    webmain_container.add_port_mappings(webmain_port_mapping)
    webmain_datadog_container.add_port_mappings(datadog_port_mapping1)
    webmain_datadog_container.add_port_mappings(datadog_port_mapping2)
    # Security group for service; single whitelisted ingress on 5432.
    webmain_sg = ec.SecurityGroup(self,
                                  "webmain_sg",
                                  vpc=vpc,
                                  allow_all_outbound=True,
                                  security_group_name="WebAppMain")
    webmain_sg.add_ingress_rule(peer=Peer.ipv4("202.65.133.194/32"),
                                connection=Port.tcp(5432))
    webmain_service = ecs.FargateService(
        self,
        "webapp-main",
        cluster=cluster,
        task_definition=webmain_task_definition,
        desired_count=1,
        security_group=webmain_sg)
    # Internet-facing load balancer in the public subnets.
    webmain_lb = elbv2.ApplicationLoadBalancer(
        self,
        "LB",
        vpc=vpc,
        internet_facing=True,
        load_balancer_name="WebAppMain",
        # security_group=
        vpc_subnets=ec.SubnetSelection(subnet_type=ec.SubnetType.PUBLIC))
    webmain_target_grp = elbv2.ApplicationTargetGroup(
        self,
        id="webapp-main-target",
        port=80,
        protocol=elbv2.ApplicationProtocol.HTTP,
        health_check=elbv2.HealthCheck(healthy_http_codes="200-399",
                                       healthy_threshold_count=2,
                                       unhealthy_threshold_count=2,
                                       port="traffic-port",
                                       protocol=elbv2.Protocol.HTTP,
                                       timeout=core.Duration.seconds(6),
                                       interval=core.Duration.seconds(10)),
        targets=[webmain_service],
        target_group_name="WebAppMain",
        target_type=elbv2.TargetType.IP,
        vpc=vpc)
    # HTTPS listener terminating TLS with a hard-coded ACM certificate.
    listener = webmain_lb.add_listener(
        "webMain_Listener",
        port=443,
        open=True,
        default_target_groups=[webmain_target_grp],
        certificate_arns=[
            "arn:aws:acm:us-west-2:384853870836:certificate/182c0fdd-813f-4bd3-aee1-0b4543cfb52b"
        ])
    # HTTP listener exists only to redirect to HTTPS below.
    listener2 = webmain_lb.add_listener(
        "webMain_Listener2",
        port=80,
        # default_target_groups=[webmain_target_grp]
    )
    # elbv2.ApplicationListenerCertificate(self,"WebAppMAin_Certificate",listener=listener,certificate_arns=["arn:aws:acm:us-west-2:384853870836:certificate/182c0fdd-813f-4bd3-aee1-0b4543cfb52b"])
    listener2.add_redirect_response(id="HttptoHttps",
                                    status_code="HTTP_301",
                                    port="443",
                                    protocol="HTTPS")
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Microservices stack: one ALB with a fixed-response default action
    plus, per entry in ``config['services']``, a Fargate service in the
    private subnets, a Cloud Map SRV record, an IP target group, and a
    path-pattern listener rule routing to it.

    Driven by the module-level ``config`` mapping (cidr, region,
    service_discovery_namespace, services[]).
    """
    super().__init__(scope, id, **kwargs)
    # Set up VPC
    vpc = ec2.Vpc(self, 'CDKVPC', cidr=config['cidr'])
    # SG for ELB: HTTP/HTTPS open to the world.
    webSG = ec2.SecurityGroup(self,
                              'webSG',
                              vpc=vpc,
                              security_group_name='WebSG')
    webSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                           connection=ec2.Port.tcp(80))
    webSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                           connection=ec2.Port.tcp(443))
    # Create ALB
    alb = elb.ApplicationLoadBalancer(self,
                                      'webALB-public',
                                      vpc=vpc,
                                      load_balancer_name='webALB-public',
                                      security_group=webSG,
                                      internet_facing=True)
    # Listener on 80 whose default action answers requests that match
    # no service rule with a plain-text 200.
    alblistener = alb.add_listener(
        'webALB-Listener',
        port=80,
        open=True,
        default_action=elb.ListenerAction.fixed_response(
            status_code=200,
            content_type='text/plain',
            message_body='default action'))
    # SG for ECS Fargate: reachable from the ALB and from itself
    # (service-to-service calls).
    fargateSG = ec2.SecurityGroup(self,
                                  'fargateSG',
                                  vpc=vpc,
                                  security_group_name='FargateSG')
    fargateSG.add_ingress_rule(peer=webSG, connection=ec2.Port.tcp(80))
    fargateSG.add_ingress_rule(peer=fargateSG, connection=ec2.Port.tcp(80))
    # Create fargate cluster
    fargate_cluster = ecs.Cluster(
        self,
        'FargateCluster',
        vpc=vpc,
        cluster_name='FargateCluster',
    )
    # Private DNS namespace for service discovery.
    namespace = sd.PrivateDnsNamespace(
        self,
        'PrivateDNSNamespace',
        name=config['service_discovery_namespace'],
        vpc=vpc)
    # Create fargate resources for each microservice; construct ids are
    # suffixed with the list index to stay unique.
    for indx, s in enumerate(config['services']):
        # Create task definition and add the container from the repo
        task_definition = ecs.FargateTaskDefinition(
            self,
            'ServiceTaskDefinition' + str(indx),
            cpu=1024,
            memory_limit_mib=2048)
        cont = task_definition.add_container(
            'ServiceContainer' + str(indx),
            image=ecs.ContainerImage.from_registry(s['repo']),
            environment={"REGION": config['region']})
        cont.add_port_mappings(
            ecs.PortMapping(container_port=80, host_port=80))
        # Create service in private subnets
        service = ecs.FargateService(
            self,
            'ServiceFargateService' + str(indx),
            task_definition=task_definition,
            assign_public_ip=False,
            security_group=fargateSG,
            vpc_subnets=ec2.SubnetSelection(subnets=vpc.select_subnets(
                subnet_type=ec2.SubnetType.PRIVATE).subnets),
            cluster=fargate_cluster,
            desired_count=s['num_tasks'])
        # Register the service in Cloud Map with SRV records.
        service.enable_cloud_map(cloud_map_namespace=namespace,
                                 dns_record_type=sd.DnsRecordType.SRV,
                                 name=s['service_discovery_service_name'])
        # Set up ALB target group and set Fargate service as target
        target_group = elb.ApplicationTargetGroup(
            self,
            'ServiceTargetGroup' + str(indx),
            port=80,
            vpc=vpc,
            target_type=elb.TargetType.IP,
            target_group_name=s['service_name'] + 'TargetGroup',
            targets=[
                service.load_balancer_target(
                    container_name='ServiceContainer' + str(indx),
                    container_port=80)
            ])
        # Path-pattern rule routing this service's URLs to its group;
        # priority follows list order.
        alblistenerrule = elb.ApplicationListenerRule(
            self,
            'ListenerRule' + str(indx),
            path_pattern=s['alb_routing_path'],
            priority=indx + 1,
            listener=alblistener,
            target_groups=[target_group])
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Single-service Fargate demo: VPC, cluster, one shared security
    group, and an internet-facing ALB forwarding to the DVWA container.

    Reads the module-level constants ``http_port``, ``task_def_cpu``
    and ``task_def_memory_mb`` for ports and task sizing.
    """
    super().__init__(scope, id, **kwargs)
    vpc = ec2.Vpc(
        self,
        "MyVpc",
        max_azs=2
    )
    cluster = ecs.Cluster(
        self,
        "EC2Cluster",
        vpc=vpc
    )
    # WARNING: this group allows every TCP port from anywhere and is
    # shared by both the ALB and the Fargate tasks.
    security_group = ec2.SecurityGroup(
        self,
        "SecurityGroup",
        vpc=vpc,
        allow_all_outbound=True,
    )
    security_group.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port.all_tcp(),
        description="Allow all traffic"
    )
    app_target_group = elbv2.ApplicationTargetGroup(
        self,
        "AppTargetGroup",
        port=http_port,
        vpc=vpc,
        target_type=elbv2.TargetType.IP,
    )
    elastic_loadbalancer = elbv2.ApplicationLoadBalancer(
        self,
        "ALB",
        vpc=vpc,
        internet_facing=True,
        security_group=security_group,
    )
    app_listener = elbv2.ApplicationListener(
        self,
        "AppListener",
        load_balancer=elastic_loadbalancer,
        port=http_port,
        default_target_groups=[app_target_group],
    )
    task_definition = ecs.TaskDefinition(
        self,
        "TaskDefenition",
        compatibility=ecs.Compatibility.FARGATE,
        cpu=task_def_cpu,
        memory_mib=task_def_memory_mb,
    )
    # DVWA is a deliberately vulnerable web application — demo use only.
    container_defenition = ecs.ContainerDefinition(
        self,
        "ContainerDefenition",
        image=ecs.ContainerImage.from_registry("vulnerables/web-dvwa"),
        task_definition=task_definition,
        logging=ecs.AwsLogDriver(
            stream_prefix="DemoContainerLogs",
            log_retention=logs.RetentionDays.ONE_DAY,
        ),
    )
    container_defenition.add_port_mappings(
        ecs.PortMapping(
            container_port=http_port,
        )
    )
    fargate_service = ecs.FargateService(
        self,
        "FargateService",
        task_definition=task_definition,
        cluster=cluster,
        security_group=security_group,
    )
    # Register the service's tasks in the listener's default group.
    fargate_service.attach_to_application_target_group(
        target_group=app_target_group,
    )
    core.CfnOutput(
        self,
        "LoadBalancerDNS",
        value=elastic_loadbalancer.load_balancer_dns_name
    )
def __init__(self, app: cdk.App, id: str, vpc: ec2.Vpc, servicedomain: str, **kwargs) -> None:
    """App-mesh demo base stack (pre-1.0 CDK API): an ECS cluster with a
    private Cloud Map namespace, a wide-open bastion host, and a public
    ALB with a dummy default target group.

    Exposes ``_cluster``, ``_clustersg`` and ``_publoadbal`` for
    sibling stacks.
    """
    super().__init__(app, id)
    cluster = ecs.Cluster(self, id, vpc=vpc)
    # Private DNS namespace for service discovery inside the mesh.
    cluster.add_default_cloud_map_namespace(
        name=servicedomain, type=ecs.NamespaceType.PrivateDns)
    self._cluster = cluster
    # Service SG: any TCP port from anywhere inside the VPC CIDR.
    ecssg = ec2.SecurityGroup(self, 'ECSServiceSecurityGroup', vpc=vpc)
    ecssg.add_ingress_rule(peer=ec2.CidrIPv4(vpc.vpc_cidr_block),
                           connection=ec2.TcpAllPorts())
    self._clustersg = ecssg
    # Bastion host stuff -------------------------------------------------------------------------------------
    # BastionInstanceRole: SSM-managed, plus app-mesh permissions loaded
    # from a local policy document.
    pd = pu.PolicyUtils.createpolicyfromfile(
        './appmeshdemo/policydocs/appmesh.json')
    bir = iam.Role(
        self,
        'BastionInstanceRole',
        assumed_by=iam.ServicePrincipal('ec2'),
        inline_policies={'appmesh': pd},
        managed_policy_arns=[
            'arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM'
        ])
    bip = iam.CfnInstanceProfile(self,
                                 'BastionInstanceProfile',
                                 roles=[bir.role_name])
    # Bastion EC2 instance in the first public subnet.
    # WARNING: its security group accepts all TCP from anywhere.
    bsg = ec2.SecurityGroup(self, 'BastionSG', vpc=vpc)
    bsg.add_ingress_rule(peer=ec2.AnyIPv4(), connection=ec2.TcpAllPorts())
    ni = ec2.CfnNetworkInterfaceProps()
    ni['associatePublicIpAddress'] = True
    ni['deviceIndex'] = '0'
    ni['groupSet'] = [bsg.security_group_name]
    ni['subnetId'] = vpc.public_subnets[0].subnet_id
    bhi = ec2.CfnInstance(
        self,
        'BastionInstance',
        instance_type='t2.micro',
        iam_instance_profile=bip.instance_profile_name,
        image_id=ec2.AmazonLinuxImage().get_image(self).image_id,
        network_interfaces=[ni])
    # Load-Balancer stuff ------------------------------------------------------------------------------------
    plbsg = ec2.SecurityGroup(self, 'PublicLoadBalancerSG', vpc=vpc)
    plbsg.add_ingress_rule(peer=ec2.AnyIPv4(),
                           connection=ec2.TcpPortRange(0, 65535))
    plb = elbv2.ApplicationLoadBalancer(self,
                                        'PublicLoadBalancer',
                                        internet_facing=True,
                                        load_balancer_name='appmeshdemo',
                                        security_group=plbsg,
                                        vpc=vpc,
                                        idle_timeout_secs=30)
    self._publoadbal = plb
    # Dict-style HealthCheck props (pre-1.0 CDK API).
    healthchk = elbv2.HealthCheck()
    healthchk['intervalSecs'] = 6
    healthchk['healthyThresholdCount'] = 2
    healthchk['unhealthyThresholdCount'] = 2
    # Placeholder group so the listener has a default action; real
    # services register their own groups later.
    dtg = elbv2.ApplicationTargetGroup(
        self,
        'DummyTargetGroupPublic',
        vpc=vpc,
        port=80,
        protocol=elbv2.ApplicationProtocol.Http,
        health_check=healthchk,
        target_group_name='appmeshdemo-drop-1')
    plbl = elbv2.ApplicationListener(
        self,
        'PublicLoadBalancerListener',
        load_balancer=plb,
        port=80,
        protocol=elbv2.ApplicationProtocol.Http,
        default_target_groups=[dtg])
    cdk.CfnOutput(self,
                  id='External URL',
                  value='http://' + plb.load_balancer_dns_name)
def __init__(self, parent, name, **kwargs):
    """Three-service ECS demo: 'name' and 'greeting' backends behind an
    internal ALB with path-based routing, and a public 'greeter' frontend
    that calls them through the internal ALB's DNS name.

    :param parent: parent construct/app.
    :param name: construct id.
    """
    super().__init__(parent, name, **kwargs)

    vpc = ec2.Vpc(self, 'GreetingVpc', max_azs=2)

    # create an ECS cluster
    cluster = ecs.Cluster(self, "Cluster", vpc=vpc)

    # add capacity to the cluster (fixed at 3 t2.micro instances)
    cluster.add_capacity('greeter-capacity',
                         instance_type=ec2.InstanceType('t2.micro'),
                         min_capacity=3,
                         max_capacity=3
                         )

    # Name service
    name_task_definition = ecs.Ec2TaskDefinition(self, "name-task-definition")
    name_container = name_task_definition.add_container(
        'name',
        image=ecs.ContainerImage.from_registry('nathanpeck/name'),
        memory_limit_mib=128
    )
    name_container.add_port_mappings(ecs.PortMapping(
        container_port=3000
    ))
    name_service = ecs.Ec2Service(self, "name-service",
                                  cluster=cluster,
                                  desired_count=2,
                                  task_definition=name_task_definition
                                  )

    # Greeting service
    greeting_task_definition = ecs.Ec2TaskDefinition(
        self, "greeting-task-definition")
    greeting_container = greeting_task_definition.add_container(
        'greeting',
        image=ecs.ContainerImage.from_registry('nathanpeck/greeting'),
        memory_limit_mib=128
    )
    greeting_container.add_port_mappings(ecs.PortMapping(
        container_port=3000
    ))
    greeting_service = ecs.Ec2Service(self, "greeting-service",
                                      cluster=cluster,
                                      desired_count=1,
                                      task_definition=greeting_task_definition
                                      )

    # Internal load balancer for the backend services
    internal_lb = elbv2.ApplicationLoadBalancer(self, "internal",
                                                vpc=vpc,
                                                internet_facing=False
                                                )
    internal_listener = internal_lb.add_listener('PublicListener',
                                                 port=80,
                                                 open=True
                                                 )
    # Default action points at an empty target group; real targets are
    # attached below with path-pattern rules.
    internal_listener.add_target_groups('default',
                                        target_groups=[elbv2.ApplicationTargetGroup(
                                            self, 'default',
                                            vpc=vpc,
                                            protocol=elbv2.ApplicationProtocol.HTTP,
                                            port=80
                                        )]
                                        )
    # /name* -> name service (priority 1), /greeting* -> greeting (priority 2)
    internal_listener.add_targets('name',
                                  port=80,
                                  path_pattern='/name*',
                                  priority=1,
                                  targets=[name_service]
                                  )
    internal_listener.add_targets('greeting',
                                  port=80,
                                  path_pattern='/greeting*',
                                  priority=2,
                                  targets=[greeting_service]
                                  )

    # Greeter service
    greeter_task_definition = ecs.Ec2TaskDefinition(
        self, "greeter-task-definition")
    greeter_container = greeter_task_definition.add_container(
        'greeter',
        image=ecs.ContainerImage.from_registry('nathanpeck/greeter'),
        memory_limit_mib=128,
        environment={
            # Backends are reached via the internal ALB, not directly.
            "GREETING_URL": 'http://' + internal_lb.load_balancer_dns_name + '/greeting',
            "NAME_URL": 'http://' + internal_lb.load_balancer_dns_name + '/name'
        }
    )
    greeter_container.add_port_mappings(ecs.PortMapping(
        container_port=3000
    ))
    greeter_service = ecs.Ec2Service(self, "greeter-service",
                                     cluster=cluster,
                                     desired_count=2,
                                     task_definition=greeter_task_definition
                                     )

    # Internet facing load balancer for the frontend services
    external_lb = elbv2.ApplicationLoadBalancer(self, 'external',
                                                vpc=vpc,
                                                internet_facing=True
                                                )
    external_listener = external_lb.add_listener('PublicListener',
                                                 port=80,
                                                 open=True
                                                 )
    external_listener.add_targets('greeter',
                                  port=80,
                                  targets=[greeter_service]
                                  )

    # output dns addresses
    self.internal_dns = core.CfnOutput(self, 'InternalDNS',
                                       export_name='greeter-app-internal',
                                       value=internal_lb.load_balancer_dns_name
                                       )
    self.external_dns = core.CfnOutput(self, 'ExternalDNS',
                                       export_name='ExternalDNS',
                                       value=external_lb.load_balancer_dns_name
                                       )
certificate_arns=[certificate_arn_sema4]) health_check = elbv2.HealthCheck(interval=core.Duration.seconds(60), path="/", timeout=core.Duration.seconds(5)) listener_vista.add_targets("vistaweb_target", target_group_name='Vista-temp-blue', port=80, targets=[service_vistaweb], health_check=health_check, protocol=elbv2.ApplicationProtocol.HTTP) vistaweb_target_green = elbv2.ApplicationTargetGroup( stack, "vistaweb_target_green", port=80, target_group_name='Vista-temp-Green', protocol=elbv2.ApplicationProtocol.HTTP, target_type=elbv2.TargetType.IP, health_check=health_check, vpc=vpc) listener_vista_green.add_target_groups("vistaweb_green", target_groups=[vistaweb_target_green]) core.CfnOutput(stack, "Vista-temp-LoadBalancerDNS_vista", value=lb_vista.load_balancer_dns_name) app.synth()
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Build the octicketing blue/green ALB: one load balancer, a production
    listener (:80) and a test listener (:8080), a blue and a green target
    group, and CfnOutput exports for the deployment tooling.

    :param scope: parent construct.
    :param id: construct id.

    Bug fixed versus the previous revision: the 'BlueTgFullName' output
    exported ``self.greenGroup.target_group_full_name`` (copy-paste error);
    it now exports the blue group's full name.
    """
    super().__init__(scope, id, **kwargs)
    self.platform_resources = ImportedResources(self, self.stack_name)

    # Owning group from CDK context, defaulting to "OctankSupport";
    # used as a prefix for every export name below.
    group_name = self.node.try_get_context("group")
    group_name = "OctankSupport" if not group_name else group_name

    # =============================================================================
    # Application Loadbalancer configuration
    # =============================================================================
    self.alb_security_group = aws_ec2.SecurityGroup(
        self,
        "octicketing-alb-sg-bg",
        vpc=self.platform_resources.vpc,
        allow_all_outbound=True)

    # Application load balancer, listeners and two target groups for
    # Blue/Green deployment.
    self.alb = elbv2.ApplicationLoadBalancer(
        self,
        "octicketing-bg",
        load_balancer_name='octicketing-bg',
        vpc=self.platform_resources.vpc,
        security_group=self.alb_security_group,
        internet_facing=True)
    self.albProdListener = self.alb.add_listener('albProdListener', port=80)
    self.albTestListener = self.alb.add_listener('albTestListener', port=8080)
    self.albProdListener.connections.allow_default_port_from_any_ipv4(
        'Allow traffic from everywhere on port 80')
    self.albTestListener.connections.allow_default_port_from_any_ipv4(
        'Allow traffic from everywhere on port 8080')

    def _bg_health_check():
        # Fresh dict per target group; 404 is accepted as healthy while
        # the application warms up.
        return {
            "path": "/",
            "timeout": core.Duration.seconds(10),
            "interval": core.Duration.seconds(15),
            "healthy_http_codes": "200,404"
        }

    # Target group 1 (blue)
    self.blueGroup = elbv2.ApplicationTargetGroup(
        self,
        "blueGroup",
        vpc=self.platform_resources.vpc,
        protocol=elbv2.ApplicationProtocol.HTTP,
        port=8080,
        target_type=elbv2.TargetType.IP,
        health_check=_bg_health_check())

    # Target group 2 (green)
    self.greenGroup = elbv2.ApplicationTargetGroup(
        self,
        "greenGroup",
        vpc=self.platform_resources.vpc,
        protocol=elbv2.ApplicationProtocol.HTTP,
        port=8080,
        target_type=elbv2.TargetType.IP,
        health_check=_bg_health_check())

    # Registering the blue target group with the production listener.
    self.albProdListener.add_target_groups(
        "blueTarget",
        target_groups=[self.blueGroup])

    # Registering the green target group with the test listener.
    self.albTestListener.add_target_groups(
        "greenTarget",
        target_groups=[self.greenGroup])

    # Exports consumed by the deployment pipeline (one per resource handle).
    core.CfnOutput(self, 'ALBARN',
                   value=self.alb.load_balancer_arn,
                   export_name=group_name + 'ALBarn')
    core.CfnOutput(self, 'ALBName',
                   value=self.alb.load_balancer_name,
                   export_name=group_name + 'ALBName')
    core.CfnOutput(self, 'ALBFullName',
                   value=self.alb.load_balancer_full_name,
                   export_name=group_name + 'ALBFullName')
    core.CfnOutput(self, 'ALBSGID',
                   value=core.Fn.select(
                       0, self.alb.load_balancer_security_groups),
                   export_name=group_name + 'ALBSgId')
    core.CfnOutput(self, 'BlueTgARN',
                   value=self.blueGroup.target_group_arn,
                   export_name=group_name + 'BlueTgARN')
    core.CfnOutput(self, 'GreenTgARN',
                   value=self.greenGroup.target_group_arn,
                   export_name=group_name + 'GreenTgARN')
    core.CfnOutput(self, 'BlueTgName',
                   value=self.blueGroup.target_group_name,
                   export_name=group_name + 'BlueTgName')
    core.CfnOutput(self, 'GreenTgName',
                   value=self.greenGroup.target_group_name,
                   export_name=group_name + 'GreenTgName')
    # FIX: was exporting the *green* group's full name under the blue key.
    core.CfnOutput(self, 'BlueTgFullName',
                   value=self.blueGroup.target_group_full_name,
                   export_name=group_name + 'BlueTgFullName')
    core.CfnOutput(self, 'GreenTgFullName',
                   value=self.greenGroup.target_group_full_name,
                   export_name=group_name + 'GreenTgFullName')
    core.CfnOutput(self, 'ProdListenerARN',
                   value=self.albProdListener.listener_arn,
                   export_name=group_name + 'ProdListenerARN')
    core.CfnOutput(self, 'TestListenerARN',
                   value=self.albTestListener.listener_arn,
                   export_name=group_name + 'TestListenerARN')
def create_jupyter_service(self): jupyter_task_definition = ecs.FargateTaskDefinition( self, id="jupyterTaskDefinition", cpu=1024, memory_limit_mib=2048) container = jupyter_task_definition.add_container( id="jupyterContainer", cpu=1024, memory_limit_mib=2048, essential=True, image=ecs.ContainerImage.from_registry(Config.JUPYTER_IMG.value), command=[ "jupyter", "notebook", f"--NotebookApp.token={Config.JUPYTER_SECRET.value}", "--ip", "0.0.0.0", "--no-browser", "--allow-root", ], logging=ecs.LogDriver.aws_logs( stream_prefix="jupyter-", log_retention=logs.RetentionDays.ONE_DAY), ) container.add_port_mappings( ecs.PortMapping(container_port=8888, host_port=8888, protocol=ecs.Protocol.TCP)) self.jupyter_service = ecs.FargateService( self, id="jupyter", cluster=self.cluster, desired_count=1, service_name="jupyter", task_definition=jupyter_task_definition, health_check_grace_period=core.Duration.seconds(120), assign_public_ip=True, ) self.jupyter_service.enable_cloud_map( dns_record_type=servicediscovery.DnsRecordType.A, name="jupyter") healthcheck = elb.HealthCheck( interval=core.Duration.seconds(60), path="/", timeout=core.Duration.seconds(40), port="8888", healthy_http_codes="200-399", ) jatg = elb.ApplicationTargetGroup( self, id="jupyterTargetGroup", port=8888, vpc=self.vpc, protocol=elb.ApplicationProtocol.HTTP, targets=[self.jupyter_service], health_check=healthcheck, ) listener = self.elb.add_listener("jupyterPublicListener", port=80, open=True) listener.add_target_groups(id="jupyterTargetGroups", target_groups=[jatg])
def create_scheduler_service(self): scheduler_task_definition = ecs.FargateTaskDefinition( self, id="schedulerTaskDefinition", cpu=2048, memory_limit_mib=4096) container = scheduler_task_definition.add_container( id="schedulerContainer", cpu=2048, memory_limit_mib=4096, essential=True, image=ecs.ContainerImage.from_registry(Config.DASK_IMG.value), command=["dask-scheduler", "--interface", "eth0"], logging=ecs.LogDriver.aws_logs( stream_prefix="scheduler-", log_retention=logs.RetentionDays.ONE_DAY), ) container.add_port_mappings( ecs.PortMapping(container_port=8787, host_port=8787, protocol=ecs.Protocol.TCP), ecs.PortMapping(container_port=8786, host_port=8786, protocol=ecs.Protocol.TCP), ecs.PortMapping(container_port=9000, host_port=9000, protocol=ecs.Protocol.TCP), ) self.scheduler_service = ecs.FargateService( self, id="scheduler", cluster=self.cluster, desired_count=1, service_name="scheduler", task_definition=scheduler_task_definition, ) self.scheduler_service.enable_cloud_map( dns_record_type=servicediscovery.DnsRecordType.A, name="scheduler") healthcheck = elb.HealthCheck( interval=core.Duration.seconds(60), path="/status", timeout=core.Duration.seconds(40), port="8787", healthy_http_codes="200-399", ) satg = elb.ApplicationTargetGroup( self, id="schedulerTargetGroup", port=8787, vpc=self.vpc, protocol=elb.ApplicationProtocol.HTTP, targets=[self.scheduler_service], health_check=healthcheck, ) listener = self.elb.add_listener( "schedulerPublicListener", port=8787, open=True, protocol=elb.ApplicationProtocol.HTTP, ) listener.add_target_groups(id="schedulerTargetgroups", target_groups=[satg])
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """Provision the oculus stack: VPC with flow logs, an internet-facing
    HTTPS ALB, an ECS cluster running three Fargate services (api, web,
    tca) with CPU autoscaling, and a Route53 alias record for the site.

    :param scope: parent construct.
    :param id: construct id.
    :param props: dict with keys 'apiHealthPath', 'tcaHealthPath', 'cidr',
        'vpcAzCount', 'zoneDomain', 'siteDomain'.

    Bug fixed versus the previous revision: the VPC flow log's
    ``log_group_name`` was built as ``"{log_group_name}-{stage}"``, but the
    log group is already named ``"{service}-{stage}"`` — the flow log
    therefore targeted a log group (``...-{stage}-{stage}``) that was never
    created with the intended retention. It now references the created
    log group directly.
    """
    super().__init__(scope, id, **kwargs)
    pearson_vpn_connection = ec2.Peer.ipv4('159.182.0.0/16')

    # Props Setup
    stage = scope.node.try_get_context('stage')
    my_service_name = scope.node.try_get_context('serviceName')
    api_health_path = props['apiHealthPath']
    tca_health_path = props['tcaHealthPath']

    # Setup IAM role that the flow-log service assumes to write to CloudWatch.
    vpc_flow_role = iam.Role(
        self,
        'FlowLog',
        assumed_by=iam.ServicePrincipal('vpc-flow-logs.amazonaws.com'))
    vpc_flow_role.add_to_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            actions=[
                                'iam:PassRole', 'logs:CreateLogGroup',
                                'logs:DescribeLogGroups',
                                'logs:CreateLogStream', 'logs:PutLogEvents'
                            ],
                            resources=["*"]))

    # Create Cloudwatch log group (1-year retention, deleted with the stack).
    log_group = logs.LogGroup(self, 'LogGroup',
                              log_group_name="{0}-{1}".format(
                                  my_service_name, stage),
                              retention=logs.RetentionDays('ONE_YEAR'),
                              removal_policy=core.RemovalPolicy('DESTROY'))

    # Setup VPC resource
    vpc = ec2.Vpc(self, '{0}-{1}-vpc'.format(my_service_name, stage),
                  cidr=props['cidr'],
                  max_azs=props['vpcAzCount'])

    # Setup VPC flow logs into the log group created above.
    # (Assignment kept although unused: instantiation registers the resource.)
    vpc_log = ec2.CfnFlowLog(
        self,
        'FlowLogs',
        resource_id=vpc.vpc_id,
        resource_type='VPC',
        traffic_type='ALL',
        deliver_logs_permission_arn=vpc_flow_role.role_arn,
        log_destination_type='cloud-watch-logs',
        # FIX: reference the created log group; previously the stage was
        # appended a second time, pointing at a non-existent group.
        log_group_name=log_group.log_group_name)

    # Setup Security Group in VPC (SSH only from the corporate VPN range).
    vpc_sg = ec2.SecurityGroup(self, 'EcSSG',
                               vpc=vpc,
                               allow_all_outbound=None,
                               description="Security Group for Oculus vpc",
                               security_group_name="{0}-{1}-vpc-sg".format(
                                   my_service_name, stage))
    vpc_sg.add_ingress_rule(peer=pearson_vpn_connection,
                            connection=ec2.Port.tcp(22))

    # ALB Security Group (HTTPS only from the corporate VPN range).
    alb_sg = ec2.SecurityGroup(self, 'AlbSG',
                               vpc=vpc,
                               allow_all_outbound=None,
                               description="Security group for oculus ALB",
                               security_group_name="{0}-{1}-alb-sg".format(
                                   my_service_name, stage))
    alb_sg.add_ingress_rule(peer=pearson_vpn_connection,
                            connection=ec2.Port.tcp(443))

    # Setup ALB
    alb = elbv2.ApplicationLoadBalancer(self, 'ALB',
                                        vpc=vpc,
                                        internet_facing=True,
                                        security_group=alb_sg)

    # Standalone target groups.
    # NOTE(review): these three are not attached to the listener — the
    # listener rules below create their own target groups via add_targets.
    # Confirm whether they are still needed before removing (removal would
    # change the synthesized template).
    api_tg = elbv2.ApplicationTargetGroup(
        self,
        'ApiTargetGroup',
        port=8080,
        protocol=elbv2.ApplicationProtocol.HTTP,
        vpc=vpc)
    web_tg = elbv2.ApplicationTargetGroup(
        self,
        'WebTargetGroup',
        port=3030,
        protocol=elbv2.ApplicationProtocol.HTTP,
        vpc=vpc)
    tca_tg = elbv2.ApplicationTargetGroup(
        self,
        'TcaTargetGroup',
        port=8080,
        protocol=elbv2.ApplicationProtocol.HTTP,
        vpc=vpc)

    # Setup ECS Cluster
    ecs_cluster = ecs.Cluster(self, 'ECSCluster',
                              vpc=vpc,
                              cluster_name="{0}-{1}".format(
                                  my_service_name, stage))

    # ECS Execution Role - grants the ECS agent permission to call AWS APIs
    # (image pulls, secrets, logs, load-balancer registration, autoscaling).
    ecs_execution_role = iam.Role(
        self,
        'ECSExecutionRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        role_name="{0}-{1}-execution-role".format(my_service_name, stage))
    ecs_execution_role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'elasticloadbalancing:DeregisterInstancesFromLoadBalancer',
                'elasticloadbalancing:DeregisterTargets',
                'elasticloadbalancing:Describe*',
                'elasticloadbalancing:RegisterInstancesWithLoadBalancer',
                'elasticloadbalancing:RegisterTargets', 'ec2:Describe*',
                'ec2:AuthorizeSecurityGroupIngress', 'sts:AssumeRole',
                'ssm:GetParameters', 'secretsmanager:GetSecretValue',
                'ecr:GetAuthorizationToken',
                'ecr:BatchCheckLayerAvailability',
                'ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage',
                'logs:CreateLogStream', 'logs:PutLogEvents',
                "application-autoscaling:*", "cloudwatch:DescribeAlarms",
                "cloudwatch:PutMetricAlarm"
            ],
            resources=["*"]))

    # ECS Task Role - grants the containers themselves access to AWS APIs.
    ecs_task_role = iam.Role(
        self,
        'ECSTaskRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        role_name="{0}-{1}-task-role".format(my_service_name, stage))
    ecs_task_role.add_to_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            actions=[
                                'logs:CreateLogStream', 'logs:PutLogEvents',
                                'dynamodb:Query', 'dynamodb:ListTables',
                                'secretsmanager:GetSecretValue',
                                'kms:Decrypt'
                            ],
                            resources=["*"]))

    # Task definitions (one per service, all 512 MiB / 0.25 vCPU).
    api_taskdef = ecs.FargateTaskDefinition(
        self,
        'APIFargateTask',
        memory_limit_mib=512,
        cpu=256,
        execution_role=ecs_execution_role,
        task_role=ecs_task_role,
        family="{0}-{1}-api".format(my_service_name, stage))
    web_taskdef = ecs.FargateTaskDefinition(
        self,
        'WebFargateTask',
        memory_limit_mib=512,
        cpu=256,
        execution_role=ecs_execution_role,
        task_role=ecs_task_role,
        family="{0}-{1}-web".format(my_service_name, stage))
    tca_taskdef = ecs.FargateTaskDefinition(
        self,
        'TcaFargateTask',
        memory_limit_mib=512,
        cpu=256,
        execution_role=ecs_execution_role,
        task_role=ecs_task_role,
        family="{0}-{1}-tca".format(my_service_name, stage))

    # Existing ECR repositories (account-fixed ARNs).
    api_repo = ecr.Repository.from_repository_arn(
        self,
        'ApiImage',
        repository_arn=
        "arn:aws:ecr:us-east-1:346147488134:repository/oculus-api")
    web_repo = ecr.Repository.from_repository_arn(
        self,
        'WebImage',
        repository_arn=
        "arn:aws:ecr:us-east-1:346147488134:repository/oculus-web")
    tca_repo = ecr.Repository.from_repository_arn(
        self,
        'TcaImage',
        repository_arn=
        "arn:aws:ecr:us-east-1:346147488134:repository/oculus-tca-api")

    # Containers (image tags are pinned per environment).
    api_container = api_taskdef.add_container(
        "oculus-cdk-{}-api".format(stage),
        image=ecs.EcrImage(repository=api_repo, tag="devqaurl"),
        logging=ecs.LogDriver.aws_logs(stream_prefix="{0}-{1}-api".format(
            my_service_name, stage),
                                       log_group=log_group))
    web_container = web_taskdef.add_container(
        "oculus-cdk-{}-web".format(stage),
        image=ecs.EcrImage(repository=web_repo, tag="removeMetaMockup"),
        logging=ecs.LogDriver.aws_logs(stream_prefix="{0}-{1}-web".format(
            my_service_name, stage),
                                       log_group=log_group))
    tca_container = tca_taskdef.add_container(
        "oculus-cdk-{}-tca".format(stage),
        image=ecs.EcrImage(repository=tca_repo, tag="ocu-1109"),
        logging=ecs.LogDriver.aws_logs(stream_prefix="{0}-{1}-tca".format(
            my_service_name, stage),
                                       log_group=log_group))

    # Port mappings: api/tca on 8080, web on 3030.
    api_container.add_port_mappings(
        ecs.PortMapping(container_port=8080,
                        host_port=8080,
                        protocol=ecs.Protocol.TCP))
    web_container.add_port_mappings(
        ecs.PortMapping(container_port=3030,
                        host_port=3030,
                        protocol=ecs.Protocol.TCP))
    tca_container.add_port_mappings(
        ecs.PortMapping(container_port=8080,
                        host_port=8080,
                        protocol=ecs.Protocol.TCP))

    # Services, each scaling 1..5 tasks on 50% CPU.
    api_service = ecs.FargateService(self, "FargateServiceAPI",
                                     task_definition=api_taskdef,
                                     cluster=ecs_cluster,
                                     desired_count=1,
                                     service_name="{0}-{1}-api".format(
                                         my_service_name, stage))
    api_scaling = api_service.auto_scale_task_count(max_capacity=5)
    api_scaling.scale_on_cpu_utilization('ApiCpuScaling',
                                         target_utilization_percent=50)

    web_service = ecs.FargateService(self, "FargateServiceWeb",
                                     task_definition=web_taskdef,
                                     cluster=ecs_cluster,
                                     desired_count=1,
                                     service_name="{0}-{1}-web".format(
                                         my_service_name, stage))
    web_scaling = web_service.auto_scale_task_count(max_capacity=5)
    web_scaling.scale_on_cpu_utilization('WebCpuScaling',
                                         target_utilization_percent=50)

    tca_service = ecs.FargateService(self, "FargateServiceTCA",
                                     task_definition=tca_taskdef,
                                     cluster=ecs_cluster,
                                     desired_count=1,
                                     service_name="{0}-{1}-tca".format(
                                         my_service_name, stage))
    tca_scaling = tca_service.auto_scale_task_count(max_capacity=5)
    tca_scaling.scale_on_cpu_utilization('TcaCpuScaling',
                                         target_utilization_percent=50)

    # HTTPS listener; open=False because ingress is restricted to the VPN
    # range via alb_sg above.
    alb_listener = alb.add_listener(
        'Listener',
        certificate_arns=[
            "arn:aws:acm:us-east-1:829809672214:certificate/a84bb369-03ce-4e5e-9d32-8c84609cad1e"
        ],
        port=443,
        open=False,
        protocol=elbv2.ApplicationProtocol.HTTPS)

    # Path-routed targets: /oculus-api/* -> api, /tca/* -> tca,
    # everything else -> web (default action, no path pattern).
    api_target = alb_listener.add_targets(
        'ECSAPI',
        port=8080,
        priority=1,
        targets=[api_service],
        health_check=elbv2.HealthCheck(path=api_health_path),
        path_pattern='/oculus-api/*')
    tca_target = alb_listener.add_targets(
        'ECSTCA',
        port=8080,
        priority=2,
        targets=[tca_service],
        health_check=elbv2.HealthCheck(path=tca_health_path),
        path_pattern='/tca/*')
    web_target = alb_listener.add_targets(
        'ECSWeb',
        port=3030,
        protocol=elbv2.ApplicationProtocol.HTTP,
        targets=[web_service],
        health_check=elbv2.HealthCheck(path='/'),
    )
    core.CfnOutput(self, 'LoadBalancerDNS',
                   value=alb.load_balancer_dns_name)

    # DNS: alias the site domain at the ALB.
    zone = route53.HostedZone.from_lookup(self, 'MyHostedZone',
                                          domain_name=props['zoneDomain'])
    route53.ARecord(
        self,
        'ServiceAliasRecord',
        record_name=props['siteDomain'],
        target=route53.RecordTarget(
            alias_target=aws_route53_targets.LoadBalancerTarget(
                load_balancer=alb)),
        zone=zone)
def __init__(self, scope: core.Construct, id: str, acmcert, vpc: aws_ec2.Vpc, **kwargs) -> None:
    """Blue/green ALB setup for the Nginx and Flask applications: one load
    balancer with production (:80), test (:8080) and HTTPS (:443) listeners,
    and two target groups per application.

    :param scope: parent construct.
    :param id: construct id.
    :param acmcert: ACM certificate construct for the HTTPS listener.
    :param vpc: VPC hosting the load balancer and targets.

    Bug fixed versus the previous revision: the construct id
    "HTTPSDefaultTargetGroup" was used twice on the HTTPS listener (once
    for the Nginx rule, once for the Flask default action), which fails
    CDK synthesis with a duplicate-construct-id error. The Flask default
    action now uses "HTTPSFlaskDefaultTargetGroup".
    """
    super().__init__(scope, id, **kwargs)

    # =============================================================================
    # Nginx Application Loadbalancer configuration
    # =============================================================================
    # Application load balancer, listeners and two target groups for
    # Blue/Green deployment.
    self.alb = elbv2.ApplicationLoadBalancer(self, "alb",
                                             vpc=vpc,
                                             internet_facing=True)
    self.albProdListener = self.alb.add_listener('albProdListener', port=80)
    self.albTestListener = self.alb.add_listener('albTestListener', port=8080)
    self.https_listener = self.alb.add_listener(
        "HTTPSListener",
        port=443,
        certificates=[elbv2.ListenerCertificate(acmcert.certificate_arn)],
        open=True,
    )
    self.albProdListener.connections.allow_default_port_from_any_ipv4(
        'Allow traffic from everywhere on port 80')
    self.albTestListener.connections.allow_default_port_from_any_ipv4(
        'Allow traffic from everywhere on port 8080')
    self.https_listener.connections.allow_from_any_ipv4(
        aws_ec2.Port.tcp(443), 'Allow traffic from everywhere on port 443')

    # Target group 1 (blue); 404 accepted as healthy during warm-up.
    self.blueGroup = elbv2.ApplicationTargetGroup(
        self,
        "blueGroup",
        vpc=vpc,
        protocol=elbv2.ApplicationProtocol.HTTP,
        port=80,
        target_type=elbv2.TargetType.IP,
        health_check={
            "path": "/",
            "timeout": core.Duration.seconds(10),
            "interval": core.Duration.seconds(15),
            "healthy_http_codes": "200,404"
        })

    # Target group 2 (green)
    self.greenGroup = elbv2.ApplicationTargetGroup(
        self,
        "greenGroup",
        vpc=vpc,
        protocol=elbv2.ApplicationProtocol.HTTP,
        port=80,
        target_type=elbv2.TargetType.IP,
        health_check={
            "path": "/",
            "timeout": core.Duration.seconds(10),
            "interval": core.Duration.seconds(15),
            "healthy_http_codes": "200,404"
        })

    # NOTE(review): the production/test listeners are wired to the *green*
    # and *blue* groups respectively — the opposite of what the original
    # comments claimed. This may be the intended post-swap state of a
    # blue/green flip; behavior is preserved as-is, confirm with the
    # deployment pipeline before "fixing".
    self.albProdListener.add_target_groups("blueTarget",
                                           priority=1,
                                           path_patterns=["/nginx/*"],
                                           target_groups=[self.greenGroup])
    self.https_listener.add_target_groups("HTTPSDefaultTargetGroup",
                                          priority=1,
                                          path_patterns=["/nginx/*"],
                                          target_groups=[self.greenGroup])
    self.albTestListener.add_target_groups("greenTarget",
                                           priority=1,
                                           path_patterns=["/nginx/*"],
                                           target_groups=[self.blueGroup])

    # =============================================================================
    # Flask Application Loadbalancer configuration
    # =============================================================================
    # Target Group 1
    self.FlaskBlueGroup = elbv2.ApplicationTargetGroup(
        self,
        "FlaskBlueGroup",
        vpc=vpc,
        protocol=elbv2.ApplicationProtocol.HTTP,
        port=80,
        target_type=elbv2.TargetType.IP,
        health_check={
            "path": "/api/test",
            "timeout": core.Duration.seconds(10),
            "interval": core.Duration.seconds(15),
            "healthy_http_codes": "200,404"
        })

    # Target Group 2
    self.FlaskGreenGroup = elbv2.ApplicationTargetGroup(
        self,
        "FlaskGreenGroup",
        vpc=vpc,
        protocol=elbv2.ApplicationProtocol.HTTP,
        port=80,
        target_type=elbv2.TargetType.IP,
        health_check={
            "path": "/api/test",
            "timeout": core.Duration.seconds(10),
            "interval": core.Duration.seconds(15),
            "healthy_http_codes": "200,404"
        })

    # Flask default actions (no path pattern / priority).
    self.albProdListener.add_target_groups(
        "FlaskblueTarget", target_groups=[self.FlaskGreenGroup])
    # FIX: unique construct id (was a duplicate of the Nginx rule above).
    self.https_listener.add_target_groups(
        "HTTPSFlaskDefaultTargetGroup",
        target_groups=[self.FlaskGreenGroup],
    )
    self.albTestListener.add_target_groups(
        "FlaskgreenTarget", target_groups=[self.FlaskBlueGroup])
def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc, instances: list, **kwargs): super().__init__(scope, id, **kwargs) health_check = elbv2.HealthCheck(path="/login") public_target_group = elbv2.ApplicationTargetGroup( self, "PublicTG", port=8080, vpc=vpc, health_check=health_check) for instance in instances: public_target_group.add_target( elbv2.InstanceTarget(instance.instance_id, port=8080)) private_target_group = elbv2.ApplicationTargetGroup( self, "PrivateTG", port=8080, vpc=vpc, health_check=health_check) for instance in instances: private_target_group.add_target( elbv2.InstanceTarget(instance.instance_id, port=8080)) self._public_security_group = ec2.SecurityGroup(self, "PublicLBSG", vpc=vpc) self._public_security_group.add_ingress_rule( ec2.Peer.any_ipv4(), ec2.Port.tcp(80), ) self._public_lb = elbv2.ApplicationLoadBalancer( self, "PublicLB", vpc=vpc, vpc_subnets=ec2.SubnetSelection(subnets=vpc.select_subnets( subnet_type=ec2.SubnetType.PUBLIC).subnets), internet_facing=True, security_group=self._public_security_group) self._public_lb.add_listener( "PublicLBListener", port=80, default_target_groups=[public_target_group]) self._private_security_group = ec2.SecurityGroup(self, "PrivateLBSG", vpc=vpc) self._private_security_group.add_ingress_rule( ec2.Peer.any_ipv4(), ec2.Port.tcp(80), ) self._private_lb = elbv2.ApplicationLoadBalancer( self, "PrivateLB", vpc=vpc, vpc_subnets=ec2.SubnetSelection(subnets=vpc.select_subnets( subnet_type=ec2.SubnetType.PRIVATE).subnets), internet_facing=False, security_group=self._private_security_group) self._private_lb.add_listener( "PrivateLBListener", port=80, default_target_groups=[private_target_group])
def __init__(self, scope: core.Construct, id: str, sg_id: str, **kwargs) -> None:
    """Fargate service with CODE_DEPLOY blue/green deployment: an ALB with
    a production listener (:80 -> blue) and a test listener (:3000 ->
    green), plus a custom CodeDeploy deployment group wiring them together.

    :param scope: parent construct.
    :param id: construct id.
    :param sg_id: id of an existing security group shared by the ALB
        targets and the service.

    Fixed versus the previous revision: ALB target groups and listeners
    were given ``elb.Protocol.HTTP`` — the NLB-oriented enum — where the
    application-load-balancer API takes ``elb.ApplicationProtocol``; this
    now matches the rest of the codebase.
    """
    super().__init__(scope, id, **kwargs)

    # Networking Constructs (existing default VPC + existing SG).
    vpc = ec2.Vpc.from_lookup(self, "vpc", is_default=True)
    security_group = ec2.SecurityGroup.from_security_group_id(
        self, "sg", security_group_id=sg_id)

    # ECS Constructs
    cluster = ecs.Cluster(self,
                          "cluster",
                          cluster_name="FargateCluster",
                          vpc=vpc)
    taskdef = ecs.TaskDefinition(self,
                                 "blue-task-definition",
                                 compatibility=ecs.Compatibility.FARGATE,
                                 family="sample",
                                 network_mode=ecs.NetworkMode.AWS_VPC,
                                 memory_mib="512",
                                 cpu="256")
    taskdef.add_container(
        "blue",
        image=ecs.ContainerImage.from_registry("kovvuri/training:blue"),
        essential=True,
    ).add_port_mappings(
        ecs.PortMapping(container_port=80,
                        host_port=80,
                        protocol=ecs.Protocol.TCP))

    # Load Balancer Constructs
    alb = elb.ApplicationLoadBalancer(
        self,
        "alb",
        security_group=security_group,
        vpc_subnets=ec2.SubnetSelection(subnets=vpc.public_subnets),
        vpc=vpc)
    # FIX: ApplicationProtocol (ALB enum), not Protocol (NLB enum).
    blue = elb.ApplicationTargetGroup(
        self,
        "blue",
        target_group_name="swap1",
        port=80,
        protocol=elb.ApplicationProtocol.HTTP,
        target_type=elb.TargetType.IP,
        vpc=vpc)
    green = elb.ApplicationTargetGroup(
        self,
        "green",
        target_group_name="swap2",
        port=80,
        protocol=elb.ApplicationProtocol.HTTP,
        target_type=elb.TargetType.IP,
        vpc=vpc)
    # Production traffic on :80 (blue), test traffic on :3000 (green);
    # CodeDeploy swaps the groups between the two listeners.
    alb.add_listener("80_listener",
                     default_target_groups=[blue],
                     protocol=elb.ApplicationProtocol.HTTP,
                     port=80)
    alb.add_listener(
        "3000_listener",
        default_target_groups=[green],
        protocol=elb.ApplicationProtocol.HTTP,
        port=3000,
    )

    # ECS Service — CODE_DEPLOY controller so deployments are driven by
    # CodeDeploy rather than rolling updates.
    service = ecs.FargateService(
        self,
        "service",
        cluster=cluster,
        task_definition=taskdef,
        desired_count=1,
        platform_version=ecs.FargatePlatformVersion.VERSION1_3,
        deployment_controller=ecs.DeploymentController(
            type=ecs.DeploymentControllerType.CODE_DEPLOY),
        assign_public_ip=False,
        enable_ecs_managed_tags=True,
        propagate_task_tags_from=ecs.PropagatedTagSource.TASK_DEFINITION,
        security_group=security_group,
        vpc_subnets=ec2.SubnetSelection(subnets=vpc.private_subnets))
    service.attach_to_application_target_group(target_group=blue)

    # Custom resource wiring the service + target groups into CodeDeploy.
    CustomCodeDeploy.EcsDeploymentGroup(
        self,
        "DeploymentGroup",
        ecs_service=service.service_name,
        ecs_cluster=cluster.cluster_name,
        production_target_group=blue.target_group_name,
        test_target_group=green.target_group_name,
        production_port=80,
        test_port=3000)
def __init__(self, app: core.App, id: str, **kwargs) -> None:
    """Apache Airflow (CeleryExecutor) on ECS Fargate.

    Provisions a new VPC, an RDS Postgres metadata database, an ElastiCache
    Redis task queue, and four Fargate services (scheduler, flower, worker,
    web server) built from one ECR image ("airflow:1.10.9"), with an
    internet-facing ALB in front of the web server (ALB_PORT) and flower
    (FLOWER_PORT).

    Security-group wiring: ALB <- home IP only; web server and flower <- ALB;
    Postgres <- scheduler/worker/web server; Redis <- scheduler/flower/worker/
    web server.

    NOTE(review): the DB credentials and the Fernet key are hard-coded in the
    container environments below — they should come from Secrets Manager.
    """
    super().__init__(app, id, **kwargs)

    # -- VPC
    vpc = ec2.Vpc(self, "vpc_airflow")

    # -- ECR repository holding the Airflow image
    ecr_repo = ecr.Repository.from_repository_name(self, "ecr_repo_airflow",
                                                   "airflow")

    # -- RDS (Airflow metadata database)
    sg_airflow_backend_db = ec2.SecurityGroup(
        self,
        "sg_airflow_backend_database",
        vpc=vpc,
        description="Airflow backend database",
        security_group_name="sg_airflow_backend_database",
    )
    db = rds.DatabaseInstance(
        self,
        "rds_airfow_backend",
        master_username="******",
        # FIXME: plain-text password checked into code — use Secrets Manager.
        master_user_password=core.SecretValue.plain_text("postgres"),
        database_name="airflow",
        engine=rds.DatabaseInstanceEngine.postgres(
            version=rds.PostgresEngineVersion.VER_11_8),
        vpc=vpc,
        instance_type=ec2.InstanceType.of(
            ec2.InstanceClass.BURSTABLE3,
            ec2.InstanceSize.MICRO,
        ),
        instance_identifier="airflow-backend",
        removal_policy=core.RemovalPolicy.DESTROY,
        deletion_protection=False,
        security_groups=[sg_airflow_backend_db],
        vpc_placement=ec2.SubnetSelection(
            subnet_type=ec2.SubnetType.PUBLIC),
    )

    # -- ElastiCache Redis (Celery broker/result backend)
    sg_redis = ec2.SecurityGroup(
        self,
        "sg_redis",
        vpc=vpc,
        description="Airflow redis",
        security_group_name="sg_redis",
    )
    redis_subnet_group = ec.CfnSubnetGroup(
        self,
        "airflow-redis-subnet-group",
        description="For Airflow Task Queue",
        subnet_ids=vpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE).subnet_ids,
        cache_subnet_group_name="airflow-redis-task-queue",
    )
    redis = ec.CfnCacheCluster(
        self,
        "redis",
        cluster_name="airflow-redis",
        cache_node_type="cache.t2.micro",
        engine="redis",
        num_cache_nodes=1,
        auto_minor_version_upgrade=True,
        engine_version="5.0.6",
        port=REDIS_PORT,
        cache_subnet_group_name=redis_subnet_group.ref,
        vpc_security_group_ids=[sg_redis.security_group_id],
    )

    # -- ECS cluster shared by all four services
    cluster = ecs.Cluster(
        self,
        "ecs_airflow",
        cluster_name="airflow",
        vpc=vpc,
        container_insights=True,
    )

    # ---------------- scheduler ----------------
    scheduler_task_role = iam.Role(
        self,
        "iam_role_scheduler",
        assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
        description="IAM role for ECS Scheduler service",
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEC2ContainerRegistryReadOnly"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchLogsFullAccess"),
        ],
        role_name="airflow-ecs-scheduler-task",
    )
    scheduler_task = ecs.FargateTaskDefinition(
        self,
        "ecs_task_scheduler",
        cpu=512,
        memory_limit_mib=2048,
        task_role=scheduler_task_role,
    )
    scheduler_task.add_container(
        "scheduler",
        command=["scheduler"],
        # credentials should be provided from Secrets Manager
        environment={
            "LOAD_EX": "n",
            "FERNET_KEY": "46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=",
            "EXECUTOR": "Celery",
            "POSTGRES_HOST": db.db_instance_endpoint_address,
            "POSTGRES_USER": "******",
            "POSTGRES_PASSWORD": "******",
            "POSTGRES_DB": "airflow",
            "REDIS_HOST": redis.attr_redis_endpoint_address,
        },
        image=ecs.ContainerImage.from_ecr_repository(
            ecr_repo,
            "1.10.9",
        ),
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="scheduler",
            log_group=logs.LogGroup(
                self,
                "log-airflow-scheduler",
                log_group_name="ecs/airflow/scheduler",
                retention=logs.RetentionDays.ONE_WEEK,
            ),
        ),
    )
    sg_airflow_scheduler = ec2.SecurityGroup(
        self,
        "sg_airflow_scheduler",
        vpc=vpc,
        description="Airflow Scheduler service",
        security_group_name="sg_airflow_scheduler",
    )
    # scheduler -> Redis
    sg_redis.add_ingress_rule(
        peer=sg_airflow_scheduler,
        connection=ec2.Port(
            protocol=ec2.Protocol.TCP,
            string_representation="from scheduler",
            from_port=REDIS_PORT,
            to_port=REDIS_PORT,
        ),
        description="from scheduler service",
    )
    # scheduler -> Postgres
    # NOTE(review): the labels below say "home" but the peer is the
    # scheduler SG — labels look copy-pasted; confirm intent before renaming.
    sg_airflow_backend_db.add_ingress_rule(
        peer=sg_airflow_scheduler,
        connection=ec2.Port(
            protocol=ec2.Protocol.TCP,
            string_representation="from home",
            from_port=POSTGRES_PORT,
            to_port=POSTGRES_PORT,
        ),
        description="home",
    )
    scheduler_service = ecs.FargateService(
        self,
        "ecs_service_scheduler",
        cluster=cluster,
        task_definition=scheduler_task,
        desired_count=1,
        security_groups=[sg_airflow_scheduler],
        service_name="scheduler",
    )

    # ---------------- flower (Celery monitoring UI) ----------------
    flower_task_role = iam.Role(
        self,
        "iam_role_flower",
        assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
        description="IAM role for ECS Flower service",
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEC2ContainerRegistryReadOnly"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchLogsFullAccess"),
        ],
        role_name="airflow-ecs-flower-task",
    )
    flower_task = ecs.FargateTaskDefinition(
        self,
        "ecs_task_flower",
        cpu=512,
        memory_limit_mib=1024,
        # FIX: was task_role=scheduler_task_role (copy-paste bug) — the
        # dedicated flower_task_role above was created but never used.
        task_role=flower_task_role,
    )
    flower_task.add_container(
        "flower",
        command=["flower"],
        # credentials should be provided from Secrets Manager
        environment={
            "LOAD_EX": "n",
            "FERNET_KEY": "46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=",
            "EXECUTOR": "Celery",
            "REDIS_HOST": redis.attr_redis_endpoint_address,
        },
        image=ecs.ContainerImage.from_ecr_repository(
            ecr_repo,
            "1.10.9",
        ),
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="flower",
            log_group=logs.LogGroup(
                self,
                "log-airflow-flower",
                log_group_name="ecs/airflow/flower",
                retention=logs.RetentionDays.ONE_WEEK,
            ),
        ),
    ).add_port_mappings(
        ecs.PortMapping(
            container_port=FLOWER_PORT,
            host_port=FLOWER_PORT,
            protocol=ecs.Protocol.TCP,
        ))
    sg_airflow_flower = ec2.SecurityGroup(
        self,
        "sg_airflow_flower",
        vpc=vpc,
        description="Airflow Flower service",
        security_group_name="sg_airflow_flower",
    )
    # home IP -> flower (direct, in addition to the ALB path below)
    sg_airflow_flower.add_ingress_rule(
        peer=ec2.Peer.ipv4("115.66.217.45/32"),
        connection=ec2.Port(
            protocol=ec2.Protocol.TCP,
            # FIX: typo "from homr" -> "from home"
            string_representation="from home",
            from_port=FLOWER_PORT,
            to_port=FLOWER_PORT,
        ),
        description="from home",
    )
    # flower -> Redis
    sg_redis.add_ingress_rule(
        peer=sg_airflow_flower,
        connection=ec2.Port(
            protocol=ec2.Protocol.TCP,
            string_representation="from flower",
            from_port=REDIS_PORT,
            to_port=REDIS_PORT,
        ),
        description="from flower",
    )
    flower_service = ecs.FargateService(
        self,
        "ecs_service_flower",
        cluster=cluster,
        task_definition=flower_task,
        desired_count=1,
        security_groups=[sg_airflow_flower],
        service_name="flower",
    )

    # ---------------- worker ----------------
    worker_task_role = iam.Role(
        self,
        "iam_role_worker",
        assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
        description="IAM role for ECS worker service",
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEC2ContainerRegistryReadOnly"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchLogsFullAccess"),
        ],
        role_name="airflow-ecs-worker-task",
    )
    worker_task = ecs.FargateTaskDefinition(
        self,
        "ecs_task_worker",
        cpu=1024,
        memory_limit_mib=3072,
        task_role=worker_task_role,
    )
    worker_task.add_container(
        "worker",
        command=["worker"],
        # credentials should be provided from Secrets Manager
        environment={
            "LOAD_EX": "n",
            "FERNET_KEY": "46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=",
            "EXECUTOR": "Celery",
            "POSTGRES_HOST": db.db_instance_endpoint_address,
            "POSTGRES_USER": "******",
            "POSTGRES_PASSWORD": "******",
            "POSTGRES_DB": "airflow",
            "REDIS_HOST": redis.attr_redis_endpoint_address,
        },
        image=ecs.ContainerImage.from_ecr_repository(
            ecr_repo,
            "1.10.9",
        ),
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="worker",
            log_group=logs.LogGroup(
                self,
                "log-airflow-worker",
                log_group_name="ecs/airflow/worker",
                retention=logs.RetentionDays.ONE_WEEK,
            ),
        ),
    )
    sg_airflow_worker = ec2.SecurityGroup(
        self,
        "sg_airflow_worker",
        vpc=vpc,
        description="Airflow worker service",
        security_group_name="sg_airflow_worker",
    )
    # worker -> Redis
    sg_redis.add_ingress_rule(
        peer=sg_airflow_worker,
        connection=ec2.Port(
            protocol=ec2.Protocol.TCP,
            string_representation="from worker",
            from_port=REDIS_PORT,
            to_port=REDIS_PORT,
        ),
        description="from worker service",
    )
    # worker -> Postgres
    sg_airflow_backend_db.add_ingress_rule(
        peer=sg_airflow_worker,
        connection=ec2.Port(
            protocol=ec2.Protocol.TCP,
            string_representation="from worker",
            from_port=POSTGRES_PORT,
            to_port=POSTGRES_PORT,
        ),
        description="From worker",
    )
    worker_service = ecs.FargateService(
        self,
        "ecs_service_worker",
        cluster=cluster,
        task_definition=worker_task,
        desired_count=1,
        security_groups=[sg_airflow_worker],
        service_name="worker",
    )

    # ---------------- web server ----------------
    web_server_task_role = iam.Role(
        self,
        "iam_role_web_server",
        assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
        description="IAM role for ECS web server service",
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AmazonEC2ContainerRegistryReadOnly"),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "CloudWatchLogsFullAccess"),
        ],
        role_name="airflow-ecs-web-server-task",
    )
    web_server_task = ecs.FargateTaskDefinition(
        self,
        "ecs_task_web_server",
        cpu=512,
        memory_limit_mib=1024,
        task_role=web_server_task_role,
    )
    web_server_task.add_container(
        "web_server",
        command=["webserver"],
        # credentials should be provided from Secrets Manager
        environment={
            "LOAD_EX": "n",
            "FERNET_KEY": "46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=",
            "EXECUTOR": "Celery",
            "POSTGRES_HOST": db.db_instance_endpoint_address,
            "POSTGRES_USER": "******",
            "POSTGRES_PASSWORD": "******",
            "POSTGRES_DB": "airflow",
            "REDIS_HOST": redis.attr_redis_endpoint_address,
        },
        image=ecs.ContainerImage.from_ecr_repository(
            ecr_repo,
            "1.10.9",
        ),
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="web_server",
            log_group=logs.LogGroup(
                self,
                "log-airflow-web-server",
                log_group_name="ecs/airflow/web-server",
                retention=logs.RetentionDays.ONE_WEEK,
            ),
        ),
    ).add_port_mappings(
        ecs.PortMapping(
            container_port=WEB_SERVER_PORT,
            host_port=WEB_SERVER_PORT,
            protocol=ecs.Protocol.TCP,
        ))
    sg_airflow_web_server = ec2.SecurityGroup(
        self,
        "sg_airflow_web_server",
        vpc=vpc,
        description="Airflow web server service",
        security_group_name="sg_airflow_web_server",
    )
    # web server -> Postgres
    # FIX: this identical rule was previously added twice; the duplicate
    # has been removed.
    sg_airflow_backend_db.add_ingress_rule(
        peer=sg_airflow_web_server,
        connection=ec2.Port(
            protocol=ec2.Protocol.TCP,
            string_representation="From web server",
            from_port=POSTGRES_PORT,
            to_port=POSTGRES_PORT,
        ),
        description="From web server",
    )
    # web server -> Redis
    sg_redis.add_ingress_rule(
        peer=sg_airflow_web_server,
        connection=ec2.Port(
            protocol=ec2.Protocol.TCP,
            string_representation="from web server",
            from_port=REDIS_PORT,
            to_port=REDIS_PORT,
        ),
        description="from web server",
    )
    web_server_service = ecs.FargateService(
        self,
        "ecs_service_web_server",
        cluster=cluster,
        task_definition=web_server_task,
        desired_count=1,
        security_groups=[sg_airflow_web_server],
        service_name="web_server",
    )

    # ---------------- Load balancer ----------------
    sg_airflow_alb = ec2.SecurityGroup(
        self,
        "sg_airflow_alb",
        vpc=vpc,
        description="Airflow ALB",
        security_group_name="sg_airflow_alb",
    )
    # ALB -> web server
    sg_airflow_web_server.add_ingress_rule(
        peer=sg_airflow_alb,
        connection=ec2.Port(
            protocol=ec2.Protocol.TCP,
            string_representation="From ALB",
            from_port=WEB_SERVER_PORT,
            to_port=WEB_SERVER_PORT,
        ),
        description="From ALB",
    )
    # ALB -> flower
    sg_airflow_flower.add_ingress_rule(
        peer=sg_airflow_alb,
        connection=ec2.Port(
            protocol=ec2.Protocol.TCP,
            string_representation="From ALB",
            from_port=FLOWER_PORT,
            to_port=FLOWER_PORT,
        ),
        description="From ALB",
    )
    # Home -> ALB (web server port)
    sg_airflow_alb.add_ingress_rule(
        peer=ec2.Peer.ipv4(MY_IP_CIDR),
        connection=ec2.Port(
            protocol=ec2.Protocol.TCP,
            string_representation="From Home",
            from_port=ALB_PORT,
            to_port=ALB_PORT,
        ),
        description="From Home",
    )
    # Home -> ALB (flower port)
    sg_airflow_alb.add_ingress_rule(
        peer=ec2.Peer.ipv4(MY_IP_CIDR),
        connection=ec2.Port(
            protocol=ec2.Protocol.TCP,
            string_representation="From Home",
            from_port=FLOWER_PORT,
            to_port=FLOWER_PORT,
        ),
        description="From Home",
    )
    alb = elb.ApplicationLoadBalancer(
        self,
        "alb_airflow",
        internet_facing=True,
        security_group=sg_airflow_alb,
        vpc=vpc,
        load_balancer_name="alb-airflow",
    )
    # Listener 1: ALB_PORT -> web server. open=False because ingress is
    # restricted to MY_IP_CIDR via the SG rules above.
    listener1 = alb.add_listener(
        "alb_airflow_listener1",
        open=False,
        port=ALB_PORT,
        protocol=elb.ApplicationProtocol.HTTP,
        default_target_groups=[
            elb.ApplicationTargetGroup(
                self,
                "alb_airflow_target_group_web_server",
                port=WEB_SERVER_PORT,
                protocol=elb.ApplicationProtocol.HTTP,
                target_group_name="alb-tg-airflow-web-server",
                targets=[web_server_service],
                vpc=vpc,
            )
        ],
    )
    # Listener 2: FLOWER_PORT -> flower.
    # NOTE(review): target group name "alb-tg-aiflow-flower" has a typo
    # ("aiflow"); left as-is since renaming forces resource replacement.
    alb.add_listener(
        "alb_airflow_listener2",
        open=False,
        port=FLOWER_PORT,
        protocol=elb.ApplicationProtocol.HTTP,
        default_target_groups=[
            elb.ApplicationTargetGroup(
                self,
                "alb_airflow_target_group_flower",
                port=FLOWER_PORT,
                protocol=elb.ApplicationProtocol.HTTP,
                target_group_name="alb-tg-aiflow-flower",
                targets=[flower_service],
                vpc=vpc,
            )
        ],
    )
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Fargate service on imported VPC/subnets/SGs, fronted by a new ALB.

    Network resources (VPC, subnets, security groups) and the task role are
    imported by ID/ARN from the ``impRes`` settings dict; the ALB, listener,
    target group, cluster, task definition, image asset and service are
    created new (driven by ``newRes``/``My*`` dicts), each with a DESTROY
    removal policy where the construct supports one.
    """
    super().__init__(scope, construct_id, **kwargs)
    # The code that defines your stack goes here
    #############################################
    # Import resource and custom setting part start
    #############################################
    # cn-north-1
    # Existing resources to import (IDs/ARNs are region/account specific).
    impRes={
        "vpc":"vpc-0883083ff3a10c1ec",
        "SvcSG":"sg-04d3b60e954c1c1ef",
        "ALBSG":"sg-0b6d093d52d48bba9",
        "ALBInternet":True,
        "taskRole":"arn:aws-cn:iam::627484392488:role/ecsTaskExecutionRole",
        "AlbSubnet":[
            {"subnetId":"subnet-0d16fa0c969f234d3",
             "routeTabId":"rtb-074c6b532f3030ad6"},
            {"subnetId":"subnet-0f28a97c04d3b11cd",
             "routeTabId":"rtb-074c6b532f3030ad6"}
        ],
        #"SvcSubNet":[{"subnetId":"subnet-0d16fa0c969f234d3","routeTabId":"rtb-074c6b532f3030ad6"}]
        "SvcSubNet":[{"subnetId":"subnet-0f28a97c04d3b11cd","routeTabId":"rtb-0587cc522717461cd"},
                     {"subnetId":"subnet-0d16fa0c969f234d3","routeTabId":"rtb-0587cc522717461cd"}]
    }
    # Settings for the resources created by this stack.
    newRes={
        "TG":{"HealthPath":"/test.html","Port":80,"containPort":80},
        "Listener":{"Port":80},
        "TaskFamily":"tsFargate",
        "ImageAsset1":{"DockfilePath":"httpd-ssh",
                       "BuildArgs":{"HTTP_PROXY":"http://YOUR_PROXY_SERVER:80"}
        }
    }
    MyTaskDefinition=[{"Cpu":512,"MemLimitMib":1024}]
    MyContainerDefinition=[
        {"containerName":"MyContainer1",
         "cpu":256,
         "essential":True,
         "portMappings":[ecs.PortMapping(container_port=80,host_port=80)],
         #"portMappings":[ecs.PortMapping(container_port=80,host_port=80),ecs.PortMapping(container_port=22,host_port=22)],
         "environment":{"SSH_PUBLIC_KEY":"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC/alWrS+HH5KkPbso+Tsy+Z0WGTX5wvXvon5OacLMyOU3gj2mbbIifasXf/RadpuywuyW3uFirtRlPmSb5Q0PVLODku503Xettw+u6/Z22VV7F2ACgg4iHaCo2SR4L8saUrLLfcKXKr/WCn3w7uYcqGsXEcSFCCSZgn4BoZJqP4Q=="},
         "LogMountPoint":["/usr/local/apache2/logs"]
        }
    ]
    MySvc={"AssignPubIp":True, "desiredCount":1}
    #############################################
    # Import resource and custom setting part end
    #############################################

    # If you import an external resource you cannot set a destroy policy on it.
    # Import VPC, private subnets, SGs.
    vpc = ec2.Vpc.from_lookup(self, "vpc",
        vpc_id=impRes["vpc"])
    # Import the service security group (immutable so CDK won't modify it).
    mysvcsg = ec2.SecurityGroup.from_security_group_id(self, "svcsg",
        impRes["SvcSG"],
        mutable=False)
    # Import the task/execution role.
    taskRole = iam.Role.from_role_arn(self, "TaskRole",impRes["taskRole"])

    # Create the ALB target group (IP targets, HTTP health check).
    mytargetGrp = elbv2.ApplicationTargetGroup(self, "targetGrp",
        target_type=elbv2.TargetType.IP,
        port=newRes["TG"]["Port"],
        vpc=vpc,
        health_check=elbv2.HealthCheck(path=newRes["TG"]["HealthPath"]))
    # Target group cannot use .apply_removal_policy directly, so reach
    # through to the underlying CfnTargetGroup (escape hatch).
    cfn_mytargetGrp=mytargetGrp.node.find_child("Resource")
    cfn_mytargetGrp.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

    # Import public subnets for the ALB.
    albsubnets = [
        ec2.Subnet.from_subnet_attributes(self,'albsubnetid1',
            subnet_id = impRes["AlbSubnet"][0]["subnetId"],
            route_table_id=impRes["AlbSubnet"][0]["routeTabId"]
        ),
        ec2.Subnet.from_subnet_attributes(self,'albsubnetid2',
            subnet_id = impRes["AlbSubnet"][1]["subnetId"],
            route_table_id=impRes["AlbSubnet"][1]["routeTabId"]
        )
    ]
    vpc_subnets_selection = ec2.SubnetSelection(subnets=albsubnets)
    # Create new ALB in the imported public subnets with the imported SG.
    myalb = elbv2.ApplicationLoadBalancer(self, "ALBv2",
        vpc=vpc,
        security_group=ec2.SecurityGroup.from_security_group_id(self, "ALBSG", impRes["ALBSG"],mutable=False),
        internet_facing=impRes["ALBInternet"],
        vpc_subnets=vpc_subnets_selection)
    myalb.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
    # Create new ALB listener and attach the target group.
    myalblistener = elbv2.ApplicationListener(self, "ALBlistenter",
        load_balancer=myalb,
        port=newRes["Listener"]["Port"])
    myalblistener.apply_removal_policy(cdk.RemovalPolicy.DESTROY)
    myalblistener.add_target_groups("albaddtg", target_groups=[mytargetGrp])

    # Create new ECS cluster.
    mycluster = ecs.Cluster(self, "cluster", vpc=vpc)
    mycluster.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

    fargatetaskDefinition = ecs.FargateTaskDefinition(self, "fargatetaskDefinition",
        cpu=MyTaskDefinition[0]["Cpu"],
        memory_limit_mib=MyTaskDefinition[0]["MemLimitMib"],
        execution_role=taskRole,
        family=newRes["TaskFamily"],
        task_role=taskRole)
        #volumes=myEfsVols)
    fargatetaskDefinition.apply_removal_policy(cdk.RemovalPolicy.DESTROY)

    # Define the docker image asset.
    dirname = os.path.dirname(__file__)
    # For container 1 (normally httpd): the image is built locally from the
    # Dockerfile directory and then pushed to ECR.
    asset1 = DockerImageAsset(self, "ImageAsset1",
        directory=os.path.join(dirname, "../..", newRes["ImageAsset1"]["DockfilePath"]),
        build_args=newRes["ImageAsset1"]["BuildArgs"]
    )
    # Create the container definition for the task definition; the image tag
    # is recovered from the asset's image URI.
    MyContainer1def = ecs.ContainerDefinition(self, "MyContainer1def",
        task_definition=fargatetaskDefinition,
        linux_parameters=ecs.LinuxParameters(self,"LinuxPara1",init_process_enabled=True),
        image=ecs.ContainerImage.from_ecr_repository(asset1.repository, asset1.image_uri.rpartition(":")[-1]),
        container_name=MyContainerDefinition[0]["containerName"],
        essential=MyContainerDefinition[0]["essential"],
        port_mappings=MyContainerDefinition[0]["portMappings"],
        environment=MyContainerDefinition[0]["environment"]
    )

    # Import the service private subnets.
    mysvcprivateSNs = [
        ec2.Subnet.from_subnet_attributes(self,'svcprivateSN1',
            subnet_id = impRes["SvcSubNet"][0]["subnetId"],
            route_table_id=impRes["SvcSubNet"][0]["routeTabId"]),
        ec2.Subnet.from_subnet_attributes(self,'svcprivateSN2',
            subnet_id = impRes["SvcSubNet"][1]["subnetId"],
            route_table_id=impRes["SvcSubNet"][1]["routeTabId"])
    ]

    # Create the Fargate service and register it with the target group.
    myservice=ecs.FargateService(self,"service",
        task_definition=fargatetaskDefinition,
        assign_public_ip=MySvc["AssignPubIp"],
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        vpc_subnets=ec2.SubnetSelection(subnets=mysvcprivateSNs),
        security_group=mysvcsg,
        cluster=mycluster,
        desired_count=MySvc["desiredCount"])

    mytargetGrp.add_target(myservice.load_balancer_target(container_name="MyContainer1",container_port=newRes["TG"]["containPort"], protocol=ecs.Protocol.TCP))
def __init__(self, scope: core.Stack, id: str, **kwargs) -> None:
    """ECS-workshop shared infrastructure for the Fargate + EFS demo.

    Imports the base VPC, Cloud Map namespace and ECS cluster from the
    '{environment_name}-base' stack (via Fn.import_value), then creates the
    ALB + target group, the shared EFS file system with its security groups,
    the task execution role and a log group. All key attributes are exported
    as CloudFormation outputs for downstream service stacks.
    """
    super().__init__(scope, id, **kwargs)

    self.environment_name = 'ecsworkshop'
    ## Importing existing VPC and ECS Cluster ##
    self.vpc = ec2.Vpc.from_lookup(self,
                                   "VPC",
                                   vpc_name='{}-base/BaseVPC'.format(
                                       self.environment_name))
    # Cloud Map namespace attributes come from exports of the base stack.
    self.sd_namespace = sd.PrivateDnsNamespace.from_private_dns_namespace_attributes(
        self,
        "SDNamespace",
        namespace_name=core.Fn.import_value('NSNAME'),
        namespace_arn=core.Fn.import_value('NSARN'),
        namespace_id=core.Fn.import_value('NSID'))
    self.ecs_cluster = ecs.Cluster.from_cluster_attributes(
        self,
        "ECSCluster",
        cluster_name=core.Fn.import_value('ECSClusterName'),
        security_groups=[],
        vpc=self.vpc,
        default_cloud_map_namespace=self.sd_namespace)
    ## End VPC and ECS Cluster ##

    ## Load balancer for ECS service ##
    self.frontend_sec_grp = ec2.SecurityGroup(
        self,
        "FrontendIngress",
        vpc=self.vpc,
        allow_all_outbound=True,
        description="Frontend Ingress All port 80",
    )
    self.load_balancer = elbv2.ApplicationLoadBalancer(
        self,
        "ALB",
        security_group=self.frontend_sec_grp,
        internet_facing=True,
        vpc=self.vpc)
    # Service listens on 8000; the ALB listener below accepts on 80.
    self.target_group = elbv2.ApplicationTargetGroup(
        self,
        "ALBTG",
        port=8000,
        target_group_name="ECSDemoFargateEFS",
        vpc=self.vpc,
        target_type=elbv2.TargetType.IP)
    self.load_balancer.add_listener(
        "FrontendListener",
        default_target_groups=[self.target_group],
        port=80)
    ## End Load balancer ##

    ## EFS Setup ##
    self.service_sec_grp = ec2.SecurityGroup(
        self,
        "EFSSecGrp",
        vpc=self.vpc,
        description="Allow access to self on NFS Port",
    )
    # Self-referencing rule: members of this SG may reach each other on
    # NFS (2049) — required for tasks to mount the shared EFS.
    self.service_sec_grp.connections.allow_from(
        other=self.service_sec_grp,
        port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                            string_representation="Self",
                            from_port=2049,
                            to_port=2049))
    # TODO: possibly create another sec grp for 8000
    # ALB SG -> service SG on the app port (8000).
    self.service_sec_grp.connections.allow_from(
        other=self.frontend_sec_grp,
        port_range=ec2.Port(protocol=ec2.Protocol.TCP,
                            string_representation="LB2Service",
                            from_port=8000,
                            to_port=8000))
    self.shared_fs = efs.FileSystem(
        self,
        "SharedFS",
        vpc=self.vpc,
        security_group=self.service_sec_grp,
    )
    ## End EFS Setup ##

    ## TODO: IAM Role to access EFS access points for task ##
    # Task execution role (image pull + log writing via the AWS managed policy).
    self.task_execution_role = iam.Role(
        self,
        "TaskExecutionRole",
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        description="Task execution role for ecs services",
        managed_policies=[
            iam.ManagedPolicy.from_managed_policy_arn(
                self,
                'arn',
                managed_policy_arn=
                'arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy'
            )
        ])
    ## END IAM ##

    ## Logging ##
    self.service_log_group = logs.LogGroup(self, "ECSEFSDemoLogGrp")
    ## END Logging ##

    # Cloudformation Outputs — consumed by the downstream service stack(s).
    core.CfnOutput(self,
                   "ExecutionRoleARN",
                   value=self.task_execution_role.role_arn,
                   export_name="ECSFargateEFSDemoTaskExecutionRoleARN")
    core.CfnOutput(self,
                   "EFSID",
                   value=self.shared_fs.file_system_id,
                   export_name="ECSFargateEFSDemoFSID")
    core.CfnOutput(self,
                   "LBName",
                   value=self.load_balancer.load_balancer_name,
                   export_name="ECSFargateEFSDemoLBName")
    core.CfnOutput(self,
                   "TargetGroupArn",
                   value=self.target_group.target_group_arn,
                   export_name="ECSFargateEFSDemoTGARN")
    core.CfnOutput(self,
                   "VPCPrivateSubnets",
                   value=",".join(
                       [x.subnet_id for x in self.vpc.private_subnets]),
                   export_name="ECSFargateEFSDemoPrivSubnets")
    core.CfnOutput(self,
                   "SecurityGroups",
                   value="{},{}".format(
                       self.frontend_sec_grp.security_group_id,
                       self.service_sec_grp.security_group_id),
                   export_name="ECSFargateEFSDemoSecGrps")
    core.CfnOutput(self,
                   "LBURL",
                   value=self.load_balancer.load_balancer_dns_name,
                   export_name="ECSFargateEFSDemoLBURL")
    core.CfnOutput(self,
                   "LogGroupName",
                   value=self.service_log_group.log_group_name,
                   export_name="ECSFargateEFSDemoLogGroupName")
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """Network layer for a Ghost CMS deployment.

    Creates a 2-AZ VPC (public "ALB" subnets + isolated "DB" subnets), the
    three security groups (ALB, Fargate, DB), an internet-facing ALB with an
    HTTP :80 listener, and an IP-mode target group on Ghost's default port
    2368. The created resources are passed to downstream stacks through
    ``self.output_props``.

    :param props: dict of inherited properties; copied (not mutated) into
        ``self.output_props`` with this stack's resources added.
    """
    super().__init__(scope, id, **kwargs)

    alb_subnet = ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                         name="ALB",
                                         cidr_mask=24)
    db_subnet = ec2.SubnetConfiguration(
        subnet_type=ec2.SubnetType.ISOLATED, name="DB", cidr_mask=24)

    # VPC
    vpc = ec2.Vpc(
        self,
        "VPC",
        max_azs=2,
        cidr="10.10.0.0/16",
        # configuration will create 2 groups in 2 AZs = 4 subnets.
        subnet_configuration=[alb_subnet, db_subnet],
        nat_gateway_provider=ec2.NatProvider.gateway(),
        nat_gateways=1,
    )

    # Security groups
    # Create Security group that allows traffic into the ALB
    alb_security_group = ec2.SecurityGroup(
        self,
        "ALBSecurityGroup",
        description="Ghost ALB Security Group",
        vpc=vpc)
    alb_security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                        ec2.Port.tcp(80),
                                        "allow HTTP to ALB")
    alb_security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                        ec2.Port.tcp(443),
                                        "allow HTTPS to ALB")

    # Create Security group for the host/ENI/Fargate that allows 2368
    # (Ghost's default port) — only from the ALB's security group.
    fargate_security_group = ec2.SecurityGroup(
        self,
        "FargateSecurityGroup",
        description="Ghost ECS Fargate Security Group",
        vpc=vpc)
    fargate_security_group.add_ingress_rule(
        alb_security_group, ec2.Port.tcp(2368),
        "allow ghost default 2368 to fargate")

    # Create the DB's Security group which only allows access to members
    # of the Ghost Fargate SG (MySQL port 3306).
    db_security_group = ec2.SecurityGroup(
        self,
        "DBSecurityGroup",
        description="Security group for RDS DB Instance for ghost cms",
        vpc=vpc)
    db_security_group.add_ingress_rule(
        fargate_security_group, ec2.Port.tcp(3306),
        "allow ghost fargate host to connect to db")

    ghost_alb = elb.ApplicationLoadBalancer(
        self,
        "GhostALB",
        internet_facing=True,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
        security_group=alb_security_group,
        vpc=vpc)
    # 301 accepted as healthy because Ghost redirects / to its configured URL.
    ghost_target_health_check = elb.HealthCheck(
        interval=core.Duration.seconds(30),
        protocol=elb.Protocol.HTTP,
        timeout=core.Duration.seconds(10),
        healthy_threshold_count=4,
        unhealthy_threshold_count=3,
        healthy_http_codes="200,301")
    # IP-mode target group on 2368; targets are registered by the service
    # stack that consumes output_props.
    ghost_target_group = elb.ApplicationTargetGroup(
        self,
        "GhostTargetGroup",
        port=2368,
        protocol=elb.Protocol.HTTP,
        vpc=vpc,
        health_check=ghost_target_health_check,
        target_type=elb.TargetType.IP)
    ghost_alb_listener = elb.ApplicationListener(
        self,
        "Listener80",
        port=80,
        protocol=elb.Protocol.HTTP,
        load_balancer=ghost_alb,
        default_target_groups=[ghost_target_group])

    core.CfnOutput(self, "vpcid", value=vpc.vpc_id)
    core.CfnOutput(self,
                   "alb_url",
                   description="ALB URL",
                   value=ghost_alb.load_balancer_dns_name)

    # Hand the created resources to downstream stacks via a copied props
    # dict (copy() avoids mutating the caller's dict).
    self.output_props = props.copy()
    self.output_props['vpc'] = vpc
    self.output_props['subnets'] = vpc.public_subnets
    self.output_props['alb_security_group'] = alb_security_group
    self.output_props['fargate_security_group'] = fargate_security_group
    self.output_props['db_security_group'] = db_security_group