def airflow_web_service(self, environment):
    service_name = get_webserver_service_name(self.deploy_env)
    family = get_webserver_taskdef_family_name(self.deploy_env)
    task_def = ecs.FargateTaskDefinition(self,
                                         family,
                                         cpu=512,
                                         memory_limit_mib=1024,
                                         family=family)
    task_def.add_container(f"WebWorker-{self.deploy_env}",
                           image=self.image,
                           environment=environment,
                           secrets=self.secrets,
                           logging=ecs.LogDrivers.aws_logs(
                               stream_prefix=family,
                               log_retention=RetentionDays.ONE_DAY))
    task_def.default_container.add_port_mappings(
        ecs.PortMapping(container_port=8080,
                        host_port=8080,
                        protocol=ecs.Protocol.TCP))
    # We want only one instance of the web server, so max_healthy_percent=100:
    # when a new version is deployed, the current task has to be stopped manually
    # (done by the deploy task) before the new version is started.
    lb_security_group = ec2.SecurityGroup(self,
                                          f"lb-sec-group-{self.deploy_env}",
                                          vpc=self.vpc)
    service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        service_name,
        cluster=self.cluster,  # Required
        service_name=service_name,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        cpu=512,  # Default is 256
        desired_count=1,  # Default is 1
        task_definition=task_def,
        memory_limit_mib=2048,  # Default is 512
        public_load_balancer=True,
        security_groups=[lb_security_group],
        certificate=Certificate.from_certificate_arn(
            self,
            f"lb-cert-{self.deploy_env}",
            certificate_arn=self.config["lb_certificate_arn"]),
        max_healthy_percent=100)
    service.target_group.configure_health_check(path="/health")
    # restrict access to the load balancer to the VPN only
    lb_security_group.connections.allow_from(
        ec2.Peer.ipv4(self.config["lb_vpn_addresses"]), ec2.Port.tcp(443))
    # configure a DNS alias for the load balancer
    route53.ARecord(self,
                    f"lb-record-{self.deploy_env}",
                    zone=route53.HostedZone.from_hosted_zone_attributes(
                        self,
                        f"Zone-{self.deploy_env}",
                        zone_name=f"Zone-{self.deploy_env}",
                        hosted_zone_id=self.config["route53_zone_id"]),
                    record_name=self.config["lb_dns_name"],
                    target=route53.RecordTarget.from_alias(
                        targets.LoadBalancerTarget(service.load_balancer)))
    return service
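# The two naming helpers referenced above are not shown in this snippet. A minimal
# sketch of what they might look like (names derived from the call sites; the exact
# format of the returned strings is an assumption):
def get_webserver_service_name(deploy_env: str) -> str:
    # e.g. "airflow-webserver-dev"
    return f"airflow-webserver-{deploy_env}"


def get_webserver_taskdef_family_name(deploy_env: str) -> str:
    # e.g. "airflow-webserver-task-dev"
    return f"airflow-webserver-task-{deploy_env}"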
def createResources(self, ns):
    # Get Hosted Zone
    hostedZone = route53.HostedZone.from_lookup(
        self,
        'Bento-Hosted-Zone',
        domain_name=self.config[ns]['domain_name'])
    route53.ARecord(self,
                    'Bento-Alias-Record',
                    record_name=ns,
                    target=route53.RecordTarget.from_alias(
                        targets.LoadBalancerTarget(self.bentoALB)),
                    zone=hostedZone)
def create_route53_record(self):
    """ Create Route53 entries """
    zone = route53.HostedZone.from_lookup(self,
                                          "quake_services",
                                          domain_name="quake.services")
    target = route53.AddressRecordTarget.from_alias(
        route53_targets.LoadBalancerTarget(self.nlb))
    route53.ARecord(self, "alias", zone=zone, record_name="master", target=target)
def configure_domain(
    scope: core.Construct,
    load_balancer: elbv2.ApplicationLoadBalancer,
    config: StackConfig,
):
    # DNS record
    zone = route53.HostedZone.from_hosted_zone_attributes(
        scope,
        'dns',
        zone_name=config.dns_name,
        hosted_zone_id=config.dns_zone_id,
    )
    target = route53.RecordTarget.from_alias(
        route53_targets.LoadBalancerTarget(load_balancer))
    route53.ARecord(scope,
                    'stack-domain',
                    zone=zone,
                    record_name=config.dns_stack_subdomain,
                    target=target)
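# Hedged usage sketch (not in the original): configure_domain is typically called from
# a stack after the ALB exists; StackConfig is assumed to carry dns_name, dns_zone_id
# and dns_stack_subdomain, since only those attributes are used above. For example:
#
#   configure_domain(scope=self, load_balancer=alb, config=stack_config)
#
# which publishes f"{config.dns_stack_subdomain}.{config.dns_name}" as an alias
# A record pointing at the load balancer.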
def __init__(
    self,
    scope: cdk.Construct,
    id: str,
    consoleme_alb: lb.ApplicationLoadBalancer,
    **kwargs
) -> None:
    super().__init__(scope, id, **kwargs)

    hosted_zone = route53.PublicHostedZone.from_hosted_zone_attributes(
        self,
        "HostedZone",
        hosted_zone_id=HOSTED_ZONE_ID,
        zone_name=HOSTED_ZONE_NAME,
    )

    route53_record = route53.ARecord(
        self,
        "LBRecord",
        zone=hosted_zone,
        record_name=APPLICATION_PREFIX,
        target=route53.RecordTarget(
            alias_target=(route53_targets.LoadBalancerTarget(consoleme_alb))
        ),
    )

    verify_ses_identity = cr.AwsCustomResource(
        self,
        "VerifySESIdentityResource",
        policy=cr.AwsCustomResourcePolicy.from_statements(
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["ses:VerifyDomainIdentity", "ses:DeleteIdentity"],
                    resources=["*"],
                )
            ]
        ),
        on_create=cr.AwsSdkCall(
            service="SES",
            action="verifyDomainIdentity",
            parameters={"Domain": route53_record.domain_name},
            physical_resource_id=cr.PhysicalResourceId.from_response(
                "VerificationToken"
            ),
        ),
        on_delete=cr.AwsSdkCall(
            service="SES",
            action="deleteIdentity",
            parameters={"Identity": route53_record.domain_name},
        ),
        install_latest_aws_sdk=True,
        log_retention=logs.RetentionDays.ONE_WEEK,
    )

    add_ses_dkim = cr.AwsCustomResource(
        self,
        "VerifySESDKIMResource",
        policy=cr.AwsCustomResourcePolicy.from_statements(
            statements=[
                iam.PolicyStatement(
                    effect=iam.Effect.ALLOW,
                    actions=["ses:VerifyDomainDkim"],
                    resources=["*"],
                )
            ]
        ),
        on_create=cr.AwsSdkCall(
            service="SES",
            action="verifyDomainDkim",
            parameters={"Domain": route53_record.domain_name},
            physical_resource_id=cr.PhysicalResourceId.of(
                HOSTED_ZONE_ID + "VerifyDomainDKIM"
            ),
        ),
        install_latest_aws_sdk=True,
        log_retention=logs.RetentionDays.ONE_WEEK,
    )
    add_ses_dkim.node.add_dependency(verify_ses_identity)

    certificate = acm.Certificate(
        self,
        "Certificate",
        domain_name="*." + hosted_zone.zone_name,
        validation=acm.CertificateValidation.from_dns(hosted_zone=hosted_zone),
    )

    self.hosted_zone = hosted_zone
    self.certificate = certificate
    self.route53_record = route53_record
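    # Note (not in the original): verifyDomainIdentity only returns a verification
    # token; SES does not treat the identity as verified until that token is
    # published as a TXT record. A minimal sketch of publishing it in the same zone,
    # assuming the identity is f"{APPLICATION_PREFIX}.{HOSTED_ZONE_NAME}" as above:
    route53.TxtRecord(
        self,
        "SESVerificationRecord",
        zone=hosted_zone,
        record_name=f"_amazonses.{APPLICATION_PREFIX}",
        values=[verify_ses_identity.get_response_field("VerificationToken")],
    )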
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)
    '''
    Create a VPC for the splunk environment; cdk will take care of subnetting.
    Additionally create a sg for the splunk instance that only accepts requests
    from the alb. The redirect compensates for splunk trying to redirect to http
    on every response.
    '''
    vpc = ec2.Vpc(self, 'vpc', max_azs=2)
    instance_type = ec2.InstanceType('t2.micro')
    ami = ec2.LookupMachineImage(name='splunk_AMI_8.2.0_2021*')
    splunk_sg = ec2.SecurityGroup(self, 'splunk_sg', vpc=vpc)
    splunk_instance = ec2.Instance(self,
                                   'splunk',
                                   instance_type=instance_type,
                                   machine_image=ami,
                                   vpc=vpc,
                                   security_group=splunk_sg)
    alb = lb.ApplicationLoadBalancer(self, 'alb', vpc=vpc, internet_facing=True)
    splunk_sg.connections.allow_from(alb, ec2.Port.tcp(8000))
    splunk_sg.connections.allow_from(alb, ec2.Port.tcp(8088))
    alb.add_redirect()

    # import existing hosted zone and create certificate using dns based validation
    my_hosted_zone = route53.HostedZone.from_lookup(self,
                                                    'importedzone',
                                                    domain_name='vosskuhler.com')
    certificate = acm.Certificate(
        self,
        "Certificate",
        domain_name="splunk.vosskuhler.com",
        validation=acm.CertificateValidation.from_dns(my_hosted_zone))
    '''
    Configure listeners on the alb; by default splunk uses http on 8000 and https
    on 8088. SSL offloading will take care of the TLS certificate and allows us
    to avoid reconfiguring splunk to use https on port 8000. To check HEC health
    you can visit <url>:8088/services/collector/health/1.0
    '''
    listener = alb.add_listener(
        "Listener",
        certificates=[lb.ListenerCertificate(certificate.certificate_arn)],
        port=443,
        open=True)
    listener.add_targets("splunk",
                         port=8000,
                         targets=[lbt.InstanceTarget(splunk_instance)])
    listener_hec = alb.add_listener(
        "Listener_hec",
        certificates=[lb.ListenerCertificate(certificate.certificate_arn)],
        port=8088,
        open=True,
        protocol=lb.ApplicationProtocol('HTTPS'))
    listener_hec.add_targets("splunk_hec",
                             port=8088,
                             protocol=lb.ApplicationProtocol('HTTPS'),
                             targets=[lbt.InstanceTarget(splunk_instance)])

    # configure dns to forward traffic to the alb
    route53.ARecord(self,
                    "cnamerecord",
                    zone=my_hosted_zone,
                    target=route53.RecordTarget.from_alias(
                        alias.LoadBalancerTarget(alb)),
                    record_name='splunk.vosskuhler.com')
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    pearson_vpn_connection = ec2.Peer.ipv4('159.182.0.0/16')

    # Props Setup
    stage = scope.node.try_get_context('stage')
    my_service_name = scope.node.try_get_context('serviceName')
    api_health_path = props['apiHealthPath']
    tca_health_path = props['tcaHealthPath']

    # Setup IAM role for VPC flow logs
    vpc_flow_role = iam.Role(
        self,
        'FlowLog',
        assumed_by=iam.ServicePrincipal('vpc-flow-logs.amazonaws.com'))
    vpc_flow_role.add_to_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            actions=[
                                'iam:PassRole', 'logs:CreateLogGroup',
                                'logs:DescribeLogGroups',
                                'logs:CreateLogStream', 'logs:PutLogEvents'
                            ],
                            resources=["*"]))

    # Create Cloudwatch log group
    log_group = logs.LogGroup(self,
                              'LogGroup',
                              log_group_name="{0}-{1}".format(
                                  my_service_name, stage),
                              retention=logs.RetentionDays('ONE_YEAR'),
                              removal_policy=core.RemovalPolicy('DESTROY'))

    # Setup VPC resource
    vpc = ec2.Vpc(self,
                  '{0}-{1}-vpc'.format(my_service_name, stage),
                  cidr=props['cidr'],
                  max_azs=props['vpcAzCount'])

    # Setup VPC flow logs
    vpc_log = ec2.CfnFlowLog(
        self,
        'FlowLogs',
        resource_id=vpc.vpc_id,
        resource_type='VPC',
        traffic_type='ALL',
        deliver_logs_permission_arn=vpc_flow_role.role_arn,
        log_destination_type='cloud-watch-logs',
        log_group_name="{0}-{1}".format(log_group.log_group_name, stage))

    # Setup Security Group in VPC
    vpc_sg = ec2.SecurityGroup(self,
                               'EcSSG',
                               vpc=vpc,
                               allow_all_outbound=None,
                               description="Security Group for Oculus vpc",
                               security_group_name="{0}-{1}-vpc-sg".format(
                                   my_service_name, stage))

    # Add Rules to Security Group
    vpc_sg.add_ingress_rule(peer=pearson_vpn_connection,
                            connection=ec2.Port.tcp(22))

    # ALB Security Group
    alb_sg = ec2.SecurityGroup(self,
                               'AlbSG',
                               vpc=vpc,
                               allow_all_outbound=None,
                               description="Security group for oculus ALB",
                               security_group_name="{0}-{1}-alb-sg".format(
                                   my_service_name, stage))

    # Add HTTPS Rule to Security Group
    alb_sg.add_ingress_rule(peer=pearson_vpn_connection,
                            connection=ec2.Port.tcp(443))

    # Setup ALB
    alb = elbv2.ApplicationLoadBalancer(self,
                                        'ALB',
                                        vpc=vpc,
                                        internet_facing=True,
                                        security_group=alb_sg)

    # Setup API Target Group
    api_tg = elbv2.ApplicationTargetGroup(
        self,
        'ApiTargetGroup',
        port=8080,
        protocol=elbv2.ApplicationProtocol.HTTP,
        vpc=vpc)

    # Setup Web Target Group
    web_tg = elbv2.ApplicationTargetGroup(
        self,
        'WebTargetGroup',
        port=3030,
        protocol=elbv2.ApplicationProtocol.HTTP,
        vpc=vpc)

    # Setup TCA Target Group
    tca_tg = elbv2.ApplicationTargetGroup(
        self,
        'TcaTargetGroup',
        port=8080,
        protocol=elbv2.ApplicationProtocol.HTTP,
        vpc=vpc)

    # Setup ECS Cluster
    ecs_cluster = ecs.Cluster(self,
                              'ECSCluster',
                              vpc=vpc,
                              cluster_name="{0}-{1}".format(
                                  my_service_name, stage))

    # ECS Execution Role - grants the ECS agent permission to call AWS APIs
    ecs_execution_role = iam.Role(
        self,
        'ECSExecutionRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        role_name="{0}-{1}-execution-role".format(my_service_name, stage))

    # Setup Role Permissions
    ecs_execution_role.add_to_policy(
        iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                'elasticloadbalancing:DeregisterInstancesFromLoadBalancer',
                'elasticloadbalancing:DeregisterTargets',
                'elasticloadbalancing:Describe*',
                'elasticloadbalancing:RegisterInstancesWithLoadBalancer',
                'elasticloadbalancing:RegisterTargets', 'ec2:Describe*',
                'ec2:AuthorizeSecurityGroupIngress', 'sts:AssumeRole',
                'ssm:GetParameters', 'secretsmanager:GetSecretValue',
                'ecr:GetAuthorizationToken', 'ecr:BatchCheckLayerAvailability',
                'ecr:GetDownloadUrlForLayer', 'ecr:BatchGetImage',
                'logs:CreateLogStream', 'logs:PutLogEvents',
                "application-autoscaling:*", "cloudwatch:DescribeAlarms",
                "cloudwatch:PutMetricAlarm"
            ],
            resources=["*"]))

    # ECS Task Role - grants containers in the task permission to call AWS APIs
    ecs_task_role = iam.Role(
        self,
        'ECSTaskRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        role_name="{0}-{1}-task-role".format(my_service_name, stage))

    # Setup Role Permissions
    ecs_task_role.add_to_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            actions=[
                                'logs:CreateLogStream', 'logs:PutLogEvents',
                                'dynamodb:Query', 'dynamodb:ListTables',
                                'secretsmanager:GetSecretValue', 'kms:Decrypt'
                            ],
                            resources=["*"]))

    # Setup API Task Definition
    api_taskdef = ecs.FargateTaskDefinition(
        self,
        'APIFargateTask',
        memory_limit_mib=512,
        cpu=256,
        execution_role=ecs_execution_role,
        task_role=ecs_task_role,
        family="{0}-{1}-api".format(my_service_name, stage))

    # Setup Web Task Definition
    web_taskdef = ecs.FargateTaskDefinition(
        self,
        'WebFargateTask',
        memory_limit_mib=512,
        cpu=256,
        execution_role=ecs_execution_role,
        task_role=ecs_task_role,
        family="{0}-{1}-web".format(my_service_name, stage))

    # Setup TCA Task Definition
    tca_taskdef = ecs.FargateTaskDefinition(
        self,
        'TcaFargateTask',
        memory_limit_mib=512,
        cpu=256,
        execution_role=ecs_execution_role,
        task_role=ecs_task_role,
        family="{0}-{1}-tca".format(my_service_name, stage))

    api_repo = ecr.Repository.from_repository_arn(
        self,
        'ApiImage',
        repository_arn="arn:aws:ecr:us-east-1:346147488134:repository/oculus-api")
    web_repo = ecr.Repository.from_repository_arn(
        self,
        'WebImage',
        repository_arn="arn:aws:ecr:us-east-1:346147488134:repository/oculus-web")
    tca_repo = ecr.Repository.from_repository_arn(
        self,
        'TcaImage',
        repository_arn="arn:aws:ecr:us-east-1:346147488134:repository/oculus-tca-api")

    # Add API Container to Task
    api_container = api_taskdef.add_container(
        "oculus-cdk-{}-api".format(stage),
        image=ecs.EcrImage(repository=api_repo, tag="devqaurl"),
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="{0}-{1}-api".format(my_service_name, stage),
            log_group=log_group))

    # Add Web Container to Task
    web_container = web_taskdef.add_container(
        "oculus-cdk-{}-web".format(stage),
        image=ecs.EcrImage(repository=web_repo, tag="removeMetaMockup"),
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="{0}-{1}-web".format(my_service_name, stage),
            log_group=log_group))

    # Add TCA Container to Task
    tca_container = tca_taskdef.add_container(
        "oculus-cdk-{}-tca".format(stage),
        image=ecs.EcrImage(repository=tca_repo, tag="ocu-1109"),
        logging=ecs.LogDriver.aws_logs(
            stream_prefix="{0}-{1}-tca".format(my_service_name, stage),
            log_group=log_group))

    # Setup API Port Mappings
    api_container.add_port_mappings(
        ecs.PortMapping(container_port=8080,
                        host_port=8080,
                        protocol=ecs.Protocol.TCP))

    # Setup Web Port Mappings
    web_container.add_port_mappings(
        ecs.PortMapping(container_port=3030,
                        host_port=3030,
                        protocol=ecs.Protocol.TCP))

    # Setup TCA Port Mappings
    tca_container.add_port_mappings(
        ecs.PortMapping(container_port=8080,
                        host_port=8080,
                        protocol=ecs.Protocol.TCP))

    # Setup API Fargate Service
    api_service = ecs.FargateService(self,
                                     "FargateServiceAPI",
                                     task_definition=api_taskdef,
                                     cluster=ecs_cluster,
                                     desired_count=1,
                                     service_name="{0}-{1}-api".format(
                                         my_service_name, stage))
    api_scaling = api_service.auto_scale_task_count(max_capacity=5)
    api_scaling.scale_on_cpu_utilization('ApiCpuScaling',
                                         target_utilization_percent=50)

    # Setup Web Fargate Service
    web_service = ecs.FargateService(self,
                                     "FargateServiceWeb",
                                     task_definition=web_taskdef,
                                     cluster=ecs_cluster,
                                     desired_count=1,
                                     service_name="{0}-{1}-web".format(
                                         my_service_name, stage))
    web_scaling = web_service.auto_scale_task_count(max_capacity=5)
    web_scaling.scale_on_cpu_utilization('WebCpuScaling',
                                         target_utilization_percent=50)

    # Setup TCA Fargate Service
    tca_service = ecs.FargateService(self,
                                     "FargateServiceTCA",
                                     task_definition=tca_taskdef,
                                     cluster=ecs_cluster,
                                     desired_count=1,
                                     service_name="{0}-{1}-tca".format(
                                         my_service_name, stage))
    tca_scaling = tca_service.auto_scale_task_count(max_capacity=5)
    tca_scaling.scale_on_cpu_utilization('TcaCpuScaling',
                                         target_utilization_percent=50)

    # Setup ALB Listener
    alb_listener = alb.add_listener(
        'Listener',
        certificate_arns=[
            "arn:aws:acm:us-east-1:829809672214:certificate/a84bb369-03ce-4e5e-9d32-8c84609cad1e"
        ],
        port=443,
        open=False,
        protocol=elbv2.ApplicationProtocol.HTTPS)

    # Attach ALB to ECS API Service
    api_target = alb_listener.add_targets(
        'ECSAPI',
        port=8080,
        priority=1,
        targets=[api_service],
        health_check=elbv2.HealthCheck(path=api_health_path),
        path_pattern='/oculus-api/*')

    # Attach ALB to ECS TCA Service
    tca_target = alb_listener.add_targets(
        'ECSTCA',
        port=8080,
        priority=2,
        targets=[tca_service],
        health_check=elbv2.HealthCheck(path=tca_health_path),
        path_pattern='/tca/*')

    # Attach ALB to ECS Web Service (no conditions, so this becomes the
    # listener's default action)
    web_target = alb_listener.add_targets(
        'ECSWeb',
        port=3030,
        protocol=elbv2.ApplicationProtocol.HTTP,
        targets=[web_service],
        health_check=elbv2.HealthCheck(path='/'),
    )

    core.CfnOutput(self, 'LoadBalancerDNS', value=alb.load_balancer_dns_name)

    zone = route53.HostedZone.from_lookup(self,
                                          'MyHostedZone',
                                          domain_name=props['zoneDomain'])
    route53.ARecord(self,
                    'ServiceAliasRecord',
                    record_name=props['siteDomain'],
                    target=route53.RecordTarget(
                        alias_target=aws_route53_targets.LoadBalancerTarget(
                            load_balancer=alb)),
                    zone=zone)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    # The code that defines your stack goes here
    # Create a VPC
    myvpc = ec2.Vpc(self, "CDKVPC", cidr=vars.cidr)

    # SG for ELB creation
    websitefrontendSG = ec2.SecurityGroup(
        self,
        'websitefrontendSG',
        vpc=myvpc,
        security_group_name='websitefrontendSG')
    websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                       connection=ec2.Port.tcp(80))
    websitefrontendSG.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                                       connection=ec2.Port.tcp(443))

    # Create ALB in VPC
    alb = elb.ApplicationLoadBalancer(
        self,
        'websitefrontend-public',
        vpc=myvpc,
        load_balancer_name='websitefrontend-public',
        security_group=websitefrontendSG,
        internet_facing=True)

    # Add target group to ALB
    catalogtargetgroup = elb.ApplicationTargetGroup(
        self,
        'CatalogTargetGroup',
        port=80,
        vpc=myvpc,
        target_type=elb.TargetType.IP)

    if not vars.sslcert:
        # Add http listener to ALB
        alblistenerhttp = elb.ApplicationListener(
            self,
            'alblistenerhttp',
            load_balancer=alb,
            default_target_groups=[catalogtargetgroup],
            port=80)

    if vars.sslcert:
        # Add http listener to ALB
        alblistenerhttp = elb.ApplicationListener(self,
                                                  'alblistenerhttp',
                                                  load_balancer=alb,
                                                  port=80)
        elb.ApplicationListenerRule(self,
                                    'httpredirectionrule',
                                    listener=alblistenerhttp,
                                    redirect_response=elb.RedirectResponse(
                                        status_code='HTTP_301',
                                        port='443',
                                        protocol='HTTPS'))
        # OPTIONAL - Add https listener to ALB & attach certificate
        alblistenerhttps = elb.ApplicationListener(
            self,
            'alblistenerhttps',
            load_balancer=alb,
            default_target_groups=[catalogtargetgroup],
            port=443,
            certificate_arns=[vars.sslcert_arn])
        # OPTIONAL - Redirect HTTP to HTTPS
        alblistenerhttp.add_redirect_response(id='redirectionrule',
                                              port='443',
                                              status_code='HTTP_301',
                                              protocol='HTTPS')

    if vars.customdomain:
        # OPTIONAL - Update DNS with ALB
        webshopxyz_zone = r53.HostedZone.from_hosted_zone_attributes(
            self,
            id='customdomain',
            hosted_zone_id=vars.hosted_zone_id,
            zone_name=vars.zone_name)
        webshop_root_record = r53.ARecord(
            self,
            'ALBAliasRecord',
            zone=webshopxyz_zone,
            target=r53.RecordTarget.from_alias(alias.LoadBalancerTarget(alb)))

    # SG for ECS creation
    ECSSG = ec2.SecurityGroup(self,
                              'ECSSecurityGroup',
                              vpc=myvpc,
                              security_group_name='ECS')
    ECSSG.add_ingress_rule(peer=websitefrontendSG,
                           connection=ec2.Port.tcp(80))

    # SG for MySQL creation
    MySQLSG = ec2.SecurityGroup(self,
                                'DBSecurityGroup',
                                vpc=myvpc,
                                security_group_name='DB')
    MySQLSG.add_ingress_rule(peer=ECSSG, connection=ec2.Port.tcp(3306))

    # Create DB subnet group
    subnetlist = []
    for subnet in myvpc.private_subnets:
        subnetlist.append(subnet.subnet_id)
    subnetgr = rds.CfnDBSubnetGroup(
        self,
        'democlustersubnetgroup',
        db_subnet_group_name='democlustersubnetgroup',
        db_subnet_group_description='DemoCluster',
        subnet_ids=subnetlist)

    # Create secret db passwd
    secret = sm.SecretStringGenerator(
        exclude_characters="\"'@/\\",
        secret_string_template='{"username": "******"}',
        generate_string_key='password',
        password_length=40)
    dbpass = sm.Secret(self,
                       'democlusterpass',
                       secret_name='democlusterpass',
                       generate_secret_string=secret)

    # Create Aurora serverless MySQL instance
    dbcluster = rds.CfnDBCluster(
        self,
        'DemoCluster',
        engine='aurora',
        engine_mode='serverless',
        engine_version='5.6',
        db_cluster_identifier='DemoCluster',
        master_username=dbpass.secret_value_from_json('username').to_string(),
        master_user_password=dbpass.secret_value_from_json(
            'password').to_string(),
        storage_encrypted=True,
        port=3306,
        vpc_security_group_ids=[MySQLSG.security_group_id],
        scaling_configuration=rds.CfnDBCluster.ScalingConfigurationProperty(
            auto_pause=True,
            max_capacity=4,
            min_capacity=1,
            seconds_until_auto_pause=300),
        db_subnet_group_name=subnetgr.db_subnet_group_name)
    dbcluster.add_override('DependsOn', 'democlustersubnetgroup')

    # Attach database to secret
    attach = sm.CfnSecretTargetAttachment(self,
                                          'RDSAttachment',
                                          secret_id=dbpass.secret_arn,
                                          target_id=dbcluster.ref,
                                          target_type='AWS::RDS::DBCluster')

    # Upload image into ECR repo
    ecrdemoimage = ecra.DockerImageAsset(self,
                                         'ecrdemoimage',
                                         directory='../',
                                         repository_name='demorepo',
                                         exclude=['cdk.out'])

    # Create ECS fargate cluster
    ecscluster = ecs.Cluster(self, "ecsCluster", vpc=myvpc)

    # Create task role for productsCatalogTask
    getsecretpolicystatement = iam.PolicyStatement(
        actions=[
            "secretsmanager:GetResourcePolicy",
            "secretsmanager:GetSecretValue",
            "secretsmanager:DescribeSecret",
            "secretsmanager:ListSecretVersionIds"
        ],
        resources=[dbpass.secret_arn],
        effect=iam.Effect.ALLOW)
    getsecretpolicydocument = iam.PolicyDocument(
        statements=[getsecretpolicystatement])
    taskrole = iam.Role(
        self,
        'TaskRole',
        assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'),
        role_name='TaskRoleforproductsCatalogTask',
        # inline_policies expects a mapping of policy name to PolicyDocument
        inline_policies={'getsecretpolicy': getsecretpolicydocument})

    # Create task definition
    taskdefinition = ecs.FargateTaskDefinition(self,
                                               'productsCatalogTask',
                                               cpu=1024,
                                               memory_limit_mib=2048,
                                               task_role=taskrole)

    # Add container to task definition
    productscatalogcontainer = taskdefinition.add_container(
        'productscatalogcontainer',
        image=ecs.ContainerImage.from_docker_image_asset(asset=ecrdemoimage),
        environment={
            "region": vars.region,
            "secretname": "democlusterpass"
        })
    productscatalogcontainer.add_port_mappings(
        ecs.PortMapping(container_port=80, host_port=80))

    # Create service and associate it with the cluster
    catalogservice = ecs.FargateService(
        self,
        'catalogservice',
        task_definition=taskdefinition,
        assign_public_ip=False,
        security_group=ECSSG,
        vpc_subnets=ec2.SubnetSelection(subnets=myvpc.select_subnets(
            subnet_type=ec2.SubnetType.PRIVATE).subnets),
        cluster=ecscluster,
        desired_count=2)

    # Add autoscaling to the service
    scaling = catalogservice.auto_scale_task_count(max_capacity=20,
                                                   min_capacity=1)
    scaling.scale_on_cpu_utilization(
        'ScaleOnCPU',
        target_utilization_percent=70,
        scale_in_cooldown=core.Duration.seconds(amount=1),
        scale_out_cooldown=core.Duration.seconds(amount=0))

    # Associate the fargate service with load balancer targetgroup
    catalogservice.attach_to_application_target_group(catalogtargetgroup)
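# Note (not part of the stack above): the container is only given the "region" and
# "secretname" environment variables, so the application itself has to fetch the
# generated Aurora credentials at runtime. A minimal sketch of that container-side
# lookup with boto3:
import json
import os

import boto3


def get_db_credentials() -> dict:
    """Read the Aurora credentials JSON from Secrets Manager."""
    client = boto3.client("secretsmanager", region_name=os.environ["region"])
    response = client.get_secret_value(SecretId=os.environ["secretname"])
    return json.loads(response["SecretString"])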
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    eks_vpc = ec2.Vpc(self, "VPC", cidr="10.0.0.0/16")
    self.eks_vpc = eks_vpc

    # Create IAM Role For code-server bastion
    bastion_role = iam.Role(
        self,
        "BastionRole",
        assumed_by=iam.CompositePrincipal(
            iam.ServicePrincipal("ec2.amazonaws.com"),
            iam.AccountRootPrincipal()),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                "AdministratorAccess")
        ])
    self.bastion_role = bastion_role

    # Create EC2 Instance Profile for that Role
    instance_profile = iam.CfnInstanceProfile(
        self, "InstanceProfile", roles=[bastion_role.role_name])

    # Create SecurityGroup for the Control Plane ENIs
    eks_security_group = ec2.SecurityGroup(self,
                                           "EKSSecurityGroup",
                                           vpc=eks_vpc,
                                           allow_all_outbound=True)
    eks_security_group.add_ingress_rule(ec2.Peer.ipv4('10.0.0.0/16'),
                                        ec2.Port.all_traffic())

    # Create an EKS Cluster
    eks_cluster = eks.Cluster(
        self,
        "cluster",
        cluster_name="cluster",
        vpc=eks_vpc,
        masters_role=bastion_role,
        default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
        default_capacity_instance=ec2.InstanceType("m5.large"),
        default_capacity=2,
        security_group=eks_security_group,
        endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE,
        version=eks.KubernetesVersion.V1_17)
    self.cluster_cert = eks_cluster.cluster_certificate_authority_data

    # Deploy ALB Ingress Controller
    # Create the k8s Service account and corresponding IAM Role mapped via IRSA
    alb_service_account = eks_cluster.add_service_account(
        "alb-ingress-controller",
        name="alb-ingress-controller",
        namespace="kube-system")

    # Create the PolicyStatements to attach to the role
    # I couldn't find a way to get this to work with a PolicyDocument and there are 10 of these
    alb_policy_statement_json_1 = {
        "Effect": "Allow",
        "Action": [
            "acm:DescribeCertificate", "acm:ListCertificates",
            "acm:GetCertificate"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_2 = {
        "Effect": "Allow",
        "Action": [
            "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateSecurityGroup",
            "ec2:CreateTags", "ec2:DeleteTags", "ec2:DeleteSecurityGroup",
            "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses",
            "ec2:DescribeInstances", "ec2:DescribeInstanceStatus",
            "ec2:DescribeInternetGateways", "ec2:DescribeNetworkInterfaces",
            "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets",
            "ec2:DescribeTags", "ec2:DescribeVpcs",
            "ec2:ModifyInstanceAttribute",
            "ec2:ModifyNetworkInterfaceAttribute",
            "ec2:RevokeSecurityGroupIngress"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_3 = {
        "Effect": "Allow",
        "Action": [
            "elasticloadbalancing:AddListenerCertificates",
            "elasticloadbalancing:AddTags",
            "elasticloadbalancing:CreateListener",
            "elasticloadbalancing:CreateLoadBalancer",
            "elasticloadbalancing:CreateRule",
            "elasticloadbalancing:CreateTargetGroup",
            "elasticloadbalancing:DeleteListener",
            "elasticloadbalancing:DeleteLoadBalancer",
            "elasticloadbalancing:DeleteRule",
            "elasticloadbalancing:DeleteTargetGroup",
            "elasticloadbalancing:DeregisterTargets",
            "elasticloadbalancing:DescribeListenerCertificates",
            "elasticloadbalancing:DescribeListeners",
            "elasticloadbalancing:DescribeLoadBalancers",
            "elasticloadbalancing:DescribeLoadBalancerAttributes",
            "elasticloadbalancing:DescribeRules",
            "elasticloadbalancing:DescribeSSLPolicies",
            "elasticloadbalancing:DescribeTags",
            "elasticloadbalancing:DescribeTargetGroups",
            "elasticloadbalancing:DescribeTargetGroupAttributes",
            "elasticloadbalancing:DescribeTargetHealth",
            "elasticloadbalancing:ModifyListener",
            "elasticloadbalancing:ModifyLoadBalancerAttributes",
            "elasticloadbalancing:ModifyRule",
            "elasticloadbalancing:ModifyTargetGroup",
            "elasticloadbalancing:ModifyTargetGroupAttributes",
            "elasticloadbalancing:RegisterTargets",
            "elasticloadbalancing:RemoveListenerCertificates",
            "elasticloadbalancing:RemoveTags",
            "elasticloadbalancing:SetIpAddressType",
            "elasticloadbalancing:SetSecurityGroups",
            "elasticloadbalancing:SetSubnets",
            "elasticloadbalancing:SetWebAcl"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_4 = {
        "Effect": "Allow",
        "Action": [
            "iam:CreateServiceLinkedRole", "iam:GetServerCertificate",
            "iam:ListServerCertificates"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_5 = {
        "Effect": "Allow",
        "Action": ["cognito-idp:DescribeUserPoolClient"],
        "Resource": "*"
    }
    alb_policy_statement_json_6 = {
        "Effect": "Allow",
        "Action": [
            "waf-regional:GetWebACLForResource", "waf-regional:GetWebACL",
            "waf-regional:AssociateWebACL", "waf-regional:DisassociateWebACL"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_7 = {
        "Effect": "Allow",
        "Action": ["tag:GetResources", "tag:TagResources"],
        "Resource": "*"
    }
    alb_policy_statement_json_8 = {
        "Effect": "Allow",
        "Action": ["waf:GetWebACL"],
        "Resource": "*"
    }
    alb_policy_statement_json_9 = {
        "Effect": "Allow",
        "Action": [
            "wafv2:GetWebACL", "wafv2:GetWebACLForResource",
            "wafv2:AssociateWebACL", "wafv2:DisassociateWebACL"
        ],
        "Resource": "*"
    }
    alb_policy_statement_json_10 = {
        "Effect": "Allow",
        "Action": [
            "shield:DescribeProtection", "shield:GetSubscriptionState",
            "shield:DeleteProtection", "shield:CreateProtection",
            "shield:DescribeSubscription", "shield:ListProtections"
        ],
        "Resource": "*"
    }

    # Attach the necessary permissions
    alb_service_account.add_to_policy(
        iam.PolicyStatement.from_json(alb_policy_statement_json_1))
    alb_service_account.add_to_policy(
        iam.PolicyStatement.from_json(alb_policy_statement_json_2))
    alb_service_account.add_to_policy(
        iam.PolicyStatement.from_json(alb_policy_statement_json_3))
    alb_service_account.add_to_policy(
        iam.PolicyStatement.from_json(alb_policy_statement_json_4))
    alb_service_account.add_to_policy(
        iam.PolicyStatement.from_json(alb_policy_statement_json_5))
    alb_service_account.add_to_policy(
        iam.PolicyStatement.from_json(alb_policy_statement_json_6))
    alb_service_account.add_to_policy(
        iam.PolicyStatement.from_json(alb_policy_statement_json_7))
    alb_service_account.add_to_policy(
        iam.PolicyStatement.from_json(alb_policy_statement_json_8))
    alb_service_account.add_to_policy(
        iam.PolicyStatement.from_json(alb_policy_statement_json_9))
    alb_service_account.add_to_policy(
        iam.PolicyStatement.from_json(alb_policy_statement_json_10))

    # Deploy the ALB Ingress Controller from the Helm chart
    eks_cluster.add_helm_chart(
        "aws-alb-ingress-controller",
        chart="aws-alb-ingress-controller",
        repository="http://storage.googleapis.com/kubernetes-charts-incubator",
        namespace="kube-system",
        values={
            "clusterName": "cluster",
            "awsRegion": os.environ["CDK_DEFAULT_REGION"],
            "awsVpcID": eks_vpc.vpc_id,
            "rbac": {
                "create": True,
                "serviceAccount": {
                    "create": False,
                    "name": "alb-ingress-controller"
                }
            }
        })

    # Create code-server bastion
    # Get Latest Amazon Linux AMI
    amzn_linux = ec2.MachineImage.latest_amazon_linux(
        generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
        edition=ec2.AmazonLinuxEdition.STANDARD,
        virtualization=ec2.AmazonLinuxVirt.HVM,
        storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

    # Create SecurityGroup for code-server
    security_group = ec2.SecurityGroup(self,
                                       "SecurityGroup",
                                       vpc=eks_vpc,
                                       allow_all_outbound=True)
    security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8080))

    # Create our EC2 instance running CodeServer
    code_server_instance = ec2.Instance(
        self,
        "CodeServerInstance",
        instance_type=ec2.InstanceType("t3.large"),
        machine_image=amzn_linux,
        role=bastion_role,
        vpc=eks_vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
        security_group=security_group,
        block_devices=[
            ec2.BlockDevice(device_name="/dev/xvda",
                            volume=ec2.BlockDeviceVolume.ebs(20))
        ])

    # Add UserData
    code_server_instance.user_data.add_commands(
        "mkdir -p ~/.local/lib ~/.local/bin ~/.config/code-server")
    code_server_instance.user_data.add_commands(
        "curl -fL https://github.com/cdr/code-server/releases/download/v3.5.0/code-server-3.5.0-linux-amd64.tar.gz | tar -C ~/.local/lib -xz"
    )
    code_server_instance.user_data.add_commands(
        "mv ~/.local/lib/code-server-3.5.0-linux-amd64 ~/.local/lib/code-server-3.5.0"
    )
    code_server_instance.user_data.add_commands(
        "ln -s ~/.local/lib/code-server-3.5.0/bin/code-server ~/.local/bin/code-server"
    )
    code_server_instance.user_data.add_commands(
        "echo \"bind-addr: 0.0.0.0:8080\" > ~/.config/code-server/config.yaml")
    code_server_instance.user_data.add_commands(
        "echo \"auth: password\" >> ~/.config/code-server/config.yaml")
    code_server_instance.user_data.add_commands(
        "echo \"password: $(curl -s http://169.254.169.254/latest/meta-data/instance-id)\" >> ~/.config/code-server/config.yaml"
    )
    code_server_instance.user_data.add_commands(
        "echo \"cert: false\" >> ~/.config/code-server/config.yaml")
    code_server_instance.user_data.add_commands(
        "~/.local/bin/code-server &")
    code_server_instance.user_data.add_commands(
        "yum -y install jq gettext bash-completion moreutils")
    code_server_instance.user_data.add_commands(
        "sudo pip install --upgrade awscli && hash -r")
    code_server_instance.user_data.add_commands(
        "echo 'export ALB_INGRESS_VERSION=\"v1.1.8\"' >> ~/.bash_profile")
    code_server_instance.user_data.add_commands(
        "curl --silent --location -o /usr/local/bin/kubectl \"https://amazon-eks.s3.us-west-2.amazonaws.com/1.17.9/2020-08-04/bin/linux/amd64/kubectl\""
    )
    code_server_instance.user_data.add_commands(
        "chmod +x /usr/local/bin/kubectl")
    code_server_instance.user_data.add_commands(
        "curl -L https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash"
    )
    code_server_instance.user_data.add_commands(
        "export ACCOUNT_ID=$(aws sts get-caller-identity --output text --query Account)"
    )
    code_server_instance.user_data.add_commands(
        "export AWS_REGION=$(curl -s 169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.region')"
    )
    code_server_instance.user_data.add_commands(
        "echo \"export ACCOUNT_ID=${ACCOUNT_ID}\" | tee -a ~/.bash_profile")
    code_server_instance.user_data.add_commands(
        "echo \"export AWS_REGION=${AWS_REGION}\" | tee -a ~/.bash_profile")
    code_server_instance.user_data.add_commands(
        "aws configure set default.region ${AWS_REGION}")
    code_server_instance.user_data.add_commands(
        "curl --silent --location https://rpm.nodesource.com/setup_12.x | bash -"
    )
    code_server_instance.user_data.add_commands("yum -y install nodejs")
    code_server_instance.user_data.add_commands(
        "amazon-linux-extras enable python3")
    code_server_instance.user_data.add_commands(
        "yum install -y python3 --disablerepo amzn2-core")
    code_server_instance.user_data.add_commands("yum install -y git")
    code_server_instance.user_data.add_commands(
        "rm /usr/bin/python && ln -s /usr/bin/python3 /usr/bin/python && ln -s /usr/bin/pip3 /usr/bin/pip"
    )
    code_server_instance.user_data.add_commands("npm install -g aws-cdk")
    code_server_instance.user_data.add_commands(
        "echo 'export KUBECONFIG=~/.kube/config' >> ~/.bash_profile")
    code_server_instance.user_data.add_commands(
        "git clone https://github.com/jasonumiker/eks-school.git")

    # Add ALB
    lb = elbv2.ApplicationLoadBalancer(self,
                                       "LB",
                                       vpc=eks_vpc,
                                       internet_facing=True)
    listener = lb.add_listener("Listener", port=80)
    listener.connections.allow_default_port_from_any_ipv4(
        "Open to the Internet")
    listener.connections.allow_to_any_ipv4(
        port_range=ec2.Port(string_representation="TCP 8080",
                            protocol=ec2.Protocol.TCP,
                            from_port=8080,
                            to_port=8080))
    listener.add_targets(
        "Target",
        port=8080,
        targets=[
            elbv2.InstanceTarget(instance_id=code_server_instance.instance_id,
                                 port=8080)
        ])

    # If a Hosted Zone exists, setup HTTPS for codeserver.domainname.xyz
    if "CDK_HOSTEDZONEID" in os.environ and "CDK_HOSTEDZONENAME" in os.environ:
        hostedzone = route53.HostedZone.from_hosted_zone_attributes(
            self,
            "dnszone",
            hosted_zone_id=os.environ["CDK_HOSTEDZONEID"],
            zone_name=os.environ["CDK_HOSTEDZONENAME"])
        arecord = route53.ARecord(
            self,
            'CodeServer AliasRecord',
            zone=hostedzone,
            target=route53.RecordTarget.from_alias(
                route53_targets.LoadBalancerTarget(lb)),
            record_name="codeserver")
        cert = acm.DnsValidatedCertificate(
            self,
            "codeserver cert",
            domain_name="codeserver." + hostedzone.zone_name,
            hosted_zone=hostedzone)
        listenerHttps = lb.add_listener("HTTPS Listener", port=443)
        listenerHttps.connections.allow_default_port_from_any_ipv4(
            "Open to the Internet")
        listenerHttps.connections.allow_to_any_ipv4(
            port_range=ec2.Port(string_representation="TCP 8080",
                                protocol=ec2.Protocol.TCP,
                                from_port=8080,
                                to_port=8080))
        listenerHttps.add_certificates("LB Certificates", [cert])
        listenerHttps.add_targets(
            "Target",
            port=8080,
            targets=[
                elbv2.InstanceTarget(
                    instance_id=code_server_instance.instance_id, port=8080)
            ])
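# Hedged usage note (not in the original): the HTTPS branch above only runs when both
# environment variables are present at synth time, e.g. (values are placeholders):
#
#   CDK_HOSTEDZONEID=Z0000000000000 CDK_HOSTEDZONENAME=example.com cdk deploy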
def __init__(self, scope: core.Construct, construct_id: str, env, **kwargs) -> None:
    super().__init__(scope, construct_id, env=env, **kwargs)

    # The code that defines your stack goes here
    if self.node.try_get_context("tags"):
        self.user_defined_tags = self.node.try_get_context("tags").split(' ')
    else:
        self.user_defined_tags = None

    vpc = ec2.Vpc(self, "VPC_EMQ",
                  max_azs=2,
                  cidr="10.10.0.0/16",
                  # This configuration will create 3 groups in 2 AZs = 6 subnets.
                  subnet_configuration=[
                      ec2.SubnetConfiguration(
                          subnet_type=ec2.SubnetType.PUBLIC,
                          name="Public",
                          cidr_mask=24
                      ),
                      ec2.SubnetConfiguration(
                          subnet_type=ec2.SubnetType.PRIVATE,
                          name="Private",
                          cidr_mask=24
                      ),
                      ec2.SubnetConfiguration(
                          subnet_type=ec2.SubnetType.ISOLATED,
                          name="DB",
                          cidr_mask=24
                      )
                  ],
                  nat_gateways=2
                  )
    self.vpc = vpc

    # Route53
    int_zone = r53.PrivateHostedZone(self, r53_zone_name,
                                     zone_name='int.emqx',
                                     vpc=vpc
                                     )
    self.int_zone = int_zone

    # Define cfn parameters
    # ec2_type = CfnParameter(self, "ec2-instance-type",
    #     type="String", default="m5.2xlarge",
    #     description="Specify the instance type you want").value_as_string

    key_name = CfnParameter(self, "ssh key",
                            type="String", default="key_ireland",
                            description="Specify your SSH key").value_as_string

    sg = ec2.SecurityGroup(self, id='sg_int', vpc=vpc)
    self.sg = sg

    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22), 'SSH from anywhere')
    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(1883), 'MQTT TCP Port')
    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8883), 'MQTT TCP/TLS Port')
    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.udp(14567), 'MQTT Quic Port')
    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(18083), 'WEB UI')
    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(4369), 'EMQX dist port 1')
    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(4370), 'EMQX dist port 2')
    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(8081), 'EMQX dashboard')
    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(2379), 'etcd client port')
    sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(2380), 'etcd peer port')

    # Create Bastion Server
    bastion = ec2.BastionHostLinux(self, "Bastion",
                                   vpc=vpc,
                                   subnet_selection=ec2.SubnetSelection(
                                       subnet_type=ec2.SubnetType.PUBLIC),
                                   instance_name="BastionHostLinux",
                                   instance_type=ec2.InstanceType(
                                       instance_type_identifier="t3.nano"))
    bastion.instance.instance.add_property_override("KeyName", key_name)
    bastion.connections.allow_from_any_ipv4(
        ec2.Port.tcp(22), "Internet access SSH")

    # Create NLB
    nlb = elb.NetworkLoadBalancer(self, "emq-elb",
                                  vpc=vpc,
                                  internet_facing=False,
                                  cross_zone_enabled=True,
                                  load_balancer_name="emq-nlb")

    r53.ARecord(self, "AliasRecord",
                zone=int_zone,
                record_name=loadbalancer_dnsname,
                target=r53.RecordTarget.from_alias(
                    r53_targets.LoadBalancerTarget(nlb))
                )
    self.nlb = nlb

    listener = nlb.add_listener("port1883", port=1883)
    listenerTLS = nlb.add_listener("port8883", port=8883)  # TLS, emqx termination
    listenerQuic = nlb.add_listener("port14567", port=14567,
                                    protocol=elbv2.Protocol.UDP)
    listenerUI = nlb.add_listener("port80", port=80)

    # Create Autoscaling Group with desired 2*EC2 hosts
    # asg = autoscaling.AutoScalingGroup(self, "emq-asg",
    #     vpc=vpc,
    #     vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
    #     instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
    #     machine_image=linux_ami,
    #     security_group=sg,
    #     key_name=key_name,
    #     user_data=ec2.UserData.custom(user_data),
    #     health_check=HealthCheck.elb(grace=Duration.seconds(60)),
    #     desired_capacity=3,
    #     min_capacity=2,
    #     max_capacity=4
    # )

    # if self.user_defined_tags:
    #     core.Tags.of(asg).add(*self.user_defined_tags)

    # # NLB cannot be associated with a security group, therefore the NLB object has
    # # no Connections object. Must manually modify the inbound rules of the newly
    # # created asg security group to allow access from the NLB IPs only.
    # asg.connections.allow_from_any_ipv4(
    #     ec2.Port.tcp(1883), "Allow NLB access 1883 port of EC2 in Autoscaling Group")
    # asg.connections.allow_from_any_ipv4(
    #     ec2.Port.tcp(18083), "Allow NLB access WEB UI")
    # asg.connections.allow_from_any_ipv4(
    #     ec2.Port.tcp(4369), "Allow emqx cluster distribution port 1")
    # asg.connections.allow_from_any_ipv4(
    #     ec2.Port.tcp(4370), "Allow emqx cluster distribution port 2")
    # asg.connections.allow_from_any_ipv4(
    #     ec2.Port.udp(4369), "Allow emqx cluster discovery port 1")
    # asg.connections.allow_from_any_ipv4(
    #     ec2.Port.udp(4370), "Allow emqx cluster discovery port 2")
    # asg.connections.allow_from_any_ipv4(
    #     ec2.Port.tcp(8081), "Allow emqx cluster dashboard access")
    # asg.connections.allow_from_any_ipv4(
    #     ec2.Port.tcp(2379), "Allow emqx cluster discovery port (etcd)")
    # asg.connections.allow_from_any_ipv4(
    #     ec2.Port.tcp(2380), "Allow emqx cluster discovery port (etcd)")
    # asg.connections.allow_from(bastion,
    #     ec2.Port.tcp(22), "Allow SSH from the bastion only")

    self.setup_emqx(numEmqx, vpc, int_zone, sg, key_name)

    listener.add_targets('ec2',
                         port=1883,
                         targets=[target.InstanceTarget(x) for x in self.emqx_vms])
    # @todo we need ssl termination
    listenerUI.add_targets('ec2',
                           port=18083,
                           targets=[target.InstanceTarget(x) for x in self.emqx_vms])
    listenerQuic.add_targets('ec2',
                             port=14567,
                             protocol=elbv2.Protocol.UDP,
                             targets=[target.InstanceTarget(x) for x in self.emqx_vms])
    listenerTLS.add_targets('ec2',
                            port=8883,
                            targets=[target.InstanceTarget(x) for x in self.emqx_vms])

    """
    db_mysql = rds.DatabaseInstance(self, "EMQ_MySQL_DB",
                                    engine=rds.DatabaseInstanceEngine.mysql(
                                        version=rds.MysqlEngineVersion.VER_5_7_30),
                                    instance_type=ec2.InstanceType.of(
                                        ec2.InstanceClass.BURSTABLE2,
                                        ec2.InstanceSize.SMALL),
                                    vpc=vpc,
                                    multi_az=True,
                                    allocated_storage=100,
                                    storage_type=rds.StorageType.GP2,
                                    cloudwatch_logs_exports=["audit", "error",
                                                             "general", "slowquery"],
                                    deletion_protection=False,
                                    delete_automated_backups=False,
                                    backup_retention=core.Duration.days(7),
                                    parameter_group=rds.ParameterGroup.from_parameter_group_name(
                                        self, "para-group-mysql",
                                        parameter_group_name="default.mysql5.7"),
                                    )

    asg_security_groups = asg.connections.security_groups
    for asg_sg in asg_security_groups:
        db_mysql.connections.allow_default_port_from(
            asg_sg, "EC2 Autoscaling Group access MySQL")
    """

    # self.setup_monitoring()

    self.setup_etcd(vpc, int_zone, sg, key_name)
    self.setup_loadgen(numLg, vpc, int_zone, sg, key_name,
                       nlb.load_balancer_dns_name)
    self.setup_monitoring()

    core.CfnOutput(self, "Output",
                   value=nlb.load_balancer_dns_name)
    core.CfnOutput(self, "SSH Entrypoint",
                   value=bastion.instance_public_ip)
    core.CfnOutput(self, "SSH cmds",
                   value="ssh -A -l ec2-user %s -L8888:%s:80 -L 9999:%s:80 -L 13000:%s:3000"
                   % (bastion.instance_public_ip, nlb.load_balancer_dns_name,
                      self.mon_lb, self.mon_lb)
                   )
def __init__(
    self,
    scope: core.Construct,
    id_: str,
    ontoserver_repo_name: str,
    ontoserver_tag: str,
    dns_record_name: str,
    service_count: int = 1,
    service_memory: int = 2048,
    service_cpu: int = 1024,
    **kwargs,
) -> None:
    """
    A stack that spins up a Fargate cluster running Ontoserver with an ALB in front.

    Args:
        scope:
        id_:
        ontoserver_repo_name:
        ontoserver_tag:
        dns_record_name: The DNS entry to make for the ALB
        service_count: The count of services to spin up behind the ALB in Fargate
        service_memory: The memory assigned to each Fargate instance
        service_cpu: The CPU assigned to each Fargate instance
        **kwargs:
    """
    super().__init__(scope, id_, **kwargs)

    # given the complexity of building our ontoserver image - we don't build here in
    # the CDK, but assume it has been built separately into a repo managed elsewhere
    repo: ecr.Repository = ecr.Repository.from_repository_name(
        self, "Repo", ontoserver_repo_name)

    # --- Query deployment env specific config from SSM Parameter Store

    cert_apse2_arn = ssm.StringParameter.from_string_parameter_name(
        self,
        "SSLCertAPSE2ARN",
        # re-using the wildcard as used by htsget.. FIX??
        string_parameter_name="/htsget/acm/apse2_arn",
    )
    cert_apse2 = acm.Certificate.from_certificate_arn(
        self,
        "SSLCertAPSE2",
        certificate_arn=cert_apse2_arn.string_value,
    )

    hosted_zone_id = ssm.StringParameter.from_string_parameter_name(
        self, "HostedZoneID", string_parameter_name="hosted_zone_id")
    hosted_zone_name = ssm.StringParameter.from_string_parameter_name(
        self, "HostedZoneName", string_parameter_name="hosted_zone_name")

    # --- Query main VPC and setup Security Groups

    vpc = ec2.Vpc.from_lookup(
        self,
        "VPC",
        vpc_name="main-vpc",
        tags={
            "Stack": "networking",
        },
    )

    # if we were to want high availability - we would want the instances spread over our zones
    private_subnets = ec2.SubnetSelection(
        subnet_type=ec2.SubnetType.PRIVATE,
        availability_zones=[
            "ap-southeast-2a", "ap-southeast-2b", "ap-southeast-2c"
        ],
    )

    sg_elb = ec2.SecurityGroup(
        self,
        "ELBSecurityGroup",
        vpc=vpc,
        description=f"Security Group for ELB in {id_} stack",
        security_group_name=f"{id_} ELB Security Group",
        allow_all_outbound=False,
    )

    # the ontoserver backend is pre-loaded - so does not actually need any outgoing egress rules
    sg_ecs_service = ec2.SecurityGroup(
        self,
        "ECSServiceSecurityGroup",
        vpc=vpc,
        description=f"Security Group for ECS Service in {id_} stack",
        security_group_name=f"{id_} ECS Security Group",
        allow_all_outbound=False)

    sg_ecs_service.add_ingress_rule(
        peer=sg_elb,
        connection=ec2.Port.tcp(8080),
        description="Allow traffic from Load balancer to ECS service",
    )

    # --- Setup ECS Fargate cluster

    task_execution_role = iam.Role(
        self,
        "ECSTaskExecutionRole",
        assumed_by=iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
    )
    task_execution_role.add_to_policy(
        iam.PolicyStatement(
            actions=[
                "logs:CreateLogStream",
                "logs:PutLogEvents",
                "ssm:GetParameterHistory",
                "ssm:GetParametersByPath",
                "ssm:GetParameters",
                "ssm:GetParameter",
            ],
            resources=["*"],
        ))
    task_execution_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            "service-role/AmazonECSTaskExecutionRolePolicy"))

    task = ecs.FargateTaskDefinition(
        self,
        "Task",
        cpu=service_cpu,
        memory_limit_mib=service_memory,
        task_role=task_execution_role,
        execution_role=task_execution_role,
    )

    # the ontoserver docker image we build is built with no security enabled
    # putting this env variable switches on security and enables the FHIR API in read only mode
    ontoserver_settings = {
        "ontoserver": {
            "security": {
                "enabled": True,
                "readOnly": {
                    "fhir": True
                }
            }
        }
    }

    main_container: ecs.ContainerDefinition = task.add_container(
        "Container",
        image=ecs.ContainerImage.from_ecr_repository(
            repository=repo,
            tag=ontoserver_tag,
        ),
        essential=True,
        environment={
            "SPRING_APPLICATION_JSON":
            json.dumps(ontoserver_settings, separators=(",", ":")),
            "LOG4J_FORMAT_MSG_NO_LOOKUPS": "true"
        },
        logging=ecs.LogDriver.aws_logs(stream_prefix=f"{id_}"),
    )
    main_container.add_port_mappings(
        ecs.PortMapping(
            container_port=8080,
            protocol=ecs.Protocol.TCP,
        ))

    cluster = ecs.Cluster(self, "Cluster", vpc=vpc)

    service = ecs.FargateService(
        self,
        "Service",
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        task_definition=task,
        cluster=cluster,
        vpc_subnets=private_subnets,
        desired_count=service_count,
        security_groups=[
            sg_ecs_service,
        ],
    )

    # --- Setup Application Load Balancer in front of ECS cluster

    lb = elbv2.ApplicationLoadBalancer(
        self,
        "ALB",
        vpc=vpc,
        internet_facing=True,
        security_group=sg_elb,
        deletion_protection=False,
    )
    https_listener = lb.add_listener("HttpsLBListener",
                                     port=443,
                                     certificates=[cert_apse2])
    health_check = elbv2.HealthCheck(
        interval=core.Duration.seconds(60),
        path="/fhir/metadata",
        timeout=core.Duration.seconds(10),
    )
    https_listener.add_targets(
        "LBtoECS",
        port=8080,
        protocol=elbv2.ApplicationProtocol.HTTP,
        targets=[service],
        health_check=health_check,
    )

    # DNS
    hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
        self,
        "HostedZone",
        hosted_zone_id=hosted_zone_id.string_value,
        zone_name=hosted_zone_name.string_value,
    )
    route53.ARecord(
        self,
        "AlbDomainAlias",
        zone=hosted_zone,
        record_name=dns_record_name,
        target=route53.RecordTarget.from_alias(
            route53t.LoadBalancerTarget(lb)),
    )
def __init__(self, scope: core.Construct, id: str, pub_hosted_zone: object,
             **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    sts = boto3.client("sts")
    deploy_account_id = sts.get_caller_identity()["Account"]
    deploy_region = sts.meta.region_name

    # TODO ADD an ASG
    vpc = ec2.Vpc(
        self,
        "iais-public",
        nat_gateways=0,
        subnet_configuration=[
            ec2.SubnetConfiguration(name="public",
                                    subnet_type=ec2.SubnetType.PUBLIC),
            ec2.SubnetConfiguration(name="private",
                                    subnet_type=ec2.SubnetType.ISOLATED),
        ])

    sg = ec2.SecurityGroup(self,
                           f"iais-sg-{str}",
                           vpc=vpc,
                           allow_all_outbound=True,
                           description="For HTTPS access.")
    sg.add_ingress_rule(peer=ec2.Peer.any_ipv4(),
                        connection=ec2.Port.tcp(443))
    self.sg = sg
    self.vpc = vpc

    instance_ami = ec2.MachineImage.latest_amazon_linux(
        generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
        edition=ec2.AmazonLinuxEdition.STANDARD,
        virtualization=ec2.AmazonLinuxVirt.HVM,
        storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

    role = iam.Role(self,
                    "iais-web-server-roles",
                    assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))
    role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            "service-role/AmazonEC2RoleforSSM"))
    role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            "AmazonRekognitionFullAccess"))

    instance = ec2.Instance(self,
                            "iais-web-server-instance",
                            instance_type=ec2.InstanceType("t2.micro"),
                            machine_image=instance_ami,
                            vpc=vpc,
                            role=role,
                            security_group=sg)
    instance_target = targets.InstanceIdTarget(
        instance_id=instance.instance_id, port=443)

    lb = elbv2.NetworkLoadBalancer(self,
                                   f"iais-lb-{str}",
                                   vpc=vpc,
                                   internet_facing=True)
    lb_tg = elbv2.NetworkTargetGroup(self,
                                     vpc=vpc,
                                     id=f"iais-tg-{str}",
                                     port=443,
                                     targets=[instance_target])
    lb_listener = lb.add_listener(f"iais-listener-{str}",
                                  port=443,
                                  default_target_groups=[lb_tg])

    r53.ARecord(self,
                "AliasRecord",
                zone=pub_hosted_zone,
                target=r53.RecordTarget.from_alias(
                    r53t.LoadBalancerTarget(lb)))
    r53.ARecord(self,
                "AliasRecordWww",
                zone=pub_hosted_zone,
                record_name="www.imageaisearch.com",
                target=r53.RecordTarget.from_alias(
                    r53t.LoadBalancerTarget(lb)))

    secrets_man_policy = iam.Policy(
        self,
        "iais",
        roles=[role],
        policy_name="iais-web-server-secrets-manager",
        statements=[
            iam.PolicyStatement(
                actions=[
                    "secretsmanager:GetResourcePolicy",
                    "secretsmanager:GetSecretValue",
                    "secretsmanager:DescribeSecret",
                    "secretsmanager:ListSecretVersionIds"
                ],
                resources=[
                    f"arn:aws:secretsmanager:{deploy_region}:{deploy_account_id}:secret:DJANGO_SECRET_KEY-mHAOZX"
                ])
        ])
    secrets_man_policy.attach_to_role(role)