def __init__(self, scope: core.Construct, id: str, domain_name: str, **kwargs) -> None:
    """Stack that owns a single public hosted zone for *domain_name*.

    The zone is exposed as ``self.hosted_zone`` so sibling stacks can
    attach records to it.
    """
    super().__init__(scope, id, **kwargs)
    self.hosted_zone = HostedZone(
        self,
        'TheBlueDevHostedZone',
        zone_name=domain_name,
    )
def __init__(self, scope: core.Construct, id: str, domain: str, **kwargs) -> None:
    """Per-domain stack: hosted-zone lookup, public CRUD table, Cognito
    user pool, and lazily-built API Gateway plumbing.
    """
    super().__init__(scope, id, **kwargs)

    # Remember where this module lives plus the identity we were given.
    self.stack_module_path: str = os.path.dirname(__file__)
    self.construct_id: str = id
    self.domain: str = domain

    # Import the existing public hosted zone for the domain.
    self.zone: HostedZone = HostedZone.from_lookup(
        self, "HostedZone", domain_name=domain, private_zone=False)
    kix.info(f"Zone from look-up: {self.zone.zone_name}")

    # Data table, named per domain (dots swapped for underscores).
    table_name: str = "public_crud" + domain.replace(".", "_")
    self.public_table: aws_dynamodb.Table = self.create_table(
        table_name, "PublicCrudTable")

    # Cognito user pool backing authentication.
    self.user_pool: aws_cognito.UserPool = self._create_user_pool()

    # API Gateway pieces are created on demand; start out empty.
    self._rest_api: Optional[aws_apigateway.RestApi] = None
    self._api_authorizer = None
    self._api_map: Dict[str, aws_apigateway.Resource] = {}
async def create_hosted_zone(self) -> HostedZone:
    """Import the pre-existing guyandjaella.com hosted zone into this stack.

    Stores the zone id, name and imported construct on ``self`` and
    returns the imported ``HostedZone``.
    """
    self.hosted_zone_id = 'Z2X9F83UNB4V52'
    self.zone_name = 'guyandjaella.com'
    print('HostedZone Found:{}\n{}'.format(self.zone_name, self.hosted_zone_id))
    # BUG FIX: the original formatted the *builtin* ``id`` function into
    # the construct id ('{}HostedZone'.format(id)), yielding an id like
    # "<built-in function id>HostedZone".  Use the zone name instead so
    # the construct id is stable and readable.
    self.zone = HostedZone.from_hosted_zone_attributes(
        self,
        '{}HostedZone'.format(self.zone_name),
        hosted_zone_id=self.hosted_zone_id,
        zone_name=self.zone_name,
    )
    # BUG FIX: the signature promises a HostedZone but the original
    # returned None; return the imported zone to honour the annotation.
    return self.zone
def __init__(self, scope: core.Construct, id: str, webflow_aws_setup_bucket: str,
             configuration: dict, **kwargs) -> None:
    """Wire up the full static-site delivery chain for a Webflow export:
    Route53 zone import, Lambda@Edge path rewrite, CloudFront over TLS,
    S3 source bucket with an upload-triggered lambda, and DNS records.
    """
    super().__init__(scope, id, **kwargs)

    hosted_zone = HostedZone.from_hosted_zone_attributes(
        self, 'HostedZone',
        hosted_zone_id=configuration['route_53_hosted_zone_id'],
        zone_name=configuration['route_53_hosted_zone_name'])

    # Lambda@Edge function that rewrites request paths for the origin.
    edge_role = self.__create_cloud_front_lambda_execution_role()
    edge_lambda = self.__create_cloud_front_www_edit_path_for_origin_lambda(
        webflow_aws_setup_bucket=webflow_aws_setup_bucket,
        lambda_execution_role=edge_role)
    edge_lambda_version = \
        self.__create_cloud_front_www_edit_path_for_origin_lambda_version(
            cloud_front_www_edit_path_for_origin_lambda=edge_lambda)

    # CloudFront distribution fronting the site bucket over TLS.
    origin_access_identity = self.__create_cloud_front_origin_access_identity()
    cache_policy = self.__create_cloud_front_cache_policy()
    certificate = self.__create_ssl_certificate(
        route_53_hosted_zone=hosted_zone,
        domain_name=configuration['domain_name'],
        alternative_domain_names=configuration['CNAMEs'])
    distribution = self.__create_cloud_front_www(
        origin_bucket_name=configuration['bucket_name'],
        cache_policy=cache_policy,
        origin_access_identity=origin_access_identity,
        ssl_certificate=certificate,
        domain_name=configuration['domain_name'],
        alternative_domain_names=configuration['CNAMEs'],
        edge_lambda_viewer_request=edge_lambda_version)

    # S3 source bucket plus the lambda that reacts to uploads.
    trigger_role = self.__create_s3_trigger_lambda_execution_role(
        bucket_name=configuration['bucket_name'],
        cloudfront_distribution=distribution)
    trigger_lambda = self.__create_s3_trigger_lambda_function(
        webflow_aws_setup_bucket=webflow_aws_setup_bucket,
        execution_role=trigger_role,
        cloud_front_distribution=distribution)
    source_bucket = self.__create_s3_source_bucket(
        bucket_name=configuration['bucket_name'],
        s3_trigger_lambda_function=trigger_lambda)
    self.__create_s3_trigger_lambda_invoke_permission(
        bucket_name=configuration['bucket_name'],
        s3_trigger_lambda_function=trigger_lambda)
    self.__create_s3_source_bucket_policy(
        s3_source_bucket=source_bucket,
        cloud_front_origin_access_identity=origin_access_identity)

    # Finally, point the domain (and its aliases) at the distribution.
    self.__create_route_53_record_group(
        route_53_hosted_zone=hosted_zone,
        domain_name=configuration['domain_name'],
        alternative_domain_names=configuration['CNAMEs'],
        cloud_front_distribution=distribution)
def __init__(self, scope: core.Construct, id: str) -> None:
    """Register the MX record that routes the domain's mail to SES."""
    super().__init__(scope, id)

    zone = HostedZone.from_lookup(
        scope=self,
        id="DomainHostedZone",
        domain_name=KESHER_DOMAIN_NAME,
        private_zone=False)

    # The SES inbound endpoint is regional; see
    # https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-mx-record.html
    region = core.Stack.of(self).region
    inbound_host = f'inbound-smtp.{region}.amazonaws.com'

    MxRecord(
        scope=self,
        id='DomainMXRecord',
        values=[MxRecordValue(host_name=inbound_host, priority=10)],
        zone=zone)
def create_route53(stack, domain, distrubution):
    """Add A and AAAA alias records in *domain*'s zone pointing at the
    CloudFront distribution.

    NOTE(review): the parameter is spelled 'distrubution' in the original;
    kept unchanged for caller compatibility.
    """
    zone = HostedZone.from_lookup(
        stack, 'RadiantLoungeZone', domain_name=domain)
    ARecord(
        stack, 'RadiantLoungeARecord',
        zone=zone,
        target=RecordTarget.from_alias(CloudFrontTarget(distrubution)))
    AaaaRecord(
        stack, 'RadiantLoungeAaaaRecord',
        zone=zone,
        target=RecordTarget.from_alias(CloudFrontTarget(distrubution)))
def __init__(self, app: App, id: str, env: Environment) -> None: super().__init__(app, id, env=env) # start by getting the DNS zone we're going to work with zone = HostedZone.from_lookup(self, "Dominick", domain_name=DOMAIN) # create a certificate for the web service which matches its hostname cert = Certificate(self, "Cletus", domain_name=HOSTNAME, validation=CertificateValidation.from_dns(zone)) # the services will live in a vpc, of course vpc = ec2.Vpc(self, "Virgil") # we're going to scale this web-service automatically asg = AutoScalingGroup( self, "Alice", vpc=vpc, user_data=http_service(), instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO), machine_image=ec2.AmazonLinuxImage( generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2)) # explicitly allow internal access from the vpc just to be safe asg.connections.allow_internally(Port.tcp(WEB_PORT), "web-service") asg.connections.allow_internally(Port.tcp(NOT_WEB), "not-web") # expose the scaling group ports and permit egress asg.connections.allow_from_any_ipv4(Port.tcp(WEB_PORT)) asg.connections.allow_from_any_ipv4(Port.tcp(NOT_WEB)) # create a health check for the not-web service that currently if NOT_WEB_HEALTH_CHECKS: # points to the not-web service checker = HealthCheck(interval=Duration.seconds(10), port=NOT_WEB, protocol=Protocol.TCP) else: # points to the web port where our demo server listens checker = HealthCheck(interval=Duration.seconds(10), port=str(WEB_PORT), protocol=WEB_PROT) # put the scaling group behind a network target group for the LB notwebish = NetworkTargetGroup(self, "Allison", vpc=vpc, health_check=checker, targets=[asg], port=NOT_WEB, protocol=Protocol.TCP) # for the web-like ports, we can use the default health check webish = NetworkTargetGroup( self, "Alicen", vpc=vpc, health_check=HealthCheck(interval=Duration.seconds(10)), targets=[asg], port=WEB_PORT, protocol=WEB_PROT) if True: # create the load balancer and put it into dns lb = 
NetworkLoadBalancer(self, "Lisa", vpc=vpc, internet_facing=True) # create a hostname for the service CnameRecord(self, "Carl", domain_name=lb.load_balancer_dns_name, zone=zone, record_name=HOSTNAME.split('.')[0], ttl=Duration.seconds(60)) else: # a multi-step deployment could allow using an alias in R53 lb = NetworkLoadBalancer.from_network_load_balancer_attributes( self, "Larry", vpc=vpc, load_balancer_arn=some.load_balancer_arn, load_balancer_dns_name=HOSTNAME, load_balancer_canonical_hosted_zone_id=zone.hosted_zone_id) # create a hostname for the service AaaaRecord(self, "Eric", zone=zone, record_name=HOSTNAME.split('.')[0], target=RecordTarget.from_alias(LoadBalancerTarget(lb))) # point the load balancer to the target group for the ssl service # # TODO: determine if we need to use the same cert for pub-facing # and internal service listener_cert = ListenerCertificate(cert.certificate_arn) lb.add_listener("Cecil", port=443, certificates=[listener_cert], default_target_groups=[webish]) # point the load balancer to the target group for the web service lb.add_listener("Webster", port=80, default_target_groups=[webish]) # point the load balancer to the group for the not-web service lb.add_listener("NotWeb", default_target_groups=[notwebish], port=NOT_WEB, protocol=Protocol.TCP) # auto scale the, uh, autoscaling group asg.scale_on_cpu_utilization("ScaleCPU", target_utilization_percent=80) # emit some output values, largely for console use CfnOutput(self, "LB", export_name="LB", value=lb.load_balancer_dns_name) CfnOutput(self, "HTTP", export_name="HTTP", value="http://{}/".format(HOSTNAME)) CfnOutput(self, "HTTPS", export_name="HTTPS", value="https://{}/".format(HOSTNAME)) CfnOutput(self, "TCP", export_name="TCP", value="tcp://{}:{}/".format(HOSTNAME, NOT_WEB)) CfnOutput(self, "Cert", export_name="Cert", value=cert.certificate_arn)
def __init__(
    self,
    scope: Construct,
    id: str,
    cluster: ICluster,
    ecs_security_group: SecurityGroup,
    ecs_source_security_group: SecurityGroup,
    vpc: IVpc,
    **kwargs,
) -> None:
    """NLB stack: a small public ASG running nginx plus a SOCKS proxy,
    configured entirely via EC2 user-data, fronting the ECS cluster.

    Registers itself in the module-level ``g_nlb`` singleton; only one
    instance of this stack may exist per process.
    """
    super().__init__(scope, id, **kwargs)

    global g_nlb

    Tags.of(self).add("Stack", "Common-Nlb")

    # TODO -- You need to do some manual actions:
    # TODO -- 1) enable auto-assign IPv6 address on public subnets
    # TODO -- 2) add to the Outbound rules of "Live-Common-Nlb/ASG/InstanceSecurityGroup" the destination "::/0"

    # Internal zone used for the private copy of the NLB record below.
    self.private_zone = HostedZone.from_lookup(
        self,
        "PrivateZone",
        domain_name="openttd.internal",
        private_zone=True,
    )

    # Build the instance bootstrap script step by step; the files it
    # unpacks come from the "user_data/nlb/" asset uploaded to S3.
    user_data = UserData.for_linux(shebang="#!/bin/bash -ex")

    asset = Asset(self, "NLB", path="user_data/nlb/")

    user_data.add_commands(
        "echo 'Extracting user-data files'",
        "mkdir /nlb",
        "cd /nlb",
    )
    user_data.add_s3_download_command(
        bucket=asset.bucket,
        bucket_key=asset.s3_object_key,
        local_file="/nlb/files.zip",
    )
    user_data.add_commands("unzip files.zip", )

    user_data.add_commands(
        "echo 'Setting up configuration'",
        f"echo '{self.region}' > /etc/.region",
        f"echo '{cluster.cluster_name}' > /etc/.cluster",
    )

    user_data.add_commands(
        "echo 'Installing nginx'",
        "amazon-linux-extras install epel",
        "yum install nginx -y",
        "cp /nlb/nginx.conf /etc/nginx/nginx.conf",
        "mkdir /etc/nginx/nlb.d",
    )
    user_data.add_commands(
        "echo 'Installing Python3'",
        "yum install python3 -y",
        "python3 -m venv /venv",
        "/venv/bin/pip install -r /nlb/requirements.txt",
    )
    user_data.add_commands(
        "echo 'Generating nginx configuration'",
        "cd /etc/nginx/nlb.d",
        "/venv/bin/python /nlb/nginx.py",
        "systemctl start nginx",
    )
    user_data.add_commands(
        "echo 'Setting up SOCKS proxy'",
        "useradd pproxy",
        "cp /nlb/pproxy.service /etc/systemd/system/",
        "systemctl daemon-reload",
        "systemctl enable pproxy.service",
        "systemctl start pproxy.service",
    )

    asg = AutoScalingGroup(
        self,
        "ASG",
        vpc=vpc,
        instance_type=InstanceType("t3a.nano"),
        machine_image=MachineImage.latest_amazon_linux(
            generation=AmazonLinuxGeneration.AMAZON_LINUX_2),
        min_capacity=2,
        vpc_subnets=SubnetSelection(subnet_type=SubnetType.PUBLIC,
                                    one_per_az=True),
        user_data=user_data,
        health_check=HealthCheck.elb(grace=Duration.seconds(0)),
    )
    asg.add_security_group(ecs_security_group)

    # SSM access for operators; read access to the user-data asset.
    asg.role.add_managed_policy(
        ManagedPolicy.from_aws_managed_policy_name(
            "AmazonSSMManagedInstanceCore"))
    asset.grant_read(asg.role)

    # Read-only ECS/EC2 discovery permissions used by /nlb/nginx.py.
    policy = ManagedPolicy(self, "Policy")
    policy_statement = PolicyStatement(
        actions=[
            "ec2:DescribeInstances",
            "ecs:DescribeContainerInstances",
            "ecs:DescribeTasks",
            "ecs:ListContainerInstances",
            "ecs:ListServices",
            "ecs:ListTagsForResource",
            "ecs:ListTasks",
        ],
        resources=["*"],
    )
    policy.add_statements(policy_statement)
    asg.role.add_managed_policy(policy)

    # We could also make an additional security-group and add that to
    # the ASG, but it keeps adding up. This makes it a tiny bit
    # easier to get an overview what traffic is allowed from the
    # console on AWS.
    assert isinstance(asg.node.children[0], SecurityGroup)
    self.security_group = asg.node.children[0]

    # NOTE(review): ``listener_https`` and ``self.admin_subdomain_name``
    # are not defined anywhere in this scope or visible chunk — this call
    # looks like it was pasted from another stack and would raise
    # NameError at synth time; confirm against the rest of the file.
    listener_https.add_targets(
        subdomain_name=self.admin_subdomain_name,
        port=80,
        target=asg,
        priority=2,
    )

    # Create a Security Group so the lambdas can access the EC2.
    # This is needed to check if the EC2 instance is fully booted.
    lambda_security_group = SecurityGroup(
        self,
        "LambdaSG",
        vpc=vpc,
    )

    self.security_group.add_ingress_rule(
        peer=lambda_security_group,
        connection=Port.tcp(80),
        description="Lambda to target",
    )
    self.security_group.add_ingress_rule(
        peer=ecs_source_security_group,
        connection=Port.udp(8080),
        description="ECS to target",
    )

    self.create_ecs_lambda(
        cluster=cluster,
        auto_scaling_group=asg,
    )

    # Lifecycle hooks: one lambda on instance launch, one on terminate.
    self.create_asg_lambda(
        lifecycle_transition=LifecycleTransition.INSTANCE_LAUNCHING,
        timeout=Duration.seconds(180),
        vpc=vpc,
        security_group=lambda_security_group,
        auto_scaling_group=asg,
    )
    self.create_asg_lambda(
        lifecycle_transition=LifecycleTransition.INSTANCE_TERMINATING,
        timeout=Duration.seconds(30),
        vpc=vpc,
        security_group=lambda_security_group,
        auto_scaling_group=asg,
    )

    # Initialize the NLB record on localhost, as we need to be able to
    # reference it for other entries to work correctly.
    ARecord(
        self,
        "ARecord",
        target=RecordTarget.from_ip_addresses("127.0.0.1"),
        zone=dns.get_hosted_zone(),
        record_name=self.subdomain_name,
        ttl=Duration.seconds(60),
    )
    AaaaRecord(
        self,
        "AAAARecord",
        target=RecordTarget.from_ip_addresses("::1"),
        zone=dns.get_hosted_zone(),
        record_name=self.subdomain_name,
        ttl=Duration.seconds(60),
    )

    # To make things a bit easier, also alias to staging.
    self.create_alias(self, "nlb.staging")

    # Create a record for the internal DNS
    ARecord(
        self,
        "APrivateRecord",
        target=RecordTarget.from_ip_addresses("127.0.0.1"),
        zone=self.private_zone,
        record_name=self.subdomain_name,
        ttl=Duration.seconds(60),
    )

    # Enforce the singleton: the module-level g_nlb slot may be set once.
    if g_nlb is not None:
        raise Exception("Only a single NlbStack instance can exist")
    g_nlb = self
def __init__(self, scope: Construct, id: str, *, deployment: Deployment, **kwargs) -> None:
    """Serve www.openttd.com (and the apex) from S3 behind CloudFront,
    with a Lambda@Edge redirect and a DNS-validated wildcard certificate.
    """
    super().__init__(scope, id, **kwargs)

    Tags.of(self).add("Application", self.application_name)
    Tags.of(self).add("Deployment", deployment.value)

    zone = HostedZone.from_lookup(
        self, "Zone",
        domain_name="openttd.com",
    )
    fqdn = "www.openttd.com"

    # CloudFront only accepts certificates from us-east-1.
    cert = DnsValidatedCertificate(
        self, "OpenttdCom-Certificate",
        hosted_zone=zone,
        domain_name=fqdn,
        subject_alternative_names=["*.openttd.com", "openttd.com"],
        region="us-east-1",
        validation_method=ValidationMethod.DNS,
    )

    # Edge function handling redirects at origin-request time.
    redirect = lambda_edge.create_function(
        self, "OpenttdComRedirect",
        runtime=Runtime.NODEJS_10_X,
        handler="index.handler",
        code=Code.from_asset("./lambdas/openttd-com-redirect"),
    )

    site = S3CloudFront(
        self, "S3CloudFront",
        subdomain_name=fqdn,
        cert=CertificateResult(cert, cert.certificate_arn, fqdn),
        additional_fqdns=["*.openttd.com", "openttd.com"],
        lambda_function_associations=[
            LambdaFunctionAssociation(
                event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                lambda_function=redirect,
            ),
        ],
        no_dns=True,
    )
    S3CloudFrontPolicy(
        self, "S3cloudFrontPolicy",
        s3_cloud_front=site,
    )

    # Alias both "www" and the zone apex (record_name=None) at CloudFront.
    for record_name in ("www", None):
        route53.ARecord(
            self, f"{record_name}.openttd.com-ARecord",
            target=RecordTarget.from_alias(CloudFrontTarget(site.distribution)),
            zone=zone,
            record_name=record_name,
        )
        route53.AaaaRecord(
            self, f"{record_name}.openttd.com-AaaaRecord",
            target=RecordTarget.from_alias(CloudFrontTarget(site.distribution)),
            zone=zone,
            record_name=record_name,
        )