Example 1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        prj_name = self.node.try_get_context("project_name")
        env_name = self.node.try_get_context("env")
        domain_name = self.node.try_get_context("domain_name")

        zone_id = ssm.StringParameter.from_string_parameter_name(
            self,
            'zone-id-ssm',
            string_parameter_name='/' + env_name + '/zone-id')

        dns_zone = r53.HostedZone.from_hosted_zone_attributes(
            self,
            'hosted-zone',
            hosted_zone_id=zone_id.string_value,
            zone_name=domain_name)

        self.cert_manager = acm.DnsValidatedCertificate(
            self,
            'acm-id',
            hosted_zone=dns_zone,
            domain_name=domain_name,
            subject_alternative_names=['*.' + domain_name],
            region='us-east-1')

        self.cert_manager_eu = acm.DnsValidatedCertificate(
            self,
            'acm-eu-central',
            hosted_zone=dns_zone,
            domain_name=domain_name,
            subject_alternative_names=['*.' + domain_name],
            region='eu-central-1')
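
The snippets in this listing are shown without their imports or app wiring. Below is a minimal sketch of the CDK v1 setup that Example 1 appears to assume; the module aliases match the snippet, while the stack class name and the app wiring are illustrative only, not taken from the original source.

# Sketch only: imports follow the CDK v1 Python packages
# (aws_cdk.core, aws_cdk.aws_ssm, aws_cdk.aws_route53, aws_cdk.aws_certificatemanager).
from aws_cdk import (
    core,
    aws_ssm as ssm,
    aws_route53 as r53,
    aws_certificatemanager as acm,
)

class CertificateStack(core.Stack):  # hypothetical class name wrapping Example 1's __init__
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # ... certificate definitions from Example 1 go here ...

app = core.App()
# "project_name", "env" and "domain_name" are read with self.node.try_get_context,
# so they can be supplied in cdk.json or on the command line, e.g.
# `cdk deploy -c env=dev -c domain_name=example.com -c project_name=demo`.
CertificateStack(app, 'certificates')
app.synth()
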
Example 2
    def __init__(
        self, scope: core.Construct, id: str, prefix: str, domains: dict, **kwargs
    ) -> None:

        super().__init__(scope, id, **kwargs)

        regions = ["ap-southeast-2", "us-east-1"]

        primary_domain = [
            domain["domain"] for domain in domains if domain["primary"]
        ].pop()
        primary_zone = self.fetch_zone(primary_domain)
        secondary_domains = [
            domain["domain"] for domain in domains if not domain["primary"]
        ]

        subject_alt = secondary_domains + [domain["wildcard"] for domain in domains]

        for region in regions:
            certificate = certificatemanager.DnsValidatedCertificate(
                self,
                prefix + "_wildcard_" + region,
                hosted_zone=primary_zone,
                region=region,
                domain_name=primary_domain,
                subject_alternative_names=subject_alt,
                validation_method=certificatemanager.ValidationMethod.DNS,
            )
Example 3
 def create_api_certificate(self, domain: str,
                            zone: aws_route53.HostedZone):
     kix.info("Creating Certificate")
     cert = aws_certificatemanager.DnsValidatedCertificate(
         self, f"ApiCertificate", domain_name=domain, hosted_zone=zone)
     core.CfnOutput(self, 'ApiCertificateArn', value=cert.certificate_arn)
     return cert
Example 4
    def __init__(self, scope: core.Construct, id: str, domain_name: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Materialize existing AWS Hosted DNS Zone
        dns_zone = r53_.HostedZone.from_lookup(self, "dns_zone", domain_name=domain_name)
        
        # Compose site name for AWS Certificate
        site_fqdn = "www." + dns_zone.zone_name

        # Create a DNS Certificate
        certificate = cm_.DnsValidatedCertificate (self,"certificate",
                                                         domain_name=site_fqdn,
                                                         hosted_zone=dns_zone,
                                                         region="us-east-1")

        # Define Bucket to store Site Contents
        ts_www_bucket = s3_.Bucket(self,"truesys-static-website",
                                        bucket_name = site_fqdn,
                                        website_index_document = "index.html",
                                        website_error_document = "404/index.html",
                                        public_read_access = True
                                        )

        # Create a Lambda function to do URL rewriting for the default document
        url_rewrite_lambda = _lambda.Function(self,'URLRewriteLambdaEdge',
                                                   handler='url_rewrite_handler.handler',
                                                   runtime=_lambda.Runtime.PYTHON_3_7,
                                                   code=_lambda.Code.asset('lambda_edge'),
        )
        
        
        # A numbered version to give to cloudfront
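        # Hedged sketch, not part of the original snippet: one way the truncated
        # "numbered version" step could continue, assuming CDK v1's aws_cloudfront
        # module is imported (e.g. as cloudfront_). Identifiers below are illustrative.
        # Lambda@Edge associations require a concrete function version.
        url_rewrite_version = _lambda.Version(
            self, 'URLRewriteLambdaEdgeVersion',
            lambda_=url_rewrite_lambda)

        # CloudFront distribution serving the site bucket over the ACM certificate,
        # with the versioned function attached as an edge association.
        # (Lambda@Edge additionally requires the function's role to trust
        # edgelambda.amazonaws.com and the stack to be deployed in us-east-1.)
        distribution = cloudfront_.CloudFrontWebDistribution(
            self, 'SiteDistribution',
            viewer_certificate=cloudfront_.ViewerCertificate.from_acm_certificate(
                certificate, aliases=[site_fqdn]),
            origin_configs=[
                cloudfront_.SourceConfiguration(
                    s3_origin_source=cloudfront_.S3OriginConfig(
                        s3_bucket_source=ts_www_bucket),
                    behaviors=[
                        cloudfront_.Behavior(
                            is_default_behavior=True,
                            lambda_function_associations=[
                                cloudfront_.LambdaFunctionAssociation(
                                    event_type=cloudfront_.LambdaEdgeEventType.ORIGIN_REQUEST,
                                    lambda_function=url_rewrite_version)])])
            ])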
Example 5
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Get the hosted Zone and create a certificate for our domain

        hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
            self,
            "HostedZone",
            hosted_zone_id=HOSTED_ZONE_ID,
            zone_name=HOSTED_ZONE_NAME,
        )

        cert = certificatemanager.DnsValidatedCertificate(
            self,
            "Certificate",
            hosted_zone=hosted_zone,
            domain_name=APP_DNS_NAME)

        # Set up a new VPC

        vpc = ec2.Vpc(self, "med-qaid-vpc", max_azs=2)

        # Set up an ECS Cluster for fargate

        cluster = ecs.Cluster(self, "med-qaid-cluster", vpc=vpc)

        # Define the Docker Image for our container (the CDK will do the build and push for us!)
        docker_image = ecr_assets.DockerImageAsset(
            self,
            "med-qaid-app",
            directory=os.path.join(os.path.dirname(__file__), "..", "src"),
        )

        # Define the fargate service + ALB

        fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "FargateService",
            cluster=cluster,
            certificate=cert,
            domain_name=f"{APP_DNS_NAME}",
            domain_zone=hosted_zone,
            cpu=2048,
            memory_limit_mib=16384,
            task_image_options={
                "image":
                ecs.ContainerImage.from_docker_image_asset(docker_image),
                "environment": {
                    "PORT": "80",
                },
            },
        )

        # Allow 10 seconds for in flight requests before termination, the default of 5 minutes is much too high.
        fargate_service.target_group.set_attribute(
            key="deregistration_delay.timeout_seconds", value="10")
Example 6
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        hosted_zone_id: str,
        hosted_zone_name: str,
        domain_name: str,
        api: apigw.HttpApi,
    ):
        super().__init__(scope, id)

        hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
            self,
            id="dns-hosted-zone",
            hosted_zone_id=hosted_zone_id,
            zone_name=hosted_zone_name)

        certificate = certmgr.DnsValidatedCertificate(
            self,
            "tls-certificate",
            domain_name=domain_name,
            hosted_zone=hosted_zone,
            validation_method=certmgr.ValidationMethod.DNS,
        )

        custom_domain = apigw.CfnDomainName(
            self,
            "custom-domain",
            domain_name=domain_name,
            domain_name_configurations=[
                apigw.CfnDomainName.DomainNameConfigurationProperty(
                    certificate_arn=certificate.certificate_arn)
            ],
        )

        custom_domain.node.add_dependency(api)
        custom_domain.node.add_dependency(certificate)

        api_mapping = apigw.CfnApiMapping(self,
                                          "custom-domain-mapping",
                                          api_id=api.http_api_id,
                                          domain_name=domain_name,
                                          stage="$default")

        api_mapping.node.add_dependency(custom_domain)

        route53.ARecord(
            self,
            "custom-domain-record",
            target=route53.RecordTarget.from_alias(
                ApiGatewayV2Domain(custom_domain)),
            zone=hosted_zone,
            record_name=domain_name,
        )
Example 7
    def __init__(self, scope: core.Construct, id: str, hostedzoneid: str, hostedzonename: str, origin_name: str, **kwargs) -> None:

        super().__init__(scope, id, **kwargs)

        domain_name = "{}.{}".format("test",hostedzonename)

        hostedzone = dns.HostedZone.from_hosted_zone_attributes(
            self,
            "hosted_zone",
            hosted_zone_id=hostedzoneid,
            zone_name=hostedzonename
        )

        acm_certificate = acm.DnsValidatedCertificate(
            self,
            "ACMCertGenerator",
            hosted_zone=hostedzone,
            region="us-east-1",
            domain_name="test.awsels.com",
            validation_method=acm.ValidationMethod.DNS
        )

        source_configuration = cloudfront.SourceConfiguration(
            custom_origin_source=cloudfront.CustomOriginConfig(
                domain_name=origin_name,
                allowed_origin_ssl_versions=[cloudfront.OriginSslPolicy.TLS_V1_2],
                http_port=80,
                https_port=443,
                origin_protocol_policy=cloudfront.OriginProtocolPolicy.HTTPS_ONLY
            ),
            behaviors=[cloudfront.Behavior(
                compress=False,
                allowed_methods=cloudfront.CloudFrontAllowedMethods.ALL,
                is_default_behavior=True,
                cached_methods=cloudfront.CloudFrontAllowedCachedMethods.GET_HEAD
            )]
        )

        viewer_configuration = cloudfront.ViewerCertificate.from_acm_certificate(
                certificate=acm.Certificate.from_certificate_arn(self, "certificate", certificate_arn=acm_certificate.certificate_arn),
                aliases=[origin_name],
                security_policy=cloudfront.SecurityPolicyProtocol.TLS_V1,
                ssl_method=cloudfront.SSLMethod.SNI
        )


        distribution = cloudfront.CloudFrontWebDistribution(
            self,
            'Distribution',
            origin_configs=[source_configuration],
            viewer_certificate=viewer_configuration,
            price_class=cloudfront.PriceClass.PRICE_CLASS_100,
        )
Example 8
    def __init__(self, app: core.App, id: str, **kwargs) -> None:
        super().__init__(app, id)
        #create an S3 bucket
        domainName = 'accelerate.dev'
        myBucket = s3.Bucket(self,
                             'accelerate.dev-s3bucket',
                             bucket_name='accelerate-website',
                             public_read_access=True,
                             website_index_document='index.html',
                             website_error_document='404.html',
                             removal_policy=core.RemovalPolicy.DESTROY)
        myBucket.grant_public_access()
        myBucket.add_to_resource_policy(  #Grant read access to everyone in your account
            iam.PolicyStatement(
                actions=['s3:GetObject'],
                resources=[myBucket.arn_for_objects('*')],
                principals=[
                    iam.AccountPrincipal(account_id=core.Aws.ACCOUNT_ID)
                ]))
        myUser = iam.User(self, 'deploy_' +
                          domainName)  #Grant write access to a specific user
        myBucket.grant_write(myUser)
        hostedZone = route53.HostedZone.from_hosted_zone_attributes(
            self,
            "HostedZone_" + domainName,
            hosted_zone_id='Z00154093I7THXRTRF8QB',
            zone_name=domainName)
        cert = certmgr.DnsValidatedCertificate(self,
                                               "cert_" + domainName,
                                               domain_name=domainName,
                                               hosted_zone=hostedZone)
        distribution = cloudfront.CloudFrontWebDistribution(
            self,
            "accelerate.dev-distribution",
            price_class=cloudfront.PriceClass.PRICE_CLASS_100,
            origin_configs=[
                cloudfront.SourceConfiguration(
                    s3_origin_source=cloudfront.S3OriginConfig(
                        s3_bucket_source=myBucket),
                    behaviors=[cloudfront.Behavior(is_default_behavior=True)])
            ],
            viewer_certificate=cloudfront.ViewerCertificate.
            from_acm_certificate(cert, aliases=['accelerate.dev']))

        route53.ARecord(self,
                        "Alias_" + domainName,
                        zone=hostedZone,
                        target=route53.RecordTarget.from_alias(
                            targets.CloudFrontTarget(distribution)))
Example 9
    def __init__(self, scope: core.Construct, stack_id: str, *, api_name: str,
                 domain_name: str, functions_stacks: List[FunctionsStack],
                 subdomain: str, **kwargs):
        super().__init__(scope, stack_id, **kwargs)

        hosted_zone = route53.HostedZone.from_lookup(self,
                                                     'HostedZone',
                                                     domain_name=domain_name)

        subdomain = f'{subdomain}.{hosted_zone.zone_name}'

        certificate = acm.DnsValidatedCertificate(self,
                                                  'Certificate',
                                                  domain_name=subdomain,
                                                  hosted_zone=hosted_zone)

        self.api = apigw.HttpApi(self, 'HttpApi', api_name=api_name)

        domain_name = apigw.CfnDomainName(
            self,
            'DomainName',
            domain_name=subdomain,
            domain_name_configurations=[
                apigw.CfnDomainName.DomainNameConfigurationProperty(
                    certificate_arn=certificate.certificate_arn)
            ])

        # add an alias to the hosted zone
        route53.ARecord(self,
                        'ARecord',
                        record_name=subdomain,
                        target=route53.RecordTarget.from_alias(
                            ApiGatewayV2Domain(domain_name)),
                        zone=hosted_zone)

        mapping = apigw.CfnApiMapping(self,
                                      'ApiMapping',
                                      api_id=self.api.http_api_id,
                                      domain_name=domain_name.ref,
                                      stage='$default')

        mapping.add_depends_on(domain_name)

        for functions_stack in functions_stacks:
            self.api.add_routes(integration=apigw.LambdaProxyIntegration(
                handler=functions_stack.receiver_function),
                                methods=[functions_stack.api_method],
                                path=functions_stack.api_path)
Example 10
    def create_dns_certificate(self, logical_id, resource_name, **kwargs):
        '''
        Create ACM certificate using aws_certificatemanager.DnsValidatedCertificate method
        '''
        ihosted_zone = self.get_hosted_zone('IHostedZoneId')

        return aws_certificatemanager.DnsValidatedCertificate(
            self,
            logical_id,
            hosted_zone=ihosted_zone,
            # domain_name = self.hosted_zone_name,
            domain_name='*.{}'.format(self.hosted_zone_name),
            subject_alternative_names=[
                self.hosted_zone_name, '*.{}'.format(self.hosted_zone_name)
            ],
            validation_method=aws_certificatemanager.ValidationMethod.DNS)
Example 11
    def __init__(self, scope: core.Construct, stack_id: str, *, api_name: str,
                 bucket: s3.Bucket, domain_name: str, functions,
                 subdomain: str, **kwargs):
        super().__init__(scope, stack_id, **kwargs)

        hosted_zone = route53.HostedZone.from_lookup(self,
                                                     'HostedZone',
                                                     domain_name=domain_name)

        subdomain = f'{subdomain}.{hosted_zone.zone_name}'

        certificate = acm.DnsValidatedCertificate(self,
                                                  'Certificate',
                                                  domain_name=subdomain,
                                                  hosted_zone=hosted_zone)

        self.api = apigw.HttpApi(self, 'HttpApi', api_name=api_name)

        domain_name = apigw.CfnDomainName(
            self,
            'DomainName',
            domain_name=subdomain,
            domain_name_configurations=[
                apigw.CfnDomainName.DomainNameConfigurationProperty(
                    certificate_arn=certificate.certificate_arn)
            ])

        # add an alias to the hosted zone
        route53.ARecord(self,
                        'ARecord',
                        record_name=subdomain,
                        target=route53.RecordTarget.from_alias(
                            ApiGatewayV2Domain(domain_name)),
                        zone=hosted_zone)

        mapping = apigw.CfnApiMapping(self,
                                      'ApiMapping',
                                      api_id=self.api.http_api_id,
                                      domain_name=domain_name.ref,
                                      stage='$default')

        mapping.add_depends_on(domain_name)

        for function in functions:
            self.add_endpoint(bucket, function)
Example 12
 def __create_certificate(self, hosted_zone):
     if self.__domain_certificate_arn:
         # If certificate arn is provided, import the certificate
         self.certificate = acm.Certificate.from_certificate_arn(
             self,
             "site_certificate",
             certificate_arn=self.__domain_certificate_arn,
         )
     else:
         # If certificate arn is not provided, create a new one.
         # ACM certificates that are used with CloudFront must be in
         # the us-east-1 region.
         self.certificate = acm.DnsValidatedCertificate(
             self,
             "site_certificate",
             domain_name=self._site_domain_name,
             hosted_zone=hosted_zone,
             region="us-east-1",
         )
Example 13
    def __init__(self, scope: core.Construct, id: str, hosted_zone_id: str,
                 hosted_zone_name: str, website_domain_name: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
            self,
            "HostedZone",
            hosted_zone_id=hosted_zone_id,
            zone_name=hosted_zone_name)

        # SSL/TLS Certificate
        tls_cert = certificatemanager.DnsValidatedCertificate(
            self,
            "Certificate",
            hosted_zone=hosted_zone,
            domain_name=website_domain_name)

        core.CfnOutput(self, "CertificateArn", value=tls_cert.certificate_arn)
Example 14
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = ec2.Vpc(self, "VPC", max_azs=2)

        cluster = ecs.Cluster(self, "Cluster", vpc=vpc)

        hosted_zone = route53.HostedZone.from_lookup(
            self,
            "HostedZone",
            domain_name="greengocloud.com",
            private_zone=False)

        certificate = certmgr.DnsValidatedCertificate(
            self,
            "HackCertificate",
            domain_name="hackernews.greengocloud.com",
            hosted_zone=hosted_zone)

        app = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            'Webservice',
            cluster=cluster,
            domain_name="hackernews.greengocloud.com",
            domain_zone=hosted_zone,
            certificate=certificate,
            assign_public_ip=True,
            cpu=256,
            memory_limit_mib=512,
            desired_count=1,
            task_image_options=ecs_patterns.
            ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_asset(
                    os.path.join(os.path.dirname(__file__), 'webapp')),
                container_port=8080,
            ))

        app.target_group.configure_health_check(port='8080',
                                                healthy_http_codes='302')
Example 15
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # set variables for site s3 bucket
        bucket_name = DOMAIN_WEBSITE['bucket_name']
        website_index = DOMAIN_WEBSITE['website_index']
        website_error = DOMAIN_WEBSITE['website_error']
        website_code_folder = DOMAIN_WEBSITE['website_code_folder']
        site_domain = DOMAIN_WEBSITE['site_domain']
        certificate_domain = DOMAIN_WEBSITE['certificate_domain']
        api_domain = DOMAIN_WEBSITE['api_domain']
        hosted_zone_name = DOMAIN_WEBSITE['hosted_zone_name']
        hosted_zone_id = DOMAIN_WEBSITE['hosted_zone_id']

        #self.lambda_code = aws_lambda.Code.from_cfn_parameters()

        # retrieve hosted zone
        hosted_zone = aws_route53.HostedZone.from_hosted_zone_attributes(
            self,
            'hostedZone',
            hosted_zone_id=hosted_zone_id,
            zone_name=hosted_zone_name)

        # set variables for backend
        lambda_code_location = "jukebike/backend/"

        # Construct code goes here
        CfnOutput(self, "Site", value=f"https://{site_domain}")

        # Content bucket
        site_bucket = aws_s3.Bucket(self,
                                    "websitebucket",
                                    bucket_name=bucket_name,
                                    website_index_document=website_index,
                                    website_error_document=website_error,
                                    public_read_access=True,
                                    removal_policy=RemovalPolicy.DESTROY)
        CfnOutput(self, "BucketArn", value=site_bucket.bucket_arn)
        CfnOutput(self, "WebsiteUrl", value=site_bucket.bucket_website_url)

        # Certificate
        cert = aws_certificatemanager.DnsValidatedCertificate(
            self,
            "certificate_website",
            domain_name=site_domain,
            hosted_zone=hosted_zone,
            region="us-east-1")
        CfnOutput(self, 'CertificateArn', value=cert.certificate_arn)

        distr = CloudFrontWebDistribution(
            self,
            "SiteDistribution",
            alias_configuration=AliasConfiguration(
                acm_cert_ref=cert.certificate_arn,
                names=[site_domain],
                ssl_method=aws_cloudfront.SSLMethod.SNI,
                security_policy=aws_cloudfront.SecurityPolicyProtocol.
                TLS_V1_1_2016,
            ),
            origin_configs=[
                SourceConfiguration(
                    s3_origin_source=aws_cloudfront.S3OriginConfig(
                        s3_bucket_source=site_bucket),
                    behaviors=[
                        aws_cloudfront.Behavior(is_default_behavior=True)
                    ])
            ])
        CfnOutput(self, "DistributionId", value=distr.distribution_id)
        #
        # Route 53 alias record for the cloudfront distribution
        aws_route53.ARecord(self,
                            "SiteAliasRecord",
                            zone=hosted_zone,
                            target=aws_route53.AddressRecordTarget.from_alias(
                                aws_route53_targets.CloudFrontTarget(distr)),
                            record_name=site_domain)

        aws_s3_deployment.BucketDeployment(
            self,
            "DeployWithInvalidation",
            sources=[aws_s3_deployment.Source.asset(website_code_folder)],
            destination_bucket=site_bucket,
            distribution=distr,
            distribution_paths=["/*"])

        ########################### Backend #################

        certificate = aws_certificatemanager.DnsValidatedCertificate(
            self,
            "domaincertificate",
            hosted_zone=hosted_zone,
            region='us-east-1',
            domain_name=certificate_domain,
            validation_method=aws_certificatemanager.ValidationMethod.DNS)

        ############# Search API ###################

        search_lambda = aws_lambda.Function(
            self,
            "SearchLambda",
            code=aws_lambda.Code.from_asset(lambda_code_location),
            handler="search.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_7)

        CfnOutput(self, "SearchLambda_", value=search_lambda.function_arn)

        search_api = aws_apigateway.LambdaRestApi(
            self,
            'SearchSpotifyEndpoint',
            handler=search_lambda,
        )

        ############# Whats-Next API ###################
        whats_next_lambda = aws_lambda.Function(
            self,
            "WhatsNextLambda",
            code=aws_lambda.Code.from_asset(lambda_code_location),
            handler="whats_next.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_7)
        CfnOutput(self,
                  "WhatsNextLambda_",
                  value=whats_next_lambda.function_arn)

        whats_next_api = aws_apigateway.LambdaRestApi(
            self,
            'WhatsNextEndpoint',
            handler=whats_next_lambda,
        )

        ############# Wish-Track API ###################
        wish_track_lambda = aws_lambda.Function(
            self,
            "WishTrackLambda",
            code=aws_lambda.Code.from_asset(lambda_code_location),
            handler="wish_track.handler",
            runtime=aws_lambda.Runtime.PYTHON_3_7)
        CfnOutput(self,
                  "WishTrackLambda_",
                  value=wish_track_lambda.function_arn)

        wish_track_api = aws_apigateway.LambdaRestApi(
            self,
            'WishTrackEndpoint',
            handler=wish_track_lambda,
        )

        ################## Publish APIs with custom domain name ##############
        # Prerequisites:
        # [Manual] 1) Registered domain with Route 53 (e.g. jacubasch.com)
        # 2) Certificate in North Virginia (us-east-1) for the domain (e.g. api.jacubasch.com) -> AWS Certificate Manager
        # 3) API Gateway custom domain with edge endpoint type
        # 4) Alias record in Route 53 forwarding to the CloudFront target domain name (can be found in API Gateway)
        # TODO: move this into a separate base stack?
        # https://medium.com/@maciejtreder/custom-domain-in-aws-api-gateway-a2b7feaf9c74

        domain = aws_apigateway.DomainName(
            self,
            'searchDomain',
            certificate=certificate,
            endpoint_type=aws_apigateway.EndpointType.EDGE,
            domain_name=api_domain)
        domain.add_base_path_mapping(target_api=search_api, base_path="search")
        domain.add_base_path_mapping(target_api=whats_next_api,
                                     base_path="whats-next")
        domain.add_base_path_mapping(target_api=wish_track_api,
                                     base_path="wish-track")

        target = aws_route53_targets.ApiGatewayDomain(domain)
        record_target = aws_route53.RecordTarget.from_alias(target)
        alias_record = aws_route53.ARecord(self,
                                           'aliasRecord',
                                           target=record_target,
                                           record_name=api_domain,
                                           zone=hosted_zone)

        CfnOutput(self, "AliasRecord_", value=alias_record.to_string())

        ################## Dynamo DB ##############

        # create dynamo table
        track_table = aws_dynamodb.Table(
            self,
            "track_table",
            partition_key=aws_dynamodb.Attribute(
                name="track_uri", type=aws_dynamodb.AttributeType.STRING))

        # grant permission to lambda  & provide environment variable
        track_table.grant_write_data(wish_track_lambda)
        wish_track_lambda.add_environment("TRACK_TABLE_NAME",
                                          track_table.table_name)

        track_table.grant_read_write_data(whats_next_lambda)
        whats_next_lambda.add_environment("TRACK_TABLE_NAME",
                                          track_table.table_name)
Example 16
def add_static_site(stack: CDKMasterStack, domain: str, bucket_name: str, prefix: str = ""):

    # Construct code goes here
    core.CfnOutput(stack, f"{prefix}Site", value=f"https://{domain}")

    # Content bucket
    kix.info("Bucket Name: " + bucket_name)
    site_bucket = aws_s3.Bucket(
        stack, f"{prefix}SiteBucket",
        bucket_name=bucket_name,
        website_index_document="index.html",
        website_error_document="index.html",
        public_read_access=True,
        removal_policy=core.RemovalPolicy.DESTROY)
    core.CfnOutput(stack, f"{prefix}BucketArn", value=site_bucket.bucket_arn)

    # Certificate
    kix.info("Creating Certificate")
    cert = aws_certificatemanager.DnsValidatedCertificate(
        stack, f"{prefix}ValidatedCert",
        domain_name=domain,
        hosted_zone=stack.zone)
    core.CfnOutput(stack, f"{prefix}CertificateArn", value=cert.certificate_arn)

    kix.info("Creating Distribution")
    distribution = aws_cloudfront.CloudFrontWebDistribution(
        stack, f"{prefix}SiteDistribution",
        alias_configuration=aws_cloudfront.AliasConfiguration(
            acm_cert_ref=cert.certificate_arn,
            names=[domain],
            ssl_method=aws_cloudfront.SSLMethod.SNI,
            security_policy=aws_cloudfront.SecurityPolicyProtocol.TLS_V1_1_2016,
        ),
        origin_configs=[
            aws_cloudfront.SourceConfiguration(
                s3_origin_source=aws_cloudfront.S3OriginConfig(s3_bucket_source=site_bucket),
                behaviors=[aws_cloudfront.Behavior(is_default_behavior=True)]
            )],
        error_configurations=[
            aws_cloudfront.CfnDistribution.CustomErrorResponseProperty(
                error_code=403,
                response_code=200,
                response_page_path="/index.html"
            ),
            aws_cloudfront.CfnDistribution.CustomErrorResponseProperty(
                error_code=404,
                response_code=200,
                response_page_path="/index.html"
            )
        ]
    )
    core.CfnOutput(stack, f"{prefix}DistributionId", value=distribution.distribution_id)
    a_record_target = aws_route53.AddressRecordTarget.from_alias(aws_route53_targets.CloudFrontTarget(distribution))

    # Route 53 alias record for the CloudFront distribution
    kix.info("Routing A-Record Alias")
    aws_route53.ARecord(
        stack, f"{prefix}SiteAliasRecord",
        zone=stack.zone,
        target=a_record_target,
        record_name=domain)
Example 17
    def add_webapp(self):
        """
        Adds the ALB, ECS-Service and Cognito Login Action on the ALB.
        """

        # Create the ecs cluster to house our service, this also creates a VPC in 2 AZs
        cluster = ecs.Cluster(self, "cluster")

        # Load the hosted zone
        hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
            self,
            "hosted-zone",
            hosted_zone_id=self.config.hosted_zone_id,
            zone_name=self.config.hosted_zone_name)

        # Create a Certificate for the ALB
        certificate = certificatemanager.DnsValidatedCertificate(
            self,
            "certificate",
            hosted_zone=hosted_zone,
            domain_name=self.config.application_dns_name)

        # Define the Docker Image for our container (the CDK will do the build and push for us!)
        docker_image = ecr_assets.DockerImageAsset(
            self,
            "jwt-app",
            directory=os.path.join(os.path.dirname(__file__), "..", "src"))

        # This creates the ALB with an ECS Service on Fargate
        fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "fargate-service",
            cluster=cluster,
            certificate=certificate,
            domain_name=self.config.application_dns_name,
            domain_zone=hosted_zone,
            desired_count=int(self.config.backend_desired_count),
            task_image_options=ecs_patterns.
            ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_docker_image_asset(docker_image),
                environment={
                    "PORT": "80",
                    "LOGOUT_URL": self.user_pool_logout_url,
                    "USER_INFO_URL": self.user_pool_user_info_url,
                }),
            redirect_http=True)

        # Configure the health checks to use our /healthcheck endpoint
        fargate_service.target_group.configure_health_check(
            enabled=True, path="/healthcheck", healthy_http_codes="200")

        # Add an additional HTTPS egress rule to the Load Balancers
        # security group to talk to Cognito, by default the construct
        # doesn't allow the ALB to make an outbound request
        lb_security_group = fargate_service.load_balancer.connections.security_groups[
            0]

        lb_security_group.add_egress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                string_representation="443",
                                from_port=443,
                                to_port=443),
            description="Outbound HTTPS traffic to get to Cognito")

        # Allow 10 seconds for in flight requests before termination,
        # the default of 5 minutes is much too high.
        fargate_service.target_group.set_attribute(
            key="deregistration_delay.timeout_seconds", value="10")

        # Add the authentication actions as a rule with priority
        fargate_service.listener.add_action(
            "authenticate-rule",
            priority=1000,
            action=elb_actions.AuthenticateCognitoAction(
                next=elb.ListenerAction.forward(
                    target_groups=[fargate_service.target_group]),
                user_pool=self.user_pool,
                user_pool_client=self.user_pool_client,
                user_pool_domain=self.user_pool_custom_domain,
            ),
            host_header=self.config.application_dns_name)

        # Overwrite the default action to show a 403 fixed response in case somebody
        # accesses the website via the alb URL directly
        cfn_listener: elb.CfnListener = fargate_service.listener.node.default_child
        cfn_listener.default_actions = [{
            "type": "fixed-response",
            "fixedResponseConfig": {
                "statusCode": "403",
                "contentType": "text/plain",
                "messageBody": "This is not a valid endpoint!"
            }
        }]
Example 18
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        domain_name = self.node.try_get_context('domain_name')

        subdomain = 'enclave.{}'.format(domain_name)

        zone = route53.HostedZone.from_lookup(
            self,
            'Zone',
            domain_name=domain_name,
        )

        certificate = acm.DnsValidatedCertificate(
            self,
            'Certificate',
            domain_name=subdomain,
            hosted_zone=zone,
        )

        vpc = ec2.Vpc(
            self,
            'Vpc',
            cidr='10.11.12.0/24',
            max_azs=2,
            # Only need public IPs, so no need for private subnets
            subnet_configuration=[
                ec2.SubnetConfiguration(name='public',
                                        subnet_type=ec2.SubnetType.PUBLIC)
            ])

        role = iam.Role(
            self,
            'Ec2SsmRole',
            assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'AmazonSSMManagedInstanceCore')
            ],
        )

        role.add_to_policy(
            iam.PolicyStatement(
                actions=['ec2:AssociateEnclaveCertificateIamRole'],
                resources=[
                    certificate.certificate_arn,
                    role.role_arn,
                ],
            ))

        role.add_to_policy(
            iam.PolicyStatement(
                actions=['s3:GetObject'],
                resources=['arn:aws:s3:::aws-ec2-enclave-certificate-*/*'],
            ))

        role.add_to_policy(
            iam.PolicyStatement(
                actions=['kms:Decrypt'],
                resources=['arn:aws:kms:*:*:key/*'],
            ))

        role.add_to_policy(
            iam.PolicyStatement(
                actions=['iam:GetRole'],
                resources=[role.role_arn],
            ))

        nginx_config = s3_assets.Asset(
            self,
            'NginxConfig',
            path='./files/nginx.conf',
            readers=[role],
        )

        enclave_config = s3_assets.Asset(
            self,
            'EncalveConfig',
            path='./files/acm.yaml',
            readers=[role],
        )

        # Source: https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave-refapp.html
        user_data = ec2.UserData.for_linux()
        user_data.add_commands(
            'curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"',
            'unzip awscliv2.zip',
            './aws/install',
            '/usr/local/bin/aws ec2 associate-enclave-certificate-iam-role --certificate-arn {certificate_arn} --role-arn {role_arn} --region {region}'
            .format(
                certificate_arn=certificate.certificate_arn,
                role_arn=role.role_arn,
                region=self.region,
            ),
            'aws s3 cp s3://{bucket}/{key} /etc/nginx/nginx.conf'.format(
                bucket=nginx_config.s3_bucket_name,
                key=nginx_config.s3_object_key,
            ),
            'sed -i "s+DOMAIN_NAME+{domain_name}+g" /etc/nginx/nginx.conf'.
            format(domain_name=subdomain, ),
            'aws s3 cp s3://{bucket}/{key} /etc/nitro_enclaves/acm.yaml'.
            format(
                bucket=enclave_config.s3_bucket_name,
                key=enclave_config.s3_object_key,
            ),
            'sed -i "s+CERTIFICATE_ARN+{certificate_arn}+g" /etc/nitro_enclaves/acm.yaml'
            .format(certificate_arn=certificate.certificate_arn, ),
            'systemctl start nitro-enclaves-acm.service',
            'systemctl enable nitro-enclaves-acm',
        )

        instance = ec2.Instance(
            self,
            'Instance',
            role=role,
            vpc=vpc,
            user_data=user_data,
            # AWS Marketplace AMI: AWS Certificate Manager for Nitro Enclaves
            # Source: https://aws.amazon.com/marketplace/server/configuration?productId=3f5ee4f8-1439-4bce-ac57-e794a4ca82f9&ref_=psb_cfg_continue
            machine_image=ec2.MachineImage.lookup(
                name='ACM-For-Nitro-Enclaves-*',
                owners=['679593333241'],
            ),
            # Nitro Enclaves requires at least 4 vCPUs and does not run on Graviton
            instance_type=ec2.InstanceType.of(
                instance_class=ec2.InstanceClass.COMPUTE5_AMD,
                instance_size=ec2.InstanceSize.XLARGE,
            ),
        )

        # Unsupported property by CDK
        instance.instance.enclave_options = {'enabled': True}

        # Allow inbound HTTPS requests
        instance.connections.allow_from_any_ipv4(ec2.Port.tcp(443))

        # CDK route53 construct does not support EC2 instance as target
        route53.CfnRecordSet(
            self,
            'DnsRecord',
            name=subdomain,
            type='A',
            ttl='60',
            resource_records=[instance.instance_public_ip],
            hosted_zone_id=zone.hosted_zone_id,
        )
Example 19
    def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        admin_role = iam.Role.from_role_arn(
            self, 'admin_role', 'arn:aws:iam::989584467037:role/Admin')

        # Create EC2 key pair
        key = KeyPair(self,
                      "app-instances",
                      name="app-instances",
                      description="Used for application instances",
                      store_public_key=True)

        key.grant_read_on_private_key(admin_role)
        key.grant_read_on_public_key(admin_role)

        # Create Bastion
        #bastion = ec2.BastionHostLinux(
        #    self,
        #    "myBastion",
        #    vpc=vpc,
        #    subnet_selection=ec2.SubnetSelection(
        #        subnet_type=ec2.SubnetType.PUBLIC
        #    ),
        #    instance_name="myBastionHostLinux",
        #    instance_type=ec2.InstanceType(
        #        instance_type_identifier="t2.micro"
        #    ),
        #)

        ## Setup key_name for EC2 instance login if you don't use Session Manager
        ## bastion.instance.instance.add_property_override("KeyName", key_name)

        #bastion.connections.allow_from_any_ipv4(
        #    ec2.Port.tcp(22), "Internet access SSH"
        #)

        # Create public hosted zone
        #zone = r53.PublicHostedZone(self, "myZone", zone_name="randomhuman.org")
        # There's an issue with timeouts around ACM and Route53, so for
        # demonstration purposes, it will be best to use a pre-baked zone....
        zone = r53.HostedZone.from_lookup(self,
                                          "MOStateAppZone",
                                          domain_name="randomhuman.org")

        # Create certificate
        cert = acm.DnsValidatedCertificate(
            self,
            "MOStateAppCert",
            hosted_zone=zone,
            domain_name="*.randomhuman.org",
        )

        # Create ALB
        alb = elb.ApplicationLoadBalancer(
            self,
            "MOStateAppALB",
            vpc=vpc,
            internet_facing=True,
            load_balancer_name="MOStateAppALB",
        )
        alb.connections.allow_from_any_ipv4(ec2.Port.tcp(443),
                                            "Internet access ALB 443")
        listener = alb.add_listener("https",
                                    certificates=[cert],
                                    port=443,
                                    open=True)

        # Create Autoscaling Group with fixed 2*EC2 hosts
        self.asg = autoscaling.AutoScalingGroup(
            self,
            "MOStateAppASG",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
            instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
            machine_image=linux_ami,
            key_name=key.key_pair_name,
            user_data=ec2.UserData.custom(user_data),
            desired_capacity=2,
            min_capacity=1,
            max_capacity=6,
            # block_devices=[
            #     autoscaling.BlockDevice(
            #         device_name="/dev/xvda",
            #         volume=autoscaling.BlockDeviceVolume.ebs(
            #             volume_type=autoscaling.EbsDeviceVolumeType.GP2,
            #             volume_size=12,
            #             delete_on_termination=True
            #         )),
            #     autoscaling.BlockDevice(
            #         device_name="/dev/sdb",
            #         volume=autoscaling.BlockDeviceVolume.ebs(
            #             volume_size=20)
            #         # 20GB, with default volume_type gp2
            #     )
            # ]
        )

        self.asg.connections.allow_from(
            alb,
            ec2.Port.tcp(443),
            "ALB access 443 port of EC2 in Autoscaling Group",
        )
        listener.add_targets("addTargetGroup", port=443, targets=[self.asg])

        core.CfnOutput(self, "Output", value=alb.load_balancer_dns_name)
Example 20
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # define fully qualified domain name for website
        site_domain = '{sub}.{zone}'.format(sub=WEBSITE_SUBDOMAIN,
                                            zone=DNS_ZONE_NAME)

        # create an s3 bucket to hold the static site
        bucket = s3.Bucket(
            self,
            'RootBucket',
            bucket_name=site_domain,
            website_index_document='index.html',
            website_error_document='404.html',
            public_read_access=True,
            versioned=False,
            removal_policy=core.RemovalPolicy.
            DESTROY,  # this refers to the case where a stack is destroyed, where we
            # want to destroy the static site bucket as well
        )

        # establish the route53 domain name
        dns_zone = r53.HostedZone.from_lookup(self,
                                              'WebsiteZone',
                                              domain_name='jeichenhofer.com')
        core.CfnOutput(self, 'site_address', value='https://' +
                       site_domain)  # output the address to cloudformation

        # create a tls certificate
        cert_arn = acm.DnsValidatedCertificate(
            self,
            'SiteCertificate',
            domain_name=site_domain,
            hosted_zone=dns_zone,
        ).certificate_arn
        core.CfnOutput(self, 'site_cert', value=cert_arn)

        # create CloudFront distro for https and caching
        dist = cloudfront.CloudFrontWebDistribution(
            self,
            'SiteDistribution',
            alias_configuration={
                "acmCertRef": cert_arn,
                "names": [site_domain],
                "sslMethod": cloudfront.SSLMethod.SNI,
                "securityPolicy":
                cloudfront.SecurityPolicyProtocol.TLS_V1_2_2018
            },
            origin_configs=[{
                "s3OriginSource": {
                    "s3BucketSource": bucket
                },
                "behaviors": [{
                    "isDefaultBehavior": True
                }],
            }])
        core.CfnOutput(self, 'distribution_id', value=dist.distribution_id)

        # point the domain name to the cloudfront distribution
        r53.ARecord(self,
                    'SiteAliasRecord',
                    zone=dns_zone,
                    record_name=site_domain,
                    target=r53.AddressRecordTarget.from_alias(
                        r53_targets.CloudFrontTarget(dist)))

        # deploy the static files to the bucket
        s3deploy.BucketDeployment(
            self,
            'DeployWithInvalidation',
            sources=[s3deploy.Source.asset('./site-contents')],
            destination_bucket=bucket,
            distribution=dist,
            distribution_paths=['/*'],
        )
Example 21
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 domain_name: str,
                 identity_provider_client_id: str,
                 identity_provider_client_secret: str,
                 identity_provider_client_url: str,
                 identity_provider_realm: str,
                 identity_provider_scope: str = 'openid',
                 vpc: ec2.IVpc = None,
                 cluster: ecs.ICluster = None,
                 load_balancer: elbv2.IApplicationLoadBalancer = None,
                 log_group: logs.ILogGroup = None,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        if vpc is None:
            vpc = ec2.Vpc(self, 'ApplicationkVpc')

        if cluster is None:
            cluster = ecs.Cluster(self, 'ApplicationCluster', vpc=vpc)

        if log_group is None:
            log_group = logs.LogGroup(
                self,
                'ApplicationLogGroup',
                retention=logs.RetentionDays.ONE_WEEK,
                removal_policy=core.RemovalPolicy.DESTROY)

        application_task_role = iam.Role(
            self,
            'ApplicationTaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com'))

        application_hosted_zone = route53.HostedZone.from_lookup(
            self, 'ApplicationHostedZone', domain_name=domain_name)

        application_certificate = acm.DnsValidatedCertificate(
            self,
            'FrontendAlbCertificate',
            hosted_zone=application_hosted_zone,
            domain_name='app.' + domain_name)

        application_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            'ApplicationLoadBalancedFargateService',
            cluster=cluster,
            load_balancer=load_balancer,
            task_image_options=ecs_patterns.
            ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_asset("application"),
                enable_logging=True,
                log_driver=ecs.AwsLogDriver(stream_prefix='application',
                                            log_group=log_group),
                task_role=application_task_role,
                container_port=8080,
            ),
            memory_limit_mib=512,
            cpu=256,
            desired_count=1,
            public_load_balancer=True,
            domain_name='app.' + domain_name,
            domain_zone=application_hosted_zone,
            protocol=elbv2.ApplicationProtocol.HTTPS,
        )

        application_service.target_group.enable_cookie_stickiness(
            core.Duration.seconds(24 * 60 * 60))
        application_service.target_group.configure_health_check(
            port='8080',
            path='/',
            timeout=core.Duration.seconds(20),
            healthy_threshold_count=2,
            unhealthy_threshold_count=10,
            interval=core.Duration.seconds(30),
        )

        application_service.listener.add_certificates(
            'ApplicationServiceCertificate',
            certificates=[application_certificate])

        application_service.listener.add_action(
            'DefaultAction',
            action=elbv2.ListenerAction.authenticate_oidc(
                authorization_endpoint=identity_provider_client_url +
                '/auth/realms/' + identity_provider_realm +
                '/protocol/openid-connect/auth',
                token_endpoint=identity_provider_client_url + '/auth/realms/' +
                identity_provider_realm + '/protocol/openid-connect/token',
                user_info_endpoint=identity_provider_client_url +
                '/auth/realms/' + identity_provider_realm +
                '/protocol/openid-connect/userinfo',
                issuer=identity_provider_client_url + '/auth/realms/' +
                identity_provider_realm,
                client_id=identity_provider_client_id,
                client_secret=core.SecretValue(
                    identity_provider_client_secret),
                scope=identity_provider_scope,
                on_unauthenticated_request=elbv2.UnauthenticatedAction.
                AUTHENTICATE,
                next=elbv2.ListenerAction.forward(
                    [application_service.target_group]),
            ))

        application_service.load_balancer.connections.allow_to_any_ipv4(
            port_range=ec2.Port(
                from_port=443,
                to_port=443,
                protocol=ec2.Protocol.TCP,
                string_representation='Allow ALB to verify token'))
Example 22
    def __init__(self, scope: core.Construct, id: str, api: apigateway.RestApi,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        stack = core.Stack.of(self)

        bucket = s3.Bucket(self, 'Storage')

        s3_deployment.BucketDeployment(
            self,
            'Deployment',
            sources=[
                s3_deployment.Source.asset('./src/html'),
            ],
            destination_bucket=bucket,
        )

        origin_identity = cloudfront.OriginAccessIdentity(self, 'Identity')

        bucket.grant_read(origin_identity.grant_principal)

        s3_origin = cloudfront.SourceConfiguration(
            s3_origin_source=cloudfront.S3OriginConfig(
                s3_bucket_source=bucket,
                origin_access_identity=origin_identity,
            ),
            behaviors=[
                cloudfront.Behavior(
                    default_ttl=core.Duration.days(1),
                    min_ttl=core.Duration.days(1),
                    max_ttl=core.Duration.days(31),
                    is_default_behavior=True,
                )
            ])

        api_origin = cloudfront.SourceConfiguration(
            origin_path='/{}'.format(api.deployment_stage.stage_name),
            custom_origin_source=cloudfront.CustomOriginConfig(
                domain_name='{}.execute-api.{}.{}'.format(
                    api.rest_api_id, stack.region, stack.url_suffix), ),
            behaviors=[
                cloudfront.Behavior(default_ttl=core.Duration.seconds(0),
                                    min_ttl=core.Duration.seconds(0),
                                    max_ttl=core.Duration.seconds(0),
                                    path_pattern='/stock/*',
                                    forwarded_values={
                                        'query_string': True,
                                        'query_string_cache_keys':
                                        ['start', 'end']
                                    })
            ])

        domain_name = 'demo.training'
        subdomain = 'finance.{}'.format(domain_name)

        zone = route53.HostedZone.from_lookup(
            self,
            'Zone',
            domain_name=domain_name,
        )

        certificate = acm.DnsValidatedCertificate(
            self,
            'Certificate',
            domain_name=subdomain,
            hosted_zone=zone,
            region='us-east-1',
        )

        distribution = cloudfront.CloudFrontWebDistribution(
            self,
            'CDN',
            price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
            origin_configs=[
                s3_origin,
                api_origin,
            ],
            alias_configuration=cloudfront.AliasConfiguration(
                acm_cert_ref=certificate.certificate_arn,
                names=[subdomain],
            ))

        route53.ARecord(
            self,
            'DnsRecord',
            record_name=subdomain,
            target=route53.AddressRecordTarget.from_alias(
                alias_target=route53_targets.CloudFrontTarget(distribution)),
            zone=zone,
        )
Example 23
    def __init__(self, scope: core.Construct, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        # note: assumes the hosted zone is named after the domain
        hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
            self,
            "HostedZone",
            hosted_zone_id=startuptoolbag_config.hosted_zone_id,
            zone_name=startuptoolbag_config.website_domain_name)

        # SSL/TLS Certificate
        # https://github.com/aws/aws-cdk/pull/8552
        # Experimental vs 'Certificate' (which requires validation in the console)
        tls_cert = certificatemanager.DnsValidatedCertificate(
            self,
            "SiteCertificate",
            hosted_zone=hosted_zone,
            domain_name=f'*.{startuptoolbag_config.website_domain_name}',
            subject_alternative_names=[
                startuptoolbag_config.website_domain_name
            ],
            region='us-east-1',
        )

        # Import the bucket that was created outside the stack
        self.www_site_bucket = s3.Bucket.from_bucket_name(
            self, 'SiteBucket', core.Fn.import_value("WWWSITEBUCKETNAME"))

        # CloudFront distribution that provides HTTPS - for www
        www_alias_configuration = cloudfront.AliasConfiguration(
            acm_cert_ref=tls_cert.certificate_arn,
            names=[f'www.{startuptoolbag_config.website_domain_name}'],
            ssl_method=cloudfront.SSLMethod.SNI,
            security_policy=cloudfront.SecurityPolicyProtocol.TLS_V1_1_2016)

        www_source_configuration = cloudfront.SourceConfiguration(
            s3_origin_source=cloudfront.S3OriginConfig(
                s3_bucket_source=self.www_site_bucket),
            behaviors=[cloudfront.Behavior(is_default_behavior=True)])

        www_distribution = cloudfront.CloudFrontWebDistribution(
            self,
            'SiteDistribution',
            alias_configuration=www_alias_configuration,
            origin_configs=[www_source_configuration])

        route53.ARecord(
            self,
            'CloudFrontARecord',
            zone=hosted_zone,
            record_name=f'www.{startuptoolbag_config.website_domain_name}',  # site domain
            target=aws_route53.RecordTarget.from_alias(
                aws_route53_targets.CloudFrontTarget(www_distribution)))

        # Redirect the naked (apex) domain to www
        redirect = aws_route53_patterns.HttpsRedirect(
            self,
            'NakedRedirect',
            record_names=[startuptoolbag_config.website_domain_name],
            target_domain=f'www.{startuptoolbag_config.website_domain_name}',
            zone=hosted_zone,
            certificate=tls_cert)

        # Create the API Gateway
        self.rest_api = aws_apigateway.RestApi(self,
                                               'RestApiGateway',
                                               deploy=False)
        api_domain_name = f'api.{startuptoolbag_config.website_domain_name}'
        domain = self.rest_api.add_domain_name('APIDomain',
                                               certificate=tls_cert,
                                               domain_name=api_domain_name)

        route53.ARecord(
            self,
            'APIGWAliasRecord',
            zone=hosted_zone,
            record_name=api_domain_name,  #site domain
            target=aws_route53.RecordTarget.from_alias(
                aws_route53_targets.ApiGatewayDomain(domain)))
Esempio n. 24
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        eks_vpc = ec2.Vpc(self, "VPC", cidr="10.0.0.0/16")
        self.eks_vpc = eks_vpc

        # Create IAM Role For code-server bastion
        bastion_role = iam.Role(
            self,
            "BastionRole",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("ec2.amazonaws.com"),
                iam.AccountRootPrincipal()),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AdministratorAccess")
            ])
        self.bastion_role = bastion_role
        # Create EC2 Instance Profile for that Role
        instance_profile = iam.CfnInstanceProfile(
            self, "InstanceProfile", roles=[bastion_role.role_name])

        # Create SecurityGroup for the Control Plane ENIs
        eks_security_group = ec2.SecurityGroup(self,
                                               "EKSSecurityGroup",
                                               vpc=eks_vpc,
                                               allow_all_outbound=True)

        eks_security_group.add_ingress_rule(ec2.Peer.ipv4('10.0.0.0/16'),
                                            ec2.Port.all_traffic())

        # Create an EKS Cluster
        eks_cluster = eks.Cluster(
            self,
            "cluster",
            cluster_name="cluster",
            vpc=eks_vpc,
            masters_role=bastion_role,
            default_capacity_type=eks.DefaultCapacityType.NODEGROUP,
            default_capacity_instance=ec2.InstanceType("m5.large"),
            default_capacity=2,
            security_group=eks_security_group,
            endpoint_access=eks.EndpointAccess.PUBLIC_AND_PRIVATE,
            version=eks.KubernetesVersion.V1_17)
        self.cluster_cert = eks_cluster.cluster_certificate_authority_data

        # Deploy ALB Ingress Controller
        # Create the k8s Service account and corresponding IAM Role mapped via IRSA
        alb_service_account = eks_cluster.add_service_account(
            "alb-ingress-controller",
            name="alb-ingress-controller",
            namespace="kube-system")

        # Create the PolicyStatements to attach to the role
        # I couldn't find a way to get this to work with a PolicyDocument and there are 10 of these
        alb_policy_statement_json_1 = {
            "Effect":
            "Allow",
            "Action": [
                "acm:DescribeCertificate", "acm:ListCertificates",
                "acm:GetCertificate"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_2 = {
            "Effect":
            "Allow",
            "Action": [
                "ec2:AuthorizeSecurityGroupIngress", "ec2:CreateSecurityGroup",
                "ec2:CreateTags", "ec2:DeleteTags", "ec2:DeleteSecurityGroup",
                "ec2:DescribeAccountAttributes", "ec2:DescribeAddresses",
                "ec2:DescribeInstances", "ec2:DescribeInstanceStatus",
                "ec2:DescribeInternetGateways",
                "ec2:DescribeNetworkInterfaces", "ec2:DescribeSecurityGroups",
                "ec2:DescribeSubnets", "ec2:DescribeTags", "ec2:DescribeVpcs",
                "ec2:ModifyInstanceAttribute",
                "ec2:ModifyNetworkInterfaceAttribute",
                "ec2:RevokeSecurityGroupIngress"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_3 = {
            "Effect":
            "Allow",
            "Action": [
                "elasticloadbalancing:AddListenerCertificates",
                "elasticloadbalancing:AddTags",
                "elasticloadbalancing:CreateListener",
                "elasticloadbalancing:CreateLoadBalancer",
                "elasticloadbalancing:CreateRule",
                "elasticloadbalancing:CreateTargetGroup",
                "elasticloadbalancing:DeleteListener",
                "elasticloadbalancing:DeleteLoadBalancer",
                "elasticloadbalancing:DeleteRule",
                "elasticloadbalancing:DeleteTargetGroup",
                "elasticloadbalancing:DeregisterTargets",
                "elasticloadbalancing:DescribeListenerCertificates",
                "elasticloadbalancing:DescribeListeners",
                "elasticloadbalancing:DescribeLoadBalancers",
                "elasticloadbalancing:DescribeLoadBalancerAttributes",
                "elasticloadbalancing:DescribeRules",
                "elasticloadbalancing:DescribeSSLPolicies",
                "elasticloadbalancing:DescribeTags",
                "elasticloadbalancing:DescribeTargetGroups",
                "elasticloadbalancing:DescribeTargetGroupAttributes",
                "elasticloadbalancing:DescribeTargetHealth",
                "elasticloadbalancing:ModifyListener",
                "elasticloadbalancing:ModifyLoadBalancerAttributes",
                "elasticloadbalancing:ModifyRule",
                "elasticloadbalancing:ModifyTargetGroup",
                "elasticloadbalancing:ModifyTargetGroupAttributes",
                "elasticloadbalancing:RegisterTargets",
                "elasticloadbalancing:RemoveListenerCertificates",
                "elasticloadbalancing:RemoveTags",
                "elasticloadbalancing:SetIpAddressType",
                "elasticloadbalancing:SetSecurityGroups",
                "elasticloadbalancing:SetSubnets",
                "elasticloadbalancing:SetWebAcl"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_4 = {
            "Effect":
            "Allow",
            "Action": [
                "iam:CreateServiceLinkedRole", "iam:GetServerCertificate",
                "iam:ListServerCertificates"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_5 = {
            "Effect": "Allow",
            "Action": ["cognito-idp:DescribeUserPoolClient"],
            "Resource": "*"
        }
        alb_policy_statement_json_6 = {
            "Effect":
            "Allow",
            "Action": [
                "waf-regional:GetWebACLForResource", "waf-regional:GetWebACL",
                "waf-regional:AssociateWebACL",
                "waf-regional:DisassociateWebACL"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_7 = {
            "Effect": "Allow",
            "Action": ["tag:GetResources", "tag:TagResources"],
            "Resource": "*"
        }
        alb_policy_statement_json_8 = {
            "Effect": "Allow",
            "Action": ["waf:GetWebACL"],
            "Resource": "*"
        }
        alb_policy_statement_json_9 = {
            "Effect":
            "Allow",
            "Action": [
                "wafv2:GetWebACL", "wafv2:GetWebACLForResource",
                "wafv2:AssociateWebACL", "wafv2:DisassociateWebACL"
            ],
            "Resource":
            "*"
        }
        alb_policy_statement_json_10 = {
            "Effect":
            "Allow",
            "Action": [
                "shield:DescribeProtection", "shield:GetSubscriptionState",
                "shield:DeleteProtection", "shield:CreateProtection",
                "shield:DescribeSubscription", "shield:ListProtections"
            ],
            "Resource":
            "*"
        }

        # Attach the necessary permissions
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_1))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_2))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_3))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_4))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_5))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_6))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_7))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_8))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_9))
        alb_service_account.add_to_policy(
            iam.PolicyStatement.from_json(alb_policy_statement_json_10))
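        # (Sketch) the ten attachments above could equally be written as a loop over the
        # statement dicts defined earlier; behavior is identical:
        #
        #   for statement_json in (alb_policy_statement_json_1, alb_policy_statement_json_2,
        #                          alb_policy_statement_json_3, alb_policy_statement_json_4,
        #                          alb_policy_statement_json_5, alb_policy_statement_json_6,
        #                          alb_policy_statement_json_7, alb_policy_statement_json_8,
        #                          alb_policy_statement_json_9, alb_policy_statement_json_10):
        #       alb_service_account.add_to_policy(
        #           iam.PolicyStatement.from_json(statement_json))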

        # Deploy the ALB Ingress Controller from the Helm chart
        eks_cluster.add_helm_chart(
            "aws-alb-ingress-controller",
            chart="aws-alb-ingress-controller",
            repository=
            "http://storage.googleapis.com/kubernetes-charts-incubator",
            namespace="kube-system",
            values={
                "clusterName": "cluster",
                "awsRegion": os.environ["CDK_DEFAULT_REGION"],
                "awsVpcID": eks_vpc.vpc_id,
                "rbac": {
                    "create": True,
                    "serviceAccount": {
                        "create": False,
                        "name": "alb-ingress-controller"
                    }
                }
            })
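        # Note: the kubernetes-charts-incubator repository hosting this chart has since
        # been deprecated; newer clusters typically install the aws-load-balancer-controller
        # chart from https://aws.github.io/eks-charts instead.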

        # Create code-server bastion
        # Get Latest Amazon Linux AMI
        amzn_linux = ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
            edition=ec2.AmazonLinuxEdition.STANDARD,
            virtualization=ec2.AmazonLinuxVirt.HVM,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

        # Create SecurityGroup for code-server
        security_group = ec2.SecurityGroup(self,
                                           "SecurityGroup",
                                           vpc=eks_vpc,
                                           allow_all_outbound=True)

        security_group.add_ingress_rule(ec2.Peer.any_ipv4(),
                                        ec2.Port.tcp(8080))

        # Create our EC2 instance running CodeServer
        code_server_instance = ec2.Instance(
            self,
            "CodeServerInstance",
            instance_type=ec2.InstanceType("t3.large"),
            machine_image=amzn_linux,
            role=bastion_role,
            vpc=eks_vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            security_group=security_group,
            block_devices=[
                ec2.BlockDevice(device_name="/dev/xvda",
                                volume=ec2.BlockDeviceVolume.ebs(20))
            ])

        # Add UserData
        code_server_instance.user_data.add_commands(
            "mkdir -p ~/.local/lib ~/.local/bin ~/.config/code-server")
        code_server_instance.user_data.add_commands(
            "curl -fL https://github.com/cdr/code-server/releases/download/v3.5.0/code-server-3.5.0-linux-amd64.tar.gz | tar -C ~/.local/lib -xz"
        )
        code_server_instance.user_data.add_commands(
            "mv ~/.local/lib/code-server-3.5.0-linux-amd64 ~/.local/lib/code-server-3.5.0"
        )
        code_server_instance.user_data.add_commands(
            "ln -s ~/.local/lib/code-server-3.5.0/bin/code-server ~/.local/bin/code-server"
        )
        code_server_instance.user_data.add_commands(
            "echo \"bind-addr: 0.0.0.0:8080\" > ~/.config/code-server/config.yaml"
        )
        code_server_instance.user_data.add_commands(
            "echo \"auth: password\" >> ~/.config/code-server/config.yaml")
        code_server_instance.user_data.add_commands(
            "echo \"password: $(curl -s http://169.254.169.254/latest/meta-data/instance-id)\" >> ~/.config/code-server/config.yaml"
        )
        code_server_instance.user_data.add_commands(
            "echo \"cert: false\" >> ~/.config/code-server/config.yaml")
        code_server_instance.user_data.add_commands(
            "~/.local/bin/code-server &")
        code_server_instance.user_data.add_commands(
            "yum -y install jq gettext bash-completion moreutils")
        code_server_instance.user_data.add_commands(
            "sudo pip install --upgrade awscli && hash -r")
        code_server_instance.user_data.add_commands(
            "echo 'export ALB_INGRESS_VERSION=\"v1.1.8\"' >>  ~/.bash_profile")
        code_server_instance.user_data.add_commands(
            "curl --silent --location -o /usr/local/bin/kubectl \"https://amazon-eks.s3.us-west-2.amazonaws.com/1.17.9/2020-08-04/bin/linux/amd64/kubectl\""
        )
        code_server_instance.user_data.add_commands(
            "chmod +x /usr/local/bin/kubectl")
        code_server_instance.user_data.add_commands(
            "curl -L https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash"
        )
        code_server_instance.user_data.add_commands(
            "export ACCOUNT_ID=$(aws sts get-caller-identity --output text --query Account)"
        )
        code_server_instance.user_data.add_commands(
            "export AWS_REGION=$(curl -s 169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.region')"
        )
        code_server_instance.user_data.add_commands(
            "echo \"export ACCOUNT_ID=${ACCOUNT_ID}\" | tee -a ~/.bash_profile"
        )
        code_server_instance.user_data.add_commands(
            "echo \"export AWS_REGION=${AWS_REGION}\" | tee -a ~/.bash_profile"
        )
        code_server_instance.user_data.add_commands(
            "aws configure set default.region ${AWS_REGION}")
        code_server_instance.user_data.add_commands(
            "curl --silent --location https://rpm.nodesource.com/setup_12.x | bash -"
        )
        code_server_instance.user_data.add_commands("yum -y install nodejs")
        code_server_instance.user_data.add_commands(
            "amazon-linux-extras enable python3")
        code_server_instance.user_data.add_commands(
            "yum install -y python3 --disablerepo amzn2-core")
        code_server_instance.user_data.add_commands("yum install -y git")
        code_server_instance.user_data.add_commands(
            "rm /usr/bin/python && ln -s /usr/bin/python3 /usr/bin/python && ln -s /usr/bin/pip3 /usr/bin/pip"
        )
        code_server_instance.user_data.add_commands("npm install -g aws-cdk")
        code_server_instance.user_data.add_commands(
            "echo 'export KUBECONFIG=~/.kube/config' >>  ~/.bash_profile")
        code_server_instance.user_data.add_commands(
            "git clone https://github.com/jasonumiker/eks-school.git")

        # Add ALB
        lb = elbv2.ApplicationLoadBalancer(self,
                                           "LB",
                                           vpc=eks_vpc,
                                           internet_facing=True)
        listener = lb.add_listener("Listener", port=80)
        listener.connections.allow_default_port_from_any_ipv4(
            "Open to the Internet")
        listener.connections.allow_to_any_ipv4(
            port_range=ec2.Port(string_representation="TCP 8080",
                                protocol=ec2.Protocol.TCP,
                                from_port=8080,
                                to_port=8080))
        listener.add_targets(
            "Target",
            port=8080,
            targets=[
                elbv2.InstanceTarget(
                    instance_id=code_server_instance.instance_id, port=8080)
            ])

        # If a Hosted Zone exists, set up HTTPS for codeserver.domainname.xyz
        if "CDK_HOSTEDZONEID" in os.environ and "CDK_HOSTEDZONENAME" in os.environ:
            hostedzone = route53.HostedZone.from_hosted_zone_attributes(
                self,
                "dnszone",
                hosted_zone_id=os.environ["CDK_HOSTEDZONEID"],
                zone_name=os.environ["CDK_HOSTEDZONENAME"])
            arecord = route53.ARecord(
                self,
                'CodeServer AliasRecord',
                zone=hostedzone,
                target=route53.RecordTarget.from_alias(
                    route53_targets.LoadBalancerTarget(lb)),
                record_name="codeserver")

            cert = acm.DnsValidatedCertificate(self,
                                               "codeserver cert",
                                               domain_name="codeserver." +
                                               hostedzone.zone_name,
                                               hosted_zone=hostedzone)
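            # Unlike the CloudFront certificates elsewhere in this document, an ALB
            # certificate must live in the same region as the load balancer, so no
            # region='us-east-1' override is needed here.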
            listenerHttps = lb.add_listener("HTTPS Listener", port=443)
            listenerHttps.connections.allow_default_port_from_any_ipv4(
                "Open to the Internet")
            listenerHttps.connections.allow_to_any_ipv4(
                port_range=ec2.Port(string_representation="TCP 8080",
                                    protocol=ec2.Protocol.TCP,
                                    from_port=8080,
                                    to_port=8080))
            listenerHttps.add_certificates("LB Certificates", [cert])
            listenerHttps.add_targets(
                "Target",
                port=8080,
                targets=[
                    elbv2.InstanceTarget(
                        instance_id=code_server_instance.instance_id,
                        port=8080)
                ])
Esempio n. 25
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Message timeout; used by SQS and Lambda
        message_timeout = core.Duration.seconds(15)

        # SQS queue that the Raspberry Pi will write to
        queue = sqs.Queue(
            self,
            'Queue',
            visibility_timeout=message_timeout,
            receive_message_wait_time=core.Duration.seconds(20),
            retention_period=core.Duration.hours(1),
        )

        # DynamoDB table that the web app will read from
        icao_address = dynamodb.Attribute(
            name='IcaoAddress',
            type=dynamodb.AttributeType.STRING,
        )
        table = dynamodb.Table(
            self,
            'Table',
            partition_key=icao_address,
            billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        database = timestream.CfnDatabase(
            self,
            'Database',
            database_name='aircraft-database',
        )
        table2 = timestream.CfnTable(self,
                                     'Table2',
                                     database_name=database.ref,
                                     table_name='aircraft-table',
                                     retention_properties={
                                         'MemoryStoreRetentionPeriodInHours':
                                         1,
                                         'MagneticStoreRetentionPeriodInDays':
                                         1,
                                     })

        # IAM user for the Raspberry Pi
        user = iam.User(self, 'RaspberryPi')
        queue.grant_send_messages(user)
        access_key = iam.CfnAccessKey(
            self,
            'AccessKey',
            user_name=user.user_name,
        )

        # IAM role for Lambda function, so it can write to DynamoDB
        lambda_role = iam.Role(
            self,
            'LambdaRole',
            assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    'service-role/AWSLambdaBasicExecutionRole'),
            ],
        )
        lambda_role.add_to_policy(
            iam.PolicyStatement(
                actions=[
                    'timestream:CancelQuery', 'timestream:DescribeEndpoints',
                    'timestream:DescribeTable', 'timestream:ListMeasures',
                    'timestream:Select', 'timestream:WriteRecords'
                ],
                resources=['*'],  # TODO: narrow down permissions
            ))
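        # (Sketch) the TODO above could be addressed by scoping the data-plane actions to
        # the Timestream table, e.g. resources=[table2.attr_arn]; note that
        # timestream:DescribeEndpoints only accepts '*' as its resource.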
        table.grant_read_write_data(lambda_role)

        # Integration between SQS and Lambda
        event = lambda_event_sources.SqsEventSource(
            queue=queue,
            batch_size=10,
        )

        # Lambda function that processes messages from SQS queue and updates DynamoDB table
        import_function = lambda_.Function(
            self,
            'ImportFunction',
            description='Reads SQS messages and writes to DynamoDB',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset('lambda_import/'),
            timeout=message_timeout,
            handler='index.handler',
            role=lambda_role,
            events=[event],
            environment={
                'TABLE_NAME': table2.ref,
            },
        )

        # TODO: add custom log group
        # TODO: add metric filters for the number of successful and failed updates

        # Lambda function that reads from DynamoDB and returns data to API Gateway
        api_function = lambda_.Function(
            self,
            'ApiFunction',
            description='Reads from DynamoDB and returns to API GW',
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.Code.from_asset('lambda_api/'),
            timeout=message_timeout,
            handler='index.handler',
            role=lambda_role,
            environment={
                'TABLE_NAME': table.table_name,
            },
        )

        # API Gateway for requesting aircraft data
        api = apigateway.RestApi(
            self,
            'Api',
            endpoint_types=[apigateway.EndpointType.REGIONAL],
            cloud_watch_role=False,
        )

        aircraft_resource = api.root.add_resource('aircraft')

        aircraft_resource.add_method(
            http_method='GET',
            integration=apigateway.LambdaIntegration(
                api_function,
                proxy=True,
            ),
        )

        # Static website
        bucket = s3.Bucket(self, 'StaticWebsite')

        s3_deployment.BucketDeployment(
            self,
            'Deployment',
            sources=[
                s3_deployment.Source.asset('html/'),
            ],
            destination_bucket=bucket,
        )

        # Permissions between CloudFront and S3
        origin_identity = cloudfront.OriginAccessIdentity(self, 'Identity')
        bucket.grant_read(origin_identity.grant_principal)

        # CloudFront distribution pointing to both S3 and API Gateway
        s3_origin = cloudfront.SourceConfiguration(
            s3_origin_source=cloudfront.S3OriginConfig(
                s3_bucket_source=bucket,
                origin_access_identity=origin_identity,
            ),
            behaviors=[
                cloudfront.Behavior(
                    default_ttl=core.Duration.days(0),
                    min_ttl=core.Duration.days(0),
                    max_ttl=core.Duration.days(31),
                    is_default_behavior=True,
                )
            ])

        api_origin = cloudfront.SourceConfiguration(
            origin_path='/{}'.format(api.deployment_stage.stage_name),
            custom_origin_source=cloudfront.CustomOriginConfig(
                domain_name='{}.execute-api.{}.{}'.format(
                    api.rest_api_id, self.region, self.url_suffix), ),
            behaviors=[
                cloudfront.Behavior(
                    default_ttl=core.Duration.seconds(0),
                    min_ttl=core.Duration.seconds(0),
                    max_ttl=core.Duration.seconds(0),
                    path_pattern='/aircraft/*',
                )
            ])

        domain_name = self.node.try_get_context('domain_name')

        # If domain name is specified, create a certificate and alias configuration for CloudFront
        if domain_name is None:
            alias_configuration = None
        else:
            subdomain = 'aircraft.{}'.format(domain_name)

            zone = route53.HostedZone.from_lookup(
                self,
                'Zone',
                domain_name=domain_name,
            )

            certificate = acm.DnsValidatedCertificate(
                self,
                'Certificate',
                domain_name=subdomain,
                hosted_zone=zone,
                region='us-east-1',
            )

            alias_configuration = cloudfront.AliasConfiguration(
                acm_cert_ref=certificate.certificate_arn,
                names=[subdomain],
            )

        distribution = cloudfront.CloudFrontWebDistribution(
            self,
            'CDN',
            price_class=cloudfront.PriceClass.PRICE_CLASS_ALL,
            alias_configuration=alias_configuration,
            origin_configs=[
                s3_origin,
                api_origin,
            ],
        )

        # If domain name is specified, create a DNS record for CloudFront
        if domain_name is not None:
            route53.ARecord(
                self,
                'DnsRecord',
                record_name=subdomain,
                target=route53.AddressRecordTarget.from_alias(
                    alias_target=route53_targets.CloudFrontTarget(
                        distribution)),
                zone=zone,
            )

        # Outputs that are needed on the Raspberry Pi
        core.CfnOutput(
            self,
            'QueueUrl',
            value=queue.queue_url,
        )
        core.CfnOutput(
            self,
            'AccessKeyId',
            value=access_key.ref,
        )
        core.CfnOutput(
            self,
            'SecretAccessKey',
            value=access_key.attr_secret_access_key,
        )
        core.CfnOutput(
            self,
            'Region',
            value=self.region,
        )
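        # (Usage sketch, outside this stack) the Raspberry Pi could consume the outputs
        # above with boto3; QUEUE_URL, ACCESS_KEY_ID, SECRET_ACCESS_KEY and REGION below
        # are placeholders for those output values:
        #
        #   import boto3
        #   sqs = boto3.client('sqs', region_name=REGION,
        #                      aws_access_key_id=ACCESS_KEY_ID,
        #                      aws_secret_access_key=SECRET_ACCESS_KEY)
        #   sqs.send_message(QueueUrl=QUEUE_URL,
        #                    MessageBody='{"IcaoAddress": "4840D6"}')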
    def __init__(
        self, 
        scope: core.Construct, 
        id: str, 
        keycloak_domain: str,
        vpc: ec2.IVpc = None, 
        cluster: ecs.ICluster = None, 
        load_balancer: elbv2.IApplicationLoadBalancer = None, 
        log_group: logs.ILogGroup = None,
        keycloak_database_name: str = 'keycloak',
        keycloak_database_user: str = 'admin',
        **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here
        
        keycloak_task_role = iam.Role(
            self, 'KeycloakTaskRole',
            assumed_by=iam.ServicePrincipal('ecs-tasks.amazonaws.com')
        )

        keycloak_database_secret = secretsmanager.Secret(
            self, 'KeycloakDatabaseSecret',
            description='Keycloak Database Password',
            generate_secret_string=secretsmanager.SecretStringGenerator(exclude_punctuation=True)
        )

        keycloak_database_cluster = rds.DatabaseCluster(
            self, 'KeycloakDatabaseCluster',
            engine= rds.DatabaseClusterEngine.AURORA,
            instance_props=rds.InstanceProps(
                instance_type=ec2.InstanceType.of(
                    instance_class=ec2.InstanceClass.BURSTABLE3, 
                    instance_size=ec2.InstanceSize.SMALL
                ),
                vpc=vpc,
            ),
            master_user= rds.Login(
                username=keycloak_database_user,
                password=keycloak_database_secret.secret_value,
            ),
            instances=1,
            default_database_name=keycloak_database_name,
            removal_policy=core.RemovalPolicy.DESTROY,
        )


        keycloak_hosted_zone = route53.HostedZone.from_lookup(
            self, 'KeycloakHostedZone',
            domain_name=keycloak_domain
        )

        keycloak_certificate = acm.DnsValidatedCertificate(
            self, 'KeycloakCertificate',
            hosted_zone=keycloak_hosted_zone,
            domain_name='keycloak.' + keycloak_domain
        )

        keycloak_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self, 'KeycloakLoadBalancedFargateService',
            load_balancer=load_balancer,
            cluster=cluster,

            task_image_options=ecs_patterns.ApplicationLoadBalancedTaskImageOptions(
                image=ecs.ContainerImage.from_asset('keycloak'),
                container_port=8080,
                enable_logging=True,
                task_role=keycloak_task_role,

                log_driver=ecs.AwsLogDriver(
                    stream_prefix='keycloak',
                    log_group=log_group,
                ),

                secrets={
                    'DB_PASSWORD': ecs.Secret.from_secrets_manager(keycloak_database_secret),
                },
                environment={
                    'DB_VENDOR': 'mysql',
                    'DB_USER': keycloak_database_user,
                    'DB_ADDR': keycloak_database_cluster.cluster_endpoint.hostname,
                    'DB_DATABASE': keycloak_database_name,
                    # 'KEYCLOAK_LOGLEVEL': 'DEBUG',
                    'PROXY_ADDRESS_FORWARDING': 'true',
                },
            ),

            memory_limit_mib=512,
            cpu=256,
            desired_count=1,
            public_load_balancer=True,
            domain_name= 'keycloak.' + keycloak_domain,
            domain_zone= keycloak_hosted_zone,
            protocol=elbv2.ApplicationProtocol.HTTPS,
        )

        keycloak_service.target_group.enable_cookie_stickiness(core.Duration.seconds(24 * 60 * 60))
        keycloak_service.target_group.configure_health_check(
            port='8080',
            path='/auth/realms/master/.well-known/openid-configuration',
            timeout=core.Duration.seconds(20),
            healthy_threshold_count=2,
            unhealthy_threshold_count=10,
            interval=core.Duration.seconds(30),
        )

        keycloak_service.listener.add_certificates(
            'KeycloakListenerCertificate',
            certificates= [ keycloak_certificate ]
        )

        keycloak_database_cluster.connections.allow_default_port_from(keycloak_service.service, 'From Keycloak Fargate Service')
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Get the hosted zone and create a certificate for our domain

        hosted_zone = route53.HostedZone.from_hosted_zone_attributes(
            self,
            "HostedZone",
            hosted_zone_id=HOSTED_ZONE_ID,
            zone_name=HOSTED_ZONE_NAME)

        cert = certificatemanager.DnsValidatedCertificate(
            self,
            "Certificate",
            hosted_zone=hosted_zone,
            domain_name=APP_DNS_NAME)

        # Set up a new VPC

        vpc = ec2.Vpc(self, "FargateDemoVpc", max_azs=2)

        # Set up an ECS Cluster for fargate

        cluster = ecs.Cluster(self, "FargateCluster", vpc=vpc)

        # Configure the user pool and related entities for authentication

        user_pool = cognito.UserPool(
            self,
            "UserPool",
            self_sign_up_enabled=True,
            user_pool_name="FargateDemoUserPool",
        )

        user_pool_custom_domain = cognito.CfnUserPoolDomain(
            self,
            "CustomDomain",
            domain=COGNITO_CUSTOM_DOMAIN,
            user_pool_id=user_pool.user_pool_id)

        user_pool_client = cognito.UserPoolClient(
            self,
            "AppClient",
            user_pool=user_pool,
            user_pool_client_name="AlbAuthentication",
            generate_secret=True)

        # Set the attributes on the user pool client that can't be updated via the construct
        user_pool_client_cf: cognito.CfnUserPoolClient = user_pool_client.node.default_child
        user_pool_client_cf.allowed_o_auth_flows = ["code"]
        user_pool_client_cf.allowed_o_auth_scopes = ["openid"]
        user_pool_client_cf.callback_ur_ls = [
            f"https://{APP_DNS_NAME}/oauth2/idpresponse",
            f"https://{APP_DNS_NAME}"
        ]
        user_pool_client_cf.default_redirect_uri = f"https://{APP_DNS_NAME}/oauth2/idpresponse"
        user_pool_client_cf.logout_ur_ls = [
            f"https://{APP_DNS_NAME}/logout", f"https://{APP_DNS_NAME}/"
        ]
        user_pool_client_cf.supported_identity_providers = [
            # This is where you'd add external identity providers as well.
            "COGNITO"
        ]
        user_pool_client_cf.allowed_o_auth_flows_user_pool_client = True

        # Define the Docker Image for our container (the CDK will do the build and push for us!)
        docker_image = ecr_assets.DockerImageAsset(
            self,
            "JwtApp",
            directory=os.path.join(os.path.dirname(__file__), "..", "src"))

        user_pool_domain = f"{user_pool_custom_domain.domain}.auth.{self.region}.amazoncognito.com"

        # Define the fargate service + ALB

        fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(
            self,
            "FargateService",
            cluster=cluster,
            certificate=cert,
            domain_name=f"{APP_DNS_NAME}",
            domain_zone=hosted_zone,
            task_image_options={
                "image":
                ecs.ContainerImage.from_docker_image_asset(docker_image),
                "environment": {
                    "PORT":
                    "80",
                    "LOGOUT_URL":
                    f"https://{user_pool_domain}/logout?" +
                    f"client_id={user_pool_client.user_pool_client_id}&" +
                    f"redirect_uri={ urllib.parse.quote(f'https://{APP_DNS_NAME}')}&"
                    + f"response_type=code&state=STATE&scope=openid"
                }
            })

        # Add an additional HTTPS egress rule to the Load Balancer's security group to talk to Cognito
        lb_security_group = fargate_service.load_balancer.connections.security_groups[
            0]

        lb_security_group.add_egress_rule(
            peer=ec2.Peer.any_ipv4(),
            connection=ec2.Port(protocol=ec2.Protocol.TCP,
                                string_representation="443",
                                from_port=443,
                                to_port=443),
            description="Outbound HTTPS traffic to get to Cognito")

        # Allow 10 seconds for in-flight requests before termination; the default of 5 minutes is much too high.
        fargate_service.target_group.set_attribute(
            key="deregistration_delay.timeout_seconds", value="10")

        # Enable authentication on the Load Balancer
        alb_listener: elb.CfnListener = fargate_service.listener.node.default_child

        elb.CfnListenerRule(
            self,
            "AuthenticateRule",
            actions=[{
                "type":
                "authenticate-cognito",
                "authenticateCognitoConfig":
                elb.CfnListenerRule.AuthenticateCognitoConfigProperty(
                    user_pool_arn=user_pool.user_pool_arn,
                    user_pool_client_id=user_pool_client.user_pool_client_id,
                    user_pool_domain=user_pool_custom_domain.domain),
                "order":
                1
            }, {
                "type":
                "forward",
                "order":
                10,
                "targetGroupArn":
                fargate_service.target_group.target_group_arn
            }],
            conditions=[{
                "field": "host-header",
                "hostHeaderConfig": {
                    "values": [f"{APP_DNS_NAME}"]
                }
            }],
            # Reference the Listener ARN
            listener_arn=alb_listener.ref,
            priority=1000)
Esempio n. 28
    def web(self):
        zone = route53.HostedZone.from_hosted_zone_attributes(
            self,
            'hosted_zone',
            hosted_zone_id=HOSTED_ZONE_ID,
            zone_name=self.zone)
        cert = cert_manager.DnsValidatedCertificate(
            self,
            'domain_cert',
            domain_name=self.zone,
            subject_alternative_names=[
                self.web_domain
            ],
            hosted_zone=zone,
            validation_method=cert_manager.ValidationMethod.DNS)
        # cert = cert_manager.Certificate.from_certificate_arn(self, 'certificateDomainForAll',
        #                                                      "arn:aws:acm:us-east-1:134764946504:certificate/f74613d7-8cc5-4de1-a2ed-467d5321839d")
        site_bucket = s3.Bucket(self, 'site_bucket',
                                bucket_name=self.web_domain,
                                website_index_document='index.html',
                                website_error_document='404.html',
                                public_read_access=True)
        core.CfnOutput(self, 'bucket_name', value=site_bucket.bucket_name)
        source_behavior = cloudfront.Behavior(
            is_default_behavior=True,
            cached_methods=cloudfront.CloudFrontAllowedCachedMethods.GET_HEAD_OPTIONS,
            allowed_methods=cloudfront.CloudFrontAllowedMethods.GET_HEAD_OPTIONS,
            compress=True)
        s3_origin_source = cloudfront.S3OriginConfig(s3_bucket_source=site_bucket)
        origin_source_config = cloudfront.SourceConfiguration(
            s3_origin_source=s3_origin_source,
            behaviors=[source_behavior])
        alias_configuration = cloudfront.AliasConfiguration(
            acm_cert_ref=cert.certificate_arn,
            names=[self.zone],
            ssl_method=cloudfront.SSLMethod.SNI,
            security_policy=cloudfront.SecurityPolicyProtocol.TLS_V1_2_2019)

        distribution = cloudfront.CloudFrontWebDistribution(
            self, 'cf-web-distro',
            alias_configuration=alias_configuration,
            origin_configs=[origin_source_config],
            error_configurations=[
                cloudfront.CfnDistribution.CustomErrorResponseProperty(
                    error_code=404,
                    response_code=404,
                    response_page_path='/404.html'
                ),
                cloudfront.CfnDistribution.CustomErrorResponseProperty(
                    error_code=403,
                    response_code=404,
                    response_page_path='/404.html',
                )
            ],
            http_version=cloudfront.HttpVersion.HTTP2,
            viewer_protocol_policy=cloudfront.ViewerProtocolPolicy.REDIRECT_TO_HTTPS
        )

        core.CfnOutput(self, 'Distribution ID', value=distribution.distribution_id)
        core.CfnOutput(self, 'Distribution domain name', value=distribution.domain_name)
        core.CfnOutput(self, 'SiteBucketWebsiteDomain', value=site_bucket.bucket_website_domain_name)
        # noinspection PyTypeChecker
        target_alias = route53.RecordTarget.from_alias(targets.CloudFrontTarget(distribution))
        route53.ARecord(self, 'arecord-web',
                        record_name=self.zone,
                        target=target_alias,
                        zone=zone,
                        ttl=core.Duration.minutes(60))
        s3_deployment.BucketDeployment(self, "deploy-with-invalidation",
                                       sources=[
                                           s3_deployment.Source.asset('./../website')
                                       ],
                                       destination_bucket=site_bucket,
                                       distribution=distribution,
                                       content_language='en-US',
                                       distribution_paths=['/*'])
Esempio n. 29
    def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # Validated require props.
        required_props_keys = ['CfOriginDomainName', 'Asg', 'HostedZoneName', 'WebsiteDns']
        for k in required_props_keys:
            if k not in props or not props[k]:
                raise ValueError("Required prop %s is not present" % k)

        # Create a custom resource that returns the IP of the host behind the autoscaling group
        asg = props['Asg']
        asg_ip_handler = lambda_.Function(
            self, 'GhostIpHandler',
            runtime=lambda_.Runtime.PYTHON_3_6,
            code=lambda_.Code.asset('lambda'),
            handler='ghost_ip.handler',
        )

        asg_ip_handler.add_to_role_policy(
            statement=iam.PolicyStatement(
                actions=['autoscaling:DescribeAutoScalingGroups', 'ec2:DescribeInstances'],
                resources=['*'],
            )
        )

        asg_ip_provider = cr.Provider(
            self, 'GhostIpProvider',
            on_event_handler=asg_ip_handler,
        )

        asg_ip_resource = cfn.CustomResource(
            self, 'GhostIpResource',
            provider=asg_ip_provider,
            properties={
                'AsgName': asg.auto_scaling_group_name,
                'ts': time.time(), # this makes sure the function is invoked for every CFN update
            }
        )

        # Create R53 HZ and cf origin domain
        if 'ExistingHostedZoneId' in props and props['ExistingHostedZoneId']:
            hz = route53.HostedZone.from_hosted_zone_attributes(
                self, 'HostedZone', 
                zone_name=props['HostedZoneName'],
                hosted_zone_id=props['ExistingHostedZoneId'],
            )
        else:
            hz = route53.HostedZone(
                self, 'HostedZone',
                zone_name=props['HostedZoneName']
            )

        origin_rrset = route53.ARecord(
            self, 'OriginRecord',
            target=route53.RecordTarget.from_ip_addresses(asg_ip_resource.get_att_string('GhostIp')),
            record_name=props['CfOriginDomainName'],
            zone=hz,
        )

        # Create a CF distro
        acm_cert = acm.DnsValidatedCertificate(
            self, 'GhostAcmCert',
            hosted_zone=hz,
            domain_name=props['WebsiteDns'],
            region='us-east-1',
        )

        cf_distro = cf.CloudFrontWebDistribution(
            self, 'CfDistro',
            origin_configs=[cf.SourceConfiguration(
                custom_origin_source=cf.CustomOriginConfig(
                    domain_name=props['CfOriginDomainName'],
                    origin_protocol_policy=cf.OriginProtocolPolicy.HTTP_ONLY,
                ),
                behaviors=[cf.Behavior(is_default_behavior=True)],
            )],
            alias_configuration=cf.AliasConfiguration(
                names=[props['WebsiteDns']],
                acm_cert_ref=acm_cert.certificate_arn,
            ),
            default_root_object='',
        )

        # Create the top level website DNS pointing to the CF distro
        ghost_rrset = route53.CnameRecord(
            self, 'GhostDns',
            domain_name=cf_distro.domain_name,
            zone=hz,
            record_name=props['WebsiteDns'],
        )
Esempio n. 30
    def __init__(
        self,
        scope: core.Construct,
        id: str,
        cloudmap_namespace="airflow.com",
        postgres_password="******",
        airflow_webserver_port=80,
        dags_folder="/src/dags",
        executor="CeleryExecutor",
        postgres_user="******",
        airflow_home="/airflow",
        aws_region="us-west-2",
        postgres_db="airflow",
        log_prefix="airflow",
        domain_name=None,
        hosted_zone=None,
        certificate=None,
        load_examples=True,
        web_container_desired_count=1,
        worker_container_desired_count=1,
        worker_cpu=2048,
        worker_memory_limit_mib=4096,
        vpc=None,
        bucket=None,
        log_driver=None,
        env=None,
        cluster=None,
        base_image=None,
        rds_instance=None,
        web_task=None,
        worker_task=None,
        scheduler_task=None,
        message_broker_task=None,
        message_broker_service=None,
        message_broker_service_name="rabbitmq",
        rabbitmq_alb=None,
        web_service=None,
        scheduler_service=None,
        worker_service=None,
        max_worker_count=16,
        worker_target_memory_utilization=80,
        worker_target_cpu_utilization=80,
        worker_memory_scale_in_cooldown=10,
        worker_memory_scale_out_cooldown=10,
        worker_cpu_scale_in_cooldown=10,
        worker_cpu_scale_out_cooldown=10,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        vpc = vpc or aws_ec2.Vpc(self, "airflow-vpc")

        cloudmap_namespace_options = aws_ecs.CloudMapNamespaceOptions(
            name=cloudmap_namespace, vpc=vpc)

        bucket = bucket or aws_s3.Bucket(
            self,
            "airflow-bucket",
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        core.CfnOutput(
            self,
            "s3-log-bucket",
            value=
            f"https://s3.console.aws.amazon.com/s3/buckets/{bucket.bucket_name}",
            description="where worker logs are written to",
        )

        log_driver = log_driver or aws_ecs.LogDriver.aws_logs(
            stream_prefix=log_prefix)

        environment = {
            "AIRFLOW__WEBSERVER__WEB_SERVER_PORT": airflow_webserver_port,
            #
            "AIRFLOW__CORE__HOSTNAME_CALLABLE": "socket:gethostname",
            "AIRFLOW__CORE__LOAD_EXAMPLES": load_examples,
            "AIRFLOW__CORE__DAGS_FOLDER": dags_folder,
            "AIRFLOW__CORE__EXECUTOR": executor,
            #
            "AIRFLOW__CORE__REMOTE_BASE_LOG_FOLDER":
            f"s3://{bucket.bucket_name}/airflow/logs",
            "AIRFLOW__CORE__REMOTE_LOG_CONN_ID": "aws_default",
            "AIRFLOW__CORE__REMOTE_LOGGING": "true",
            "AIRFLOW__CORE__ENCRYPT_S3_LOGS": "false",
            #
            "GUNICORN_CMD_ARGS": "--log-level WARNING",
            "C_FORCE_ROOT": "true",
            "INVOKE_RUN_ECHO": 1,
            #
            "POSTGRES_PASSWORD": postgres_password,
            "POSTGRES_USER": postgres_user,
            "POSTGRES_DB": postgres_db,
            #
            "AWS_DEFAULT_REGION": aws_region,
            "AIRFLOW_HOME": airflow_home,
            #
            "AIRFLOW_VAR_EXAMPLE_S3_CONN": "example_s3_conn",
            "AIRFLOW_VAR_DEFAULT_S3_BUCKET": bucket.bucket_name,
            # commenting out this part, because altering the user and
            # password will affect the way workers authenticate with
            # rabbitmq
            # "RABBITMQ_DEFAULT_USER": ...,
            # "RABBITMQ_DEFAULT_PASS": ...,
        }

        environment.update(env or {})

        environment = {k: str(v) for k, v in environment.items()}
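        # ECS container environment values must be strings, hence the str() coercion above
        # for the ints/bools set earlier (e.g. INVOKE_RUN_ECHO, load_examples).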

        cluster = cluster or aws_ecs.Cluster(
            self,
            "cluster",
            vpc=vpc,
            default_cloud_map_namespace=cloudmap_namespace_options,
        )

        base_image = base_image or aws_ecs.ContainerImage.from_registry(
            "knowsuchagency/airflow-cdk")

        rds_instance = rds_instance or aws_rds.DatabaseInstance(
            self,
            "airflow-rds-instance",
            master_username=postgres_user,
            engine=aws_rds.DatabaseInstanceEngine.POSTGRES,
            allocated_storage=10,
            database_name=postgres_db,
            master_user_password=core.SecretValue.plain_text(
                postgres_password),
            vpc=vpc,
            instance_type=aws_ec2.InstanceType("t3.micro"),
            # TODO: turn this on when ready for prod
            deletion_protection=False,
            delete_automated_backups=True,
            removal_policy=core.RemovalPolicy.DESTROY,
        )

        web_task = web_task or aws_ecs.FargateTaskDefinition(
            self,
            "web-task",
            cpu=1024,
            memory_limit_mib=2048,
        )

        worker_task = worker_task or aws_ecs.FargateTaskDefinition(
            self,
            "worker-task",
            cpu=worker_cpu,
            memory_limit_mib=worker_memory_limit_mib,
        )

        scheduler_task = scheduler_task or aws_ecs.FargateTaskDefinition(
            self,
            "scheduler-task",
            cpu=1024,
            memory_limit_mib=2048,
        )

        message_broker_task_pre_configured = message_broker_task is not None

        message_broker_task = (message_broker_task
                               or aws_ecs.FargateTaskDefinition(
                                   self,
                                   "message-broker-task",
                                   cpu=1024,
                                   memory_limit_mib=2048,
                               ))

        if not message_broker_task_pre_configured:

            rabbitmq_container = message_broker_task.add_container(
                "rabbitmq_container",
                image=aws_ecs.ContainerImage.from_registry(
                    "rabbitmq:management"),
                environment=environment,
                logging=log_driver,
                health_check=aws_ecs.HealthCheck(
                    command=["CMD", "rabbitmqctl", "status"]),
            )

            rabbitmq_container.add_port_mappings(
                aws_ecs.PortMapping(container_port=5672))

            rabbitmq_container.add_port_mappings(
                aws_ecs.PortMapping(container_port=15672))

        message_broker_service_pre_configured = (message_broker_service
                                                 is not None)

        message_broker_service = (message_broker_service
                                  or aws_ecs.FargateService(
                                      self,
                                      "message_broker_service",
                                      task_definition=message_broker_task,
                                      cluster=cluster,
                                  ))

        if not message_broker_service_pre_configured:

            message_broker_service.enable_cloud_map(
                name=message_broker_service_name)

            message_broker_hostname = (
                f"{message_broker_service_name}.{cloudmap_namespace}")

        for task in web_task, worker_task:

            bucket.grant_read_write(task.task_role.grant_principal)

        bucket.grant_delete(worker_task.task_role.grant_principal)

        postgres_hostname = rds_instance.db_instance_endpoint_address

        environment.update(
            AIRFLOW__CORE__SQL_ALCHEMY_CONN=
            f"postgresql+psycopg2://{postgres_user}"
            f":{postgres_password}@{postgres_hostname}"
            f":5432/{postgres_db}",
            AIRFLOW__CELERY__RESULT_BACKEND=f"db+postgresql://{postgres_user}"
            f":{postgres_password}@{postgres_hostname}"
            f":5432/{postgres_db}",
            AIRFLOW__CELERY__BROKER_URL=f"amqp://{message_broker_hostname}",
        )

        web_container = web_task.add_container(
            "web-container",
            image=base_image,
            environment=environment,
            logging=log_driver,
        )

        web_container.add_port_mappings(
            aws_ecs.PortMapping(container_port=airflow_webserver_port))

        scheduler_container = scheduler_task.add_container(
            "scheduler-container",
            image=base_image,
            environment=environment,
            logging=log_driver,
            command=["scheduler"],
        )

        worker_container = worker_task.add_container(
            "worker-container",
            image=base_image,
            environment=environment,
            logging=log_driver,
            command=["worker"],
        )

        web_service_pre_configured = web_service is not None

        hosted_zone = hosted_zone or aws_route53.PublicHostedZone(
            self,
            "hosted-zone",
            zone_name=domain_name,
            comment="rendered from cdk",
        )

        certificate = (certificate
                       or certificate_manager.DnsValidatedCertificate(
                           self,
                           "tls-cert",
                           hosted_zone=hosted_zone,
                           domain_name=domain_name,
                       ))

        protocol = elb.ApplicationProtocol.HTTPS

        web_service = (web_service or
                       aws_ecs_patterns.ApplicationLoadBalancedFargateService(
                           self,
                           "web-service",
                           task_definition=web_task,
                           cluster=cluster,
                           desired_count=web_container_desired_count,
                           protocol=protocol,
                           domain_zone=hosted_zone,
                           domain_name=domain_name,
                           certificate=certificate,
                       ))

        if not web_service_pre_configured:

            web_service.target_group.configure_health_check(
                healthy_http_codes="200-399")

        scheduler_service = scheduler_service or aws_ecs.FargateService(
            self,
            "scheduler-service",
            task_definition=scheduler_task,
            cluster=cluster,
        )

        worker_service_pre_configured = worker_service is not None

        worker_service = worker_service or aws_ecs.FargateService(
            self,
            "worker-service",
            task_definition=worker_task,
            cluster=cluster,
            desired_count=worker_container_desired_count,
        )

        if not worker_service_pre_configured:

            scalable_task_count = worker_service.auto_scale_task_count(
                max_capacity=max_worker_count)

            scalable_task_count.scale_on_memory_utilization(
                "memory-utilization-worker-scaler",
                policy_name="memory-utilization-worker-scaler",
                target_utilization_percent=worker_target_memory_utilization,
                scale_in_cooldown=core.Duration.seconds(
                    worker_memory_scale_in_cooldown),
                scale_out_cooldown=core.Duration.seconds(
                    worker_memory_scale_out_cooldown),
            )

            scalable_task_count.scale_on_cpu_utilization(
                "cpu-utilization-worker-scaler",
                policy_name="cpu-utilization-worker-scaler",
                target_utilization_percent=worker_target_cpu_utilization,
                scale_in_cooldown=core.Duration.seconds(
                    worker_cpu_scale_in_cooldown),
                scale_out_cooldown=core.Duration.seconds(
                    worker_cpu_scale_out_cooldown),
            )

        for service in (
                web_service.service,
                scheduler_service,
                worker_service,
        ):

            service.connections.allow_to(
                rds_instance,
                aws_ec2.Port.tcp(5432),
                description="allow connection to RDS",
            )

            service.connections.allow_to(
                message_broker_service.connections,
                aws_ec2.Port.tcp(5672),
                description="allow connection to rabbitmq broker",
            )

            service.connections.allow_to(
                message_broker_service.connections,
                aws_ec2.Port.tcp(15672),
                description="allow connection to rabbitmq management api",
            )

        rabbitmq_alb_pre_configured = rabbitmq_alb is not None

        rabbitmq_alb = rabbitmq_alb or elb.ApplicationLoadBalancer(
            self,
            "rabbitmq-alb",
            vpc=vpc,
            internet_facing=True,
        )

        if not rabbitmq_alb_pre_configured:

            core.CfnOutput(
                self,
                id="rabbitmqManagement",
                value=f"http://{rabbitmq_alb.load_balancer_dns_name}",
            )

            rabbitmq_listener = rabbitmq_alb.add_listener("rabbitmq-listener",
                                                          port=80)

            # rabbitmq_listener.add_targets(
            #     message_broker_service.load_balancer_target(
            #         container_name=rabbitmq_container.container_name,
            #         # TODO: cdk bug? jsii.errors.JSIIError: Expected a string, got {"$jsii.byref":"Object@10056"}
            #         container_port=15672,
            #     )
            # )

            message_broker_service.register_load_balancer_targets(
                aws_ecs.EcsTarget(
                    container_name=rabbitmq_container.container_name,
                    container_port=15672,
                    new_target_group_id="rabbitmq-management-tg",
                    listener=aws_ecs.ListenerConfig.application_listener(
                        rabbitmq_listener, ),
                ))

            rabbitmq_alb.connections.allow_to(
                message_broker_service.connections,
                aws_ec2.Port.tcp(15672),
                description="allow connection to rabbitmq management api",
            )