def airflow_web_service(self, environment):
    service_name = get_webserver_service_name(self.deploy_env)
    family = get_webserver_taskdef_family_name(self.deploy_env)
    task_def = ecs.FargateTaskDefinition(self,
                                         family,
                                         cpu=512,
                                         memory_limit_mib=1024,
                                         family=family)
    task_def.add_container(f"WebWorker-{self.deploy_env}",
                           image=self.image,
                           environment=environment,
                           secrets=self.secrets,
                           logging=ecs.LogDrivers.aws_logs(
                               stream_prefix=family,
                               log_retention=RetentionDays.ONE_DAY))
    task_def.default_container.add_port_mappings(
        ecs.PortMapping(container_port=8080,
                        host_port=8080,
                        protocol=ec2.Protocol.TCP))
    # We want only one instance of the web server, so max_healthy_percent=100.
    # When a new version is deployed, the current task has to be stopped manually
    # before the new one starts - this is handled by the deploy task.
    lb_security_group = ec2.SecurityGroup(self,
                                          f"lb-sec-group-{self.deploy_env}",
                                          vpc=self.vpc)
    service = ecs_patterns.ApplicationLoadBalancedFargateService(
        self,
        service_name,
        cluster=self.cluster,  # Required
        service_name=service_name,
        platform_version=ecs.FargatePlatformVersion.VERSION1_4,
        cpu=512,  # Default is 256
        desired_count=1,  # Default is 1
        task_definition=task_def,
        memory_limit_mib=2048,  # Default is 512
        public_load_balancer=True,
        security_groups=[lb_security_group],
        certificate=Certificate.from_certificate_arn(
            self,
            f"lb-cert-{self.deploy_env}",
            certificate_arn=self.config["lb_certificate_arn"]),
        max_healthy_percent=100)
    service.target_group.configure_health_check(path="/health")
    # restrict access to the load balancer to only VPN
    lb_security_group.connections.allow_from(
        ec2.Peer.ipv4(self.config["lb_vpn_addresses"]), ec2.Port.tcp(443))
    # configure DNS alias for the load balancer
    route53.ARecord(self,
                    f"lb-record-{self.deploy_env}",
                    zone=route53.HostedZone.from_hosted_zone_attributes(
                        self,
                        f"Zone-{self.deploy_env}",
                        zone_name=f"Zone-{self.deploy_env}",
                        hosted_zone_id=self.config["route53_zone_id"]),
                    record_name=self.config["lb_dns_name"],
                    target=route53.RecordTarget.from_alias(
                        targets.LoadBalancerTarget(service.load_balancer)))
    return service
def _create_userpool(self):
    user_pool = cognito.UserPool(
        self,
        "movio",
        account_recovery=cognito.AccountRecovery.EMAIL_ONLY,
        auto_verify=cognito.AutoVerifiedAttrs(email=True, phone=False),
        mfa=cognito.Mfa.OFF,
        mfa_second_factor=cognito.MfaSecondFactor(otp=True, sms=False),
        self_sign_up_enabled=False,
        sign_in_aliases=cognito.SignInAliases(email=True, username=True),
        standard_attributes=cognito.StandardAttributes(
            email=cognito.StandardAttribute(mutable=False, required=True), ),
        user_invitation=cognito.UserInvitationConfig(
            email_subject="Moshan email verification",
            email_body=
            "Thanks for signing up to moshan! Your username is {username} and temporary password is {####}\nYou can now login at https://moshan.tv",
        ),
        user_verification=cognito.UserVerificationConfig(
            email_subject="Moshan email verification",
            email_body=
            "Thanks for signing up to moshan! Verify your account by clicking on {##Verify Email##}",
            email_style=cognito.VerificationEmailStyle.LINK),
    )

    user_pool.add_client(
        "moshan",
        auth_flows=cognito.AuthFlow(refresh_token=True),
        o_auth=cognito.OAuthSettings(
            flows=cognito.OAuthFlows(authorization_code_grant=True),
            callback_urls=[
                "https://moshan.tv/callback.html",
                "https://beta.moshan.tv/callback.html"
            ],
            scopes=[
                cognito.OAuthScope.EMAIL, cognito.OAuthScope.OPENID,
                cognito.OAuthScope.PROFILE
            ]),
        prevent_user_existence_errors=True,
    )

    cert = Certificate.from_certificate_arn(self, "domainCert", self.cert_arn)
    user_pool.add_domain("CognitoDomain",
                         custom_domain=cognito.CustomDomainOptions(
                             domain_name=self.domain_name, certificate=cert))
async def acd_setup(self, id: str, domain: str, sats: list) -> None:
    hosted_zone_task = create_task(self.create_hosted_zone())
    potential_cert_arn = await self.get_potential_cert(domain=domain, sats=sats)
    if potential_cert_arn:
        print('Cert found: {}'.format(potential_cert_arn))
        self.cert = Certificate.from_certificate_arn(
            self, '{}APICert'.format(id), certificate_arn=potential_cert_arn)
        self.create_dno(domain)
        await hosted_zone_task
    else:
        print('Cert not found for domain: {}'.format(domain))
        await hosted_zone_task
        self.cert = DnsValidatedCertificate(
            self,
            '{}APICert'.format(id),
            hosted_zone=self.zone,
            domain_name=domain,
            subject_alternative_names=sats,
            region='us-east-1')
        self.create_dno(domain=domain)
def _create_gateway(self): cert = Certificate( self, "certificate", domain_name=self.domain_name, validation_method=ValidationMethod.DNS ) domain_name = DomainName( self, "domain", domain_name=self.domain_name, certificate=cert, security_policy=SecurityPolicy.TLS_1_2 ) http_api = HttpApi( self, "movies_gateway", create_default_stage=False, api_name="movies", cors_preflight=CorsPreflightOptions( allow_methods=[HttpMethod.GET, HttpMethod.POST], allow_origins=["https://moshan.tv", "https://beta.moshan.tv"], allow_headers=["authorization", "content-type"] ) ) authorizer = CfnAuthorizer( self, "cognito", api_id=http_api.http_api_id, authorizer_type="JWT", identity_source=["$request.header.Authorization"], name="cognito", jwt_configuration=CfnAuthorizer.JWTConfigurationProperty( audience=["68v5rahd0sdvrmf7fgbq2o1a9u"], issuer="https://cognito-idp.eu-west-1.amazonaws.com/eu-west-1_sJ3Y4kSv6" ) ) routes = { "get_movies": { "method": "GET", "route": "/movies", "target_lambda": self.lambdas["api-movies"] }, "post_movies": { "method": "POST", "route": "/movies", "target_lambda": self.lambdas["api-movies"] }, "get_movies_by_id": { "method": "GET", "route": "/movies/{id}", "target_lambda": self.lambdas["api-movies_by_id"] }, } for r in routes: integration = HttpIntegration( self, f"{r}_integration", http_api=http_api, integration_type=HttpIntegrationType.LAMBDA_PROXY, integration_uri=routes[r]["target_lambda"].function_arn, method=getattr(HttpMethod, routes[r]["method"]), payload_format_version=PayloadFormatVersion.VERSION_2_0, ) CfnRoute( self, r, api_id=http_api.http_api_id, route_key=f"{routes[r]['method']} {routes[r]['route']}", authorization_type="JWT", authorizer_id=authorizer.ref, target="integrations/" + integration.integration_id ) routes[r]["target_lambda"].add_permission( f"{r}_apigateway_invoke", principal=ServicePrincipal("apigateway.amazonaws.com"), source_arn=f"arn:aws:execute-api:{self.region}:{self.account}:{http_api.http_api_id}/*" ) stage = CfnStage( self, "live", api_id=http_api.http_api_id, auto_deploy=True, default_route_settings=CfnStage.RouteSettingsProperty( throttling_burst_limit=10, throttling_rate_limit=5 ), stage_name="live" ) HttpApiMapping( self, "mapping", api=http_api, domain_name=domain_name, stage=stage )
def __init__(self, scope: core.Construct, construct_id: str, cert_arn: str,
             hosted_zone_id: str, domain_name: str, **kwargs) -> None:
    """
    :param cert_arn: ARN of certificate to use
    :param hosted_zone_id: ID of hosted zone to use
    :param domain_name: Domain name to use
    """
    super().__init__(scope, construct_id, **kwargs)

    ##################################
    # WEBSITE HOSTING INFRASTRUCTURE #
    ##################################

    # Grab the hosted zone that will contain our records and an SSL certificate for HTTPS.
    # These two have to be grabbed from existing resources instead of created here because
    # CloudFormation will time out waiting for a newly-created cert to validate.
    self.hosted_zone = PublicHostedZone.from_public_hosted_zone_id(
        self, "personal-site-hosted-zone", hosted_zone_id)
    self.cert = Certificate.from_certificate_arn(self, "personal-site-cert",
                                                 cert_arn)

    # Add an S3 bucket to host the website content
    self.website_bucket = Bucket(self,
                                 "personal-site-bucket",
                                 bucket_name=domain_name,
                                 removal_policy=RemovalPolicy.DESTROY,
                                 public_read_access=True,
                                 website_index_document="index.html",
                                 website_error_document="index.html")

    # Create a CloudFront distribution for the site
    self.distribution = Distribution(
        self,
        "personal-site-cf-distribution",
        default_behavior={
            "origin": S3Origin(self.website_bucket),
            "allowed_methods": AllowedMethods.ALLOW_GET_HEAD_OPTIONS,
            "viewer_protocol_policy": ViewerProtocolPolicy.REDIRECT_TO_HTTPS
        },
        certificate=self.cert,
        minimum_protocol_version=SecurityPolicyProtocol.TLS_V1_2_2019,
        enable_ipv6=True,
        domain_names=[domain_name, f"www.{domain_name}"])

    # Point traffic for base and www.base to the CloudFront distribution, for both IPv4 and IPv6
    ARecord(self,
            "personal-site-a-record",
            zone=self.hosted_zone,
            record_name=f"{domain_name}.",
            target=RecordTarget.from_alias(CloudFrontTarget(self.distribution)))
    ARecord(self,
            "personal-site-a-record-www",
            zone=self.hosted_zone,
            target=RecordTarget.from_alias(CloudFrontTarget(self.distribution)),
            record_name=f"www.{domain_name}.")
    AaaaRecord(self,
               "personal-site-aaaa-record",
               zone=self.hosted_zone,
               record_name=f"{domain_name}.",
               target=RecordTarget.from_alias(
                   CloudFrontTarget(self.distribution)))
    AaaaRecord(self,
               "personal-site-aaaa-record-www",
               zone=self.hosted_zone,
               target=RecordTarget.from_alias(
                   CloudFrontTarget(self.distribution)),
               record_name=f"www.{domain_name}.")

    #############################
    # WEBSITE CD INFRASTRUCTURE #
    #############################

    # CodeBuild project to build the website
    self.code_build_project = Project(
        self,
        "personal-site-builder",
        project_name="PersonalWebsite",
        description="Builds & deploys a personal static website on changes from GitHub",
        source=Source.git_hub(
            owner="c7c8",
            repo="crmyers.dev",
            clone_depth=1,
            branch_or_ref="master",
            webhook_filters=[
                FilterGroup.in_event_of(
                    EventAction.PUSH,
                    EventAction.PULL_REQUEST_MERGED).and_branch_is("master")
            ]),
        artifacts=Artifacts.s3(bucket=self.website_bucket,
                               include_build_id=False,
                               package_zip=False,
                               path="/"),
        build_spec=BuildSpec.from_object_to_yaml({
            "version": "0.2",
            "phases": {
                "install": {
                    "runtime-versions": {
                        "nodejs": 10,
                    }
                },
                "pre_build": {
                    "commands": ["npm install"]
                },
                "build": {
                    "commands": [
                        # Build, then invalidate the CloudFront cache in a single shell command
                        "npm run-script build && "
                        f"aws cloudfront create-invalidation --distribution-id={self.distribution.distribution_id} --paths '/*'"
                    ]
                }
            },
            "artifacts": {
                "files": ["./*"],
                "name": ".",
                "discard-paths": "no",
                "base-directory": "dist/crmyers-dev"
            }
        }))
    self.code_build_project.role.add_to_policy(
        PolicyStatement(
            effect=Effect.ALLOW,
            resources=[
                f"arn:aws:cloudfront::{self.account}:distribution/{self.distribution.distribution_id}"
            ],
            actions=['cloudfront:CreateInvalidation']))

    # Set up an SNS topic for text message notifications
    self.deployment_topic = Topic(self,
                                  'personal-site-deployment-topic',
                                  topic_name='WebsiteDeployments',
                                  display_name='Website Deployments')
    self.deployment_topic.add_subscription(SmsSubscription("+19255968684"))
    self.code_build_project.on_build_failed(
        "BuildFailed",
        target=targets.SnsTopic(self.deployment_topic,
                                message=RuleTargetInput.from_text(
                                    "Build for crmyers.dev FAILED")))
    self.code_build_project.on_build_succeeded(
        "BuildSucceeded",
        target=targets.SnsTopic(self.deployment_topic,
                                message=RuleTargetInput.from_text(
                                    "Build for crmyers.dev SUCCEEDED")))
def _create_gateway(self): cert = Certificate(self, "certificate", domain_name=self.domain_name, validation_method=ValidationMethod.DNS) domain_name = DomainName( self, "domain_name", certificate=cert, domain_name=self.domain_name, ) http_api = HttpApi( self, "shows_gateway", create_default_stage=False, api_name="shows", cors_preflight=CorsPreflightOptions( allow_methods=[HttpMethod.GET, HttpMethod.POST], allow_origins=["https://moshan.tv", "https://beta.moshan.tv"], allow_headers=["authorization", "content-type"])) routes = { "get_shows": { "method": "GET", "route": "/shows", "target_lambda": self.lambdas["api-shows"] }, "post_shows": { "method": "POST", "route": "/shows", "target_lambda": self.lambdas["api-shows"] }, "get_shows_by_id": { "method": "GET", "route": "/shows/{id}", "target_lambda": self.lambdas["api-shows_by_id"] }, "get_episodes": { "method": "GET", "route": "/episodes", "target_lambda": self.lambdas["api-episodes"] }, "post_episodes": { "method": "POST", "route": "/shows/{id}/episodes", "target_lambda": self.lambdas["api-episodes"] }, "get_episodes_by_id": { "method": "GET", "route": "/shows/{id}/episodes/{episode_id}", "target_lambda": self.lambdas["api-episodes_by_id"] }, } for r in routes: integration = HttpIntegration( self, f"{r}_integration", http_api=http_api, integration_type=HttpIntegrationType.LAMBDA_PROXY, integration_uri=routes[r]["target_lambda"].function_arn, method=getattr(HttpMethod, routes[r]["method"]), payload_format_version=PayloadFormatVersion.VERSION_2_0, ) CfnRoute( self, r, api_id=http_api.http_api_id, route_key=f"{routes[r]['method']} {routes[r]['route']}", # authorization_type="AWS_IAM", # TODO: add back when: https://github.com/aws/aws-cdk/pull/14853 gets merged (set this manually for now) target="integrations/" + integration.integration_id) routes[r]["target_lambda"].add_permission( f"{r}_apigateway_invoke", principal=ServicePrincipal("apigateway.amazonaws.com"), source_arn= f"arn:aws:execute-api:{self.region}:{self.account}:{http_api.http_api_id}/*" ) HttpStage(self, "live", http_api=http_api, auto_deploy=True, stage_name="live", domain_mapping=DomainMappingOptions( domain_name=domain_name, ))
def __init__(self, scope: core.Construct, id: str,
             resources: FsiSharedResources,
             subnet_group_name: str = 'Default', **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Configure the container resources...
    self.repo = assets.DockerImageAsset(self,
                                        'Repo',
                                        directory='src/fsi/earnings',
                                        file='Dockerfile')
    code = lambda_.DockerImageCode.from_ecr(
        repository=self.repo.repository,
        tag=self.repo.image_uri.split(':')[-1])

    # Configure security policies...
    role = iam.Role(
        self,
        'Role',
        assumed_by=iam.ServicePrincipal(service='lambda'),
        description='HomeNet-{}-Fsi-EarningsReport'.format(
            resources.landing_zone.zone_name),
        role_name='fsi-earnings@homenet.{}.{}'.format(
            resources.landing_zone.zone_name,
            core.Stack.of(self).region).lower(),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name=
                'service-role/AWSLambdaVPCAccessExecutionRole'),
        ])

    # Grant any permissions...
    self.earnings_table = d.Table(
        self,
        'EarningCalendar',
        table_name='FsiCoreSvc-EarningsCalendar',
        billing_mode=d.BillingMode.PAY_PER_REQUEST,
        partition_key=d.Attribute(name='PartitionKey',
                                  type=d.AttributeType.STRING),
        sort_key=d.Attribute(name='SortKey', type=d.AttributeType.STRING),
        time_to_live_attribute='Expiration',
        point_in_time_recovery=True,
        server_side_encryption=True)
    self.earnings_table.grant_read_write_data(role)

    # Define any variables for the function
    self.function_env = {
        'CACHE_TABLE': self.earnings_table.table_name,
    }

    # Create the backing webapi compute ...
    self.function = lambda_.DockerImageFunction(
        self,
        'Function',
        code=code,
        role=role,
        function_name='HomeNet-{}-Fsi-{}'.format(
            resources.landing_zone.zone_name, FsiEarningsGateway.__name__),
        description='Python Lambda function for ' +
        FsiEarningsGateway.__name__,
        timeout=core.Duration.seconds(30),
        tracing=lambda_.Tracing.ACTIVE,
        vpc=resources.landing_zone.vpc,
        log_retention=logs.RetentionDays.FIVE_DAYS,
        memory_size=128,
        allow_all_outbound=True,
        vpc_subnets=ec2.SubnetSelection(subnet_group_name=subnet_group_name),
        security_groups=[resources.landing_zone.security_group],
        environment=self.function_env,
    )

    # Bind APIG to Lambda compute...
    self.frontend_proxy = a.LambdaRestApi(
        self,
        'ApiGateway',
        proxy=True,
        handler=self.function,
        options=a.RestApiProps(
            description='Hosts the Earnings Calendar Services via ' +
            self.function.function_name,
            domain_name=a.DomainNameOptions(
                domain_name='earnings.trader.fsi',
                certificate=Certificate.from_certificate_arn(
                    self,
                    'Certificate',
                    certificate_arn=
                    'arn:aws:acm:us-east-2:581361757134:certificate/4e3235f7-49a1-42a5-a671-f2449b45f72d'
                ),
                security_policy=a.SecurityPolicy.TLS_1_0),
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=['execute-api:Invoke'],
                                    principals=[iam.AnyPrincipal()],
                                    resources=['*'],
                                    conditions={
                                        'IpAddress': {
                                            'aws:SourceIp': [
                                                '10.0.0.0/8',
                                                '192.168.0.0/16',
                                                '72.90.160.65/32'
                                            ]
                                        }
                                    })
            ]),
            endpoint_configuration=a.EndpointConfiguration(
                types=[a.EndpointType.REGIONAL], )))

    # Register Dns Name
    r53.ARecord(self,
                'AliasRecord',
                zone=resources.trader_dns_zone,
                record_name='earnings.%s' %
                resources.trader_dns_zone.zone_name,
                target=r53.RecordTarget.from_alias(
                    dns_targets.ApiGateway(self.frontend_proxy)))
def __init__(self, app: App, id: str, env: Environment) -> None:
    super().__init__(app, id, env=env)

    # start by getting the DNS zone we're going to work with
    zone = HostedZone.from_lookup(self, "Dominick", domain_name=DOMAIN)

    # create a certificate for the web service which matches its hostname
    cert = Certificate(self,
                       "Cletus",
                       domain_name=HOSTNAME,
                       validation=CertificateValidation.from_dns(zone))

    # the services will live in a vpc, of course
    vpc = ec2.Vpc(self, "Virgil")

    # we're going to scale this web-service automatically
    asg = AutoScalingGroup(self,
                           "Alice",
                           vpc=vpc,
                           user_data=http_service(),
                           instance_type=ec2.InstanceType.of(
                               ec2.InstanceClass.BURSTABLE2,
                               ec2.InstanceSize.MICRO),
                           machine_image=ec2.AmazonLinuxImage(
                               generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2))

    # explicitly allow internal access from the vpc just to be safe
    asg.connections.allow_internally(Port.tcp(WEB_PORT), "web-service")
    asg.connections.allow_internally(Port.tcp(NOT_WEB), "not-web")

    # expose the scaling group ports and permit egress
    asg.connections.allow_from_any_ipv4(Port.tcp(WEB_PORT))
    asg.connections.allow_from_any_ipv4(Port.tcp(NOT_WEB))

    # create a health check for the not-web service that currently
    if NOT_WEB_HEALTH_CHECKS:
        # points to the not-web service
        checker = HealthCheck(interval=Duration.seconds(10),
                              port=NOT_WEB,
                              protocol=Protocol.TCP)
    else:
        # points to the web port where our demo server listens
        checker = HealthCheck(interval=Duration.seconds(10),
                              port=str(WEB_PORT),
                              protocol=WEB_PROT)

    # put the scaling group behind a network target group for the LB
    notwebish = NetworkTargetGroup(self,
                                   "Allison",
                                   vpc=vpc,
                                   health_check=checker,
                                   targets=[asg],
                                   port=NOT_WEB,
                                   protocol=Protocol.TCP)

    # for the web-like ports, we can use the default health check
    webish = NetworkTargetGroup(
        self,
        "Alicen",
        vpc=vpc,
        health_check=HealthCheck(interval=Duration.seconds(10)),
        targets=[asg],
        port=WEB_PORT,
        protocol=WEB_PROT)

    if True:
        # create the load balancer and put it into dns
        lb = NetworkLoadBalancer(self, "Lisa", vpc=vpc, internet_facing=True)

        # create a hostname for the service
        CnameRecord(self,
                    "Carl",
                    domain_name=lb.load_balancer_dns_name,
                    zone=zone,
                    record_name=HOSTNAME.split('.')[0],
                    ttl=Duration.seconds(60))
    else:
        # a multi-step deployment could allow using an alias in R53
        lb = NetworkLoadBalancer.from_network_load_balancer_attributes(
            self,
            "Larry",
            vpc=vpc,
            load_balancer_arn=some.load_balancer_arn,
            load_balancer_dns_name=HOSTNAME,
            load_balancer_canonical_hosted_zone_id=zone.hosted_zone_id)

        # create a hostname for the service
        AaaaRecord(self,
                   "Eric",
                   zone=zone,
                   record_name=HOSTNAME.split('.')[0],
                   target=RecordTarget.from_alias(LoadBalancerTarget(lb)))

    # point the load balancer to the target group for the ssl service
    #
    # TODO: determine if we need to use the same cert for pub-facing
    # and internal service
    listener_cert = ListenerCertificate(cert.certificate_arn)
    lb.add_listener("Cecil",
                    port=443,
                    certificates=[listener_cert],
                    default_target_groups=[webish])

    # point the load balancer to the target group for the web service
    lb.add_listener("Webster", port=80, default_target_groups=[webish])

    # point the load balancer to the group for the not-web service
    lb.add_listener("NotWeb",
                    default_target_groups=[notwebish],
                    port=NOT_WEB,
                    protocol=Protocol.TCP)

    # auto scale the, uh, autoscaling group
    asg.scale_on_cpu_utilization("ScaleCPU", target_utilization_percent=80)

    # emit some output values, largely for console use
    CfnOutput(self, "LB", export_name="LB", value=lb.load_balancer_dns_name)
    CfnOutput(self,
              "HTTP",
              export_name="HTTP",
              value="http://{}/".format(HOSTNAME))
    CfnOutput(self,
              "HTTPS",
              export_name="HTTPS",
              value="https://{}/".format(HOSTNAME))
    CfnOutput(self,
              "TCP",
              export_name="TCP",
              value="tcp://{}:{}/".format(HOSTNAME, NOT_WEB))
    CfnOutput(self, "Cert", export_name="Cert", value=cert.certificate_arn)
def __init__(self, scope: core.Construct, id: str,
             infra: RtspBaseResourcesConstruct,
             subnet_group_name: str = 'Default', **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    core.Tags.of(self).add(key='Source', value=PhotosApiConstruct.__name__)

    # Configure the container resources...
    self.repo = assets.DockerImageAsset(self,
                                        'Repo',
                                        directory='src/rtsp/photo-api',
                                        file='Dockerfile')
    code = lambda_.DockerImageCode.from_ecr(
        repository=self.repo.repository,
        tag=self.repo.image_uri.split(':')[-1])

    # Configure security policies...
    role = iam.Role(
        self,
        'Role',
        assumed_by=iam.ServicePrincipal(service='lambda'),
        description='HomeNet-{}-PhotoApi'.format(infra.landing_zone.zone_name),
        role_name='rtsp-photoapi@homenet.{}.{}'.format(
            infra.landing_zone.zone_name,
            core.Stack.of(self).region),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name=
                'service-role/AWSLambdaVPCAccessExecutionRole'),
            iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name='AmazonS3ReadOnlyAccess')
        ])

    infra.bucket.grant_read(role)
    infra.face_table.grant_read_write_data(role)

    # Define any variables for the function
    self.function_env = {
        'FACE_TABLE': infra.face_table.table_name,
        'REGION': core.Stack.of(self).region,
    }

    # Create the backing webapi compute ...
    self.function = lambda_.DockerImageFunction(
        self,
        'Function',
        code=code,
        role=role,
        function_name='HomeNet-PhotoApi',
        description='Python Lambda function for ' +
        PhotosApiConstruct.__name__,
        timeout=core.Duration.seconds(30),
        tracing=lambda_.Tracing.ACTIVE,
        vpc=infra.landing_zone.vpc,
        log_retention=RetentionDays.FIVE_DAYS,
        memory_size=128,
        allow_all_outbound=True,
        vpc_subnets=ec2.SubnetSelection(subnet_group_name=subnet_group_name),
        security_groups=[infra.security_group],
        environment=self.function_env,
    )

    # Bind APIG to Lambda compute...
    # Calls need to use https://photos-api.virtual.world
    self.frontend_proxy = a.LambdaRestApi(
        self,
        'ApiGateway',
        proxy=True,
        handler=self.function,
        options=a.RestApiProps(
            description='Photo-Api proxy for ' + self.function.function_name,
            binary_media_types=['image/png', 'image/jpg', 'image/bmp'],
            domain_name=a.DomainNameOptions(
                domain_name='photos-api.virtual.world',
                certificate=Certificate.from_certificate_arn(
                    self,
                    'Certificate',
                    certificate_arn=
                    'arn:aws:acm:us-east-1:581361757134:certificate/c91263e7-882e-441d-aa2f-717074aed6d0'
                ),
                security_policy=a.SecurityPolicy.TLS_1_0),
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=['execute-api:Invoke'],
                                    principals=[iam.AnyPrincipal()],
                                    resources=['*'],
                                    conditions={
                                        'IpAddress': {
                                            'aws:SourceIp': [
                                                '10.0.0.0/8',
                                                '192.168.0.0/16',
                                                '72.90.160.65/32'
                                            ]
                                        }
                                    })
            ]),
            endpoint_configuration=a.EndpointConfiguration(
                types=[a.EndpointType.REGIONAL],
                # vpc_endpoints=[
                #     infra.landing_zone.vpc_endpoints.interfaces['execute-api']
                # ]
            )))
def _create_cert(self):
    self.cert = Certificate(self,
                            "certificate",
                            domain_name=self.domain_name,
                            validation_method=ValidationMethod.DNS)
def __init__(
    self,
    scope: App,
    id: str,
    envs: EnvSettings,
    components: ComponentsStack,
    base_resources: BaseResources,
):
    super().__init__(scope, id)

    self.db_secret_arn = Fn.import_value(
        BaseResources.get_database_secret_arn_output_export_name(envs))
    self.job_processing_queues = components.data_processing_queues
    self.vpc = base_resources.vpc
    self.db = base_resources.db

    self.app_bucket = Bucket(self, "App", versioned=True)

    if self.app_bucket.bucket_arn:
        CfnOutput(
            self,
            id="AppBucketOutput",
            export_name=self.get_app_bucket_arn_output_export_name(envs),
            value=self.app_bucket.bucket_arn,
        )

    self.pages_bucket = Bucket(self, "Pages", public_read_access=True)

    self.domain_name = StringParameter.from_string_parameter_name(
        self,
        "DomainNameParameter",
        string_parameter_name="/schema-cms-app/DOMAIN_NAME").string_value
    self.certificate_arn = StringParameter.from_string_parameter_name(
        self,
        "CertificateArnParameter",
        string_parameter_name="/schema-cms-app/CERTIFICATE_ARN").string_value

    django_secret = Secret(self,
                           "DjangoSecretKey",
                           secret_name="SCHEMA_CMS_DJANGO_SECRET_KEY")
    lambda_auth_token_secret = Secret(
        self, "LambdaAuthToken", secret_name="SCHEMA_CMS_LAMBDA_AUTH_TOKEN")

    if lambda_auth_token_secret.secret_arn:
        CfnOutput(
            self,
            id="lambdaAuthTokenArnOutput",
            export_name=self.get_lambda_auth_token_arn_output_export_name(envs),
            value=lambda_auth_token_secret.secret_arn,
        )

    self.django_secret_key = EcsSecret.from_secrets_manager(django_secret)
    self.lambda_auth_token = EcsSecret.from_secrets_manager(
        lambda_auth_token_secret)

    tag_from_context = self.node.try_get_context("app_image_tag")
    tag = tag_from_context if tag_from_context != "undefined" else None

    api_image = ContainerImage.from_ecr_repository(
        repository=Repository.from_repository_name(
            self,
            id="BackendRepository",
            repository_name=BaseECR.get_backend_repository_name(envs)),
        tag=tag,
    )
    nginx_image = ContainerImage.from_ecr_repository(
        repository=Repository.from_repository_name(
            self,
            id="NginxRepository",
            repository_name=BaseECR.get_nginx_repository_name(envs)),
        tag=tag,
    )

    self.api = ApplicationLoadBalancedFargateService(
        self,
        "ApiService",
        service_name=f"{envs.project_name}-api-service",
        cluster=Cluster.from_cluster_attributes(
            self,
            id="WorkersCluster",
            cluster_name="schema-ecs-cluster",
            vpc=self.vpc,
            security_groups=[],
        ),
        task_image_options=ApplicationLoadBalancedTaskImageOptions(
            image=nginx_image,
            container_name="nginx",
            container_port=80,
            enable_logging=True,
        ),
        desired_count=1,
        cpu=512,
        memory_limit_mib=1024,
        certificate=Certificate.from_certificate_arn(
            self, "Cert", certificate_arn=self.certificate_arn),
        domain_name=self.domain_name,
        domain_zone=PrivateHostedZone(
            self,
            "zone",
            vpc=self.vpc,
            zone_name=self.domain_name,
        ),
    )

    self.api.task_definition.add_container(
        "backend",
        image=api_image,
        command=[
            "sh", "-c",
            "/bin/chamber exec $CHAMBER_SERVICE_NAME -- ./scripts/run.sh"
        ],
        logging=AwsLogDriver(stream_prefix="backend-container"),
        environment={
            "POSTGRES_DB": envs.data_base_name,
            "AWS_STORAGE_BUCKET_NAME": self.app_bucket.bucket_name,
            "AWS_STORAGE_PAGES_BUCKET_NAME": self.pages_bucket.bucket_name,
            "SQS_WORKER_QUEUE_URL": self.job_processing_queues[0].queue_url,
            "SQS_WORKER_EXT_QUEUE_URL": self.job_processing_queues[1].queue_url,
            "SQS_WORKER_MAX_QUEUE_URL": self.job_processing_queues[2].queue_url,
            "CHAMBER_SERVICE_NAME": "schema-cms-app",
            "CHAMBER_KMS_KEY_ALIAS": envs.project_name,
        },
        secrets={
            "DB_CONNECTION": EcsSecret.from_secrets_manager(
                Secret.from_secret_arn(self,
                                       id="DbSecret",
                                       secret_arn=self.db_secret_arn)),
            "DJANGO_SECRET_KEY": self.django_secret_key,
            "LAMBDA_AUTH_TOKEN": self.lambda_auth_token,
        },
        cpu=512,
        memory_limit_mib=1024,
    )

    self.django_secret_key.grant_read(
        self.api.service.task_definition.task_role)

    self.app_bucket.grant_read_write(
        self.api.service.task_definition.task_role)
    self.pages_bucket.grant_read_write(
        self.api.service.task_definition.task_role)

    for queue in self.job_processing_queues:
        queue.grant_send_messages(self.api.service.task_definition.task_role)

    self.api.service.connections.allow_to(self.db.connections, Port.tcp(5432))

    self.api.task_definition.add_to_task_role_policy(
        PolicyStatement(
            actions=["ses:SendRawEmail", "ses:SendBulkTemplatedEmail"],
            resources=["*"],
        ))
    self.api.task_definition.add_to_task_role_policy(
        PolicyStatement(
            actions=["kms:Get*", "kms:Describe*", "kms:List*", "kms:Decrypt"],
            resources=[
                Fn.import_value(BaseKMS.get_kms_arn_output_export_name(envs))
            ],
        ))
    self.api.task_definition.add_to_task_role_policy(
        PolicyStatement(actions=["ssm:DescribeParameters"], resources=["*"]))
    self.api.task_definition.add_to_task_role_policy(
        PolicyStatement(
            actions=["ssm:GetParameters*"],
            resources=[
                f"arn:aws:ssm:{self.region}:{self.account}:parameter/schema-cms-app/*"
            ],
        ))
def init_certificate(self, domain_name):
    certificate = Certificate(self,
                              "certificate",
                              domain_name=domain_name,
                              validation_method=ValidationMethod.DNS)
    return certificate
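# For context: a minimal sketch of the ACM imports these certificate helpers assume.
# This reflects the CDK v1 Python module layout used throughout these snippets
# (module paths and the deprecated ValidationMethod/DnsValidatedCertificate APIs
# differ in CDK v2), so treat it as an assumption rather than part of the original code.
from aws_cdk.aws_certificatemanager import (
    Certificate,              # new or imported ACM certificates
    CertificateValidation,    # e.g. CertificateValidation.from_dns(zone)
    DnsValidatedCertificate,  # cross-region (us-east-1) DNS-validated certs
    ValidationMethod,         # e.g. ValidationMethod.DNS
)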
def __init__(self, scope: core.Construct, id: str,
             resources: FsiSharedResources,
             subnet_group_name: str = 'Default', **kwargs) -> None:
    super().__init__(scope, id, **kwargs)

    # Configure the container resources...
    self.repo = assets.DockerImageAsset(self,
                                        'Repo',
                                        directory='src/fsi/account-linking',
                                        file='Dockerfile')
    code = lambda_.DockerImageCode.from_ecr(
        repository=self.repo.repository,
        tag=self.repo.image_uri.split(':')[-1])

    # Configure security policies...
    role = iam.Role(
        self,
        'Role',
        assumed_by=iam.ServicePrincipal(service='lambda'),
        description='HomeNet-{}-Fsi-AccountLinking'.format(
            resources.landing_zone.zone_name),
        role_name='fsi-accountlinking@homenet.{}.{}'.format(
            resources.landing_zone.zone_name,
            core.Stack.of(self).region),
        managed_policies=[
            iam.ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name=
                'service-role/AWSLambdaVPCAccessExecutionRole'),
        ])

    # Grant any permissions...
    resources.tda_secret.grant_write(role)

    # Define any variables for the function
    self.function_env = {
        'REGION': core.Stack.of(self).region,
        'TDA_SECRET_ID': resources.tda_secret.secret_arn,
        'TDA_REDIRECT_URI': ssm.StringParameter.from_string_parameter_name(
            self,
            'TDA_REDIRECT_URI',
            string_parameter_name='/HomeNet/Amertitrade/redirect_uri'
        ).string_value,
        'TDA_CLIENT_ID': ssm.StringParameter.from_string_parameter_name(
            self,
            'TDA_CLIENT_ID',
            string_parameter_name='/HomeNet/Ameritrade/client_id'
        ).string_value
    }

    # Create the backing webapi compute ...
    self.function = lambda_.DockerImageFunction(
        self,
        'Function',
        code=code,
        role=role,
        function_name='HomeNet-{}-Fsi-{}'.format(
            resources.landing_zone.zone_name,
            FsiAmeritradeAuthGateway.__name__),
        description='Python Lambda function for ' +
        FsiAmeritradeAuthGateway.__name__,
        timeout=core.Duration.seconds(30),
        tracing=lambda_.Tracing.ACTIVE,
        vpc=resources.landing_zone.vpc,
        log_retention=logs.RetentionDays.FIVE_DAYS,
        memory_size=128,
        allow_all_outbound=True,
        vpc_subnets=ec2.SubnetSelection(subnet_group_name=subnet_group_name),
        security_groups=[resources.landing_zone.security_group],
        environment=self.function_env,
    )

    # Bind APIG to Lambda compute...
    self.frontend_proxy = a.LambdaRestApi(
        self,
        'ApiGateway',
        proxy=True,
        handler=self.function,
        options=a.RestApiProps(
            description='Hosts the Ameritrade Auth Callback via ' +
            self.function.function_name,
            domain_name=a.DomainNameOptions(
                domain_name='auth.trader.fsi',
                certificate=Certificate.from_certificate_arn(
                    self,
                    'Certificate',
                    certificate_arn=
                    'arn:aws:acm:us-east-2:581361757134:certificate/0d1fc756-ebd6-4660-83a8-814c0976a8c2'),
                security_policy=a.SecurityPolicy.TLS_1_0),
            policy=iam.PolicyDocument(statements=[
                iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                    actions=['execute-api:Invoke'],
                                    principals=[iam.AnyPrincipal()],
                                    resources=['*'],
                                    conditions={
                                        'IpAddress': {
                                            'aws:SourceIp': [
                                                '10.0.0.0/8',
                                                '192.168.0.0/16',
                                                '72.90.160.65/32'
                                            ]
                                        }
                                    })
            ]),
            endpoint_configuration=a.EndpointConfiguration(
                types=[a.EndpointType.REGIONAL], )))

    # Register Dns Name
    r53.ARecord(self,
                'AliasRecord',
                zone=resources.trader_dns_zone,
                record_name='auth.%s' % resources.trader_dns_zone.zone_name,
                target=r53.RecordTarget.from_alias(
                    dns_targets.ApiGateway(self.frontend_proxy)))