def __init__(self, scope: Construct, stack_id: str, **kwargs) -> None:
    """
    Initializes a new instance of NetworkTier.

    :param scope: The scope of this construct.
    :param stack_id: The ID of this construct.
    :param kwargs: The stack properties, passed through to the parent Stack.
    """
    super().__init__(scope, stack_id, **kwargs)

    # We're creating a SubnetSelection with only the standard availability zones to be used to put
    # the NAT gateway in and the VPC interface endpoints, because the local zones do not have
    # these available.
    standard_zone_subnets = SubnetSelection(
        availability_zones=config.availability_zones_standard,
        subnet_type=SubnetType.PUBLIC,
    )

    # The VPC that all components of the render farm will be created in. We are using the
    # `availability_zones()` method to override the availability zones that this VPC will use.
    self.vpc = Vpc(
        self,
        'Vpc',
        max_azs=len(self.availability_zones),
        subnet_configuration=[
            SubnetConfiguration(
                name='Public',
                subnet_type=SubnetType.PUBLIC,
                cidr_mask=28,
            ),
            SubnetConfiguration(
                name='Private',
                subnet_type=SubnetType.PRIVATE_WITH_NAT,
                cidr_mask=18,
            ),
        ],
        # NAT gateways must live in the standard zones only (see note above).
        nat_gateway_subnets=standard_zone_subnets,
    )

    # Add interface endpoints. The previous code enumerated the list but never
    # used the index, so a plain loop is clearer.
    for service_info in _INTERFACE_ENDPOINT_SERVICES:
        self.vpc.add_interface_endpoint(
            service_info['name'],
            service=service_info['service'],
            subnets=standard_zone_subnets,
        )

    # Add gateway endpoints (gateway endpoints take a list of subnet selections).
    for service_info in _GATEWAY_ENDPOINT_SERVICES:
        self.vpc.add_gateway_endpoint(
            service_info['name'],
            service=service_info['service'],
            subnets=[standard_zone_subnets],
        )

    # Internal DNS zone for the VPC.
    self.dns_zone = PrivateHostedZone(
        self,
        'DnsZone',
        vpc=self.vpc,
        zone_name='deadline-test.internal',
    )
def _create_zone(self, zone_id: str, fqdn: str, private_zone: bool, vpc: Vpc) -> Union[PublicHostedZone, PrivateHostedZone]:
    """
    Create a Route 53 hosted zone serving the given fully-qualified domain name.

    :param zone_id: Construct ID to assign to the hosted zone.
    :param fqdn: Fully-qualified domain name the zone will serve.
    :param private_zone: When True, a VPC-scoped private zone is created;
                         otherwise a public zone.
    :param vpc: VPC to associate with a private zone (unused for public zones).
    :return: The newly created hosted zone construct.
    """
    # Guard clause: public zones need no VPC association.
    if not private_zone:
        return PublicHostedZone(self, zone_id, zone_name=fqdn)
    return PrivateHostedZone(self, zone_id, zone_name=fqdn, vpc=vpc)
def __init__(self, scope: Construct, stack_id: str, **kwargs) -> None:
    """
    Initializes a new instance of NetworkTier.

    :param scope: The scope of this construct.
    :param stack_id: The ID of this construct.
    :param kwargs: The stack properties.
    """
    super().__init__(scope, stack_id, **kwargs)

    # The VPC that all components of the render farm will be created in.
    self.vpc = Vpc(
        self,
        'Vpc',
        max_azs=2,
        subnet_configuration=[
            SubnetConfiguration(
                name='Public',
                subnet_type=SubnetType.PUBLIC,
                cidr_mask=28,
            ),
            SubnetConfiguration(
                name='Private',
                subnet_type=SubnetType.PRIVATE,
                cidr_mask=18,  # 16,382 IP addresses
            ),
        ],
    )

    # VPC flow logs are a security best-practice as they allow us
    # to capture information about the traffic going in and out of
    # the VPC. For more information, see the README for this app.
    self.vpc.add_flow_log(
        'NetworkTierFlowLogs',
        destination=FlowLogDestination.to_cloud_watch_logs(),
        traffic_type=FlowLogTrafficType.ALL,
    )

    # TODO - Create a NetworkAcl for your VPC that only allows
    # network traffic required for your render farm. This is a
    # security best-practice to ensure the safety of your farm.
    # The default network ACLs allow all traffic by default,
    # whereas custom network ACLs deny all traffic by default.
    # For more information, see the README for this app.
    #
    # Example code to create a custom network ACL:
    # acl = NetworkAcl(
    #     self,
    #     'ACL',
    #     vpc=self.vpc,
    #     subnet_selection=SubnetSelection(
    #         subnets=self.vpc.public_subnets
    #     )
    # )
    #
    # You can optionally add rules to allow traffic (e.g. SSH):
    # acl.add_entry(
    #     'SSH',
    #     cidr=AclCidr.ipv4(
    #         # some-ipv4-address-cidr
    #     ),
    #     traffic=AclTraffic.tcp_port(22),
    #     rule_number=1
    # )

    # Endpoints are placed in the private subnets.
    private_subnets = SubnetSelection(subnet_type=SubnetType.PRIVATE)

    # Add interface endpoints; the enumeration index keeps the construct IDs
    # unique even if two services share a name.
    for index, info in enumerate(_INTERFACE_ENDPOINT_SERVICES):
        self.vpc.add_interface_endpoint(
            f"{info['name']}{index}",
            service=info['service'],
            subnets=private_subnets,
        )

    # Add gateway endpoints (the original enumeration index was unused here).
    for info in _GATEWAY_ENDPOINT_SERVICES:
        self.vpc.add_gateway_endpoint(
            info['name'],
            service=info['service'],
            subnets=[private_subnets],
        )

    # Internal DNS zone for the VPC.
    self.dns_zone = PrivateHostedZone(
        self,
        'DnsZone',
        vpc=self.vpc,
        zone_name='deadline-test.internal',
    )
def __init__(self, scope: Construct, app_id: str, **kwargs) -> None:
    """
    Wire a static UI bucket behind an existing ALB with an HTTPS redirect rule
    and a private Route 53 alias record.

    :param scope: The scope of this construct.
    :param app_id: The ID of this construct.
    :param kwargs: Stack properties forwarded to the parent class.
    """
    super().__init__(scope, app_id, **kwargs)
    # bucket with ui contents can be reached over listener rule on ALB
    api_domain_name = "static." + AWS_CONF["private_base_domain"]
    host_domain = f"{AWS_CONF['app_name']}.{AWS_CONF['private_base_domain']}"
    s3_path = AWS_CONF["app_name"]
    # In the "tst" stage, each branch gets its own subdomain and S3 key prefix.
    if AWS_CONF["deployment_stage"] == "tst":
        host_domain = f"{AWS_CONF['branch_id']}." + host_domain
        s3_path += "-" + AWS_CONF["branch_path"]
    # Pre-existing bucket, referenced by name (not created by this stack).
    ui_bucket = Bucket.from_bucket_name(
        self,
        "UiBucket",
        bucket_name=AWS_CONF["optionals"]["ui_bucket"],
    )
    # Deploy the locally built UI assets under the branch-specific prefix.
    BucketDeployment(
        self,
        "UiBucketDepl",
        destination_bucket=ui_bucket,
        destination_key_prefix=s3_path,
        sources=[Source.asset(AWS_CONF["optionals"]["node_build_path"])],
    )
    # ALB rule for http redirect to https
    load_balancer_arn = Arn.format(
        components=ArnComponents(
            service="elasticloadbalancing",
            partition="aws",
            resource="loadbalancer/app",
            resource_name=AWS_CONF["optionals"]["alb"],
        ),
        stack=self,
    )
    alb = ApplicationLoadBalancer.from_lookup(
        self,
        "AlbApi",
        load_balancer_arn=load_balancer_arn,
    )
    listener_http = ApplicationListener.from_lookup(
        self,
        "AlbHttpListenerRule",
        load_balancer_arn=alb.load_balancer_arn,
        listener_port=80,
    )
    # listener rule priority is mandatory input and needs to be looked up
    # if cdk context not set yet set fixed priority during cdk synth
    # NOTE(review): the account-in-ARN test presumably distinguishes a resolved
    # lookup from a dummy synth-time value — confirm against cdk.context.json.
    priority = 1
    if AWS_CONF["env"]["account"] in listener_http.listener_arn:
        priority = _next_elb_priority(host_domain, listener_http.listener_arn)
    # the rule is added to the existing listener
    ApplicationListenerRule(
        self,
        f"ListenerRule{AWS_CONF['branch_id'].capitalize()}",
        listener=listener_http,
        priority=priority,
        # Permanent (HTTP 301) redirect to the static host over HTTPS.
        action=ListenerAction.redirect(
            host=api_domain_name,
            path=f"/ui/{s3_path}/index.html",
            permanent=True,
            port="443",
            protocol="HTTPS",
        ),
        conditions=[ListenerCondition.host_headers([host_domain])],
    )
    # route 53 private zone with listener rule for redirect to alb
    ARecord(
        self,
        f"ARecord{AWS_CONF['branch_id'].capitalize()}",
        record_name=host_domain,
        target=RecordTarget(alias_target=LoadBalancerTarget(alb)),
        zone=PrivateHostedZone.from_lookup(
            self,
            "PrivZoneCorp",
            domain_name=AWS_CONF["private_base_domain"],
            private_zone=True,
            vpc_id=AWS_CONF["optionals"]["vpc"],
        ),
    )
def __init__(self, scope: Construct, stack_id: str, *, props: SEPStackProps, **kwargs):
    """
    Initialize a new instance of SEPStack.

    :param scope: The scope of this construct.
    :param stack_id: The ID of this construct.
    :param props: The properties for this construct.
    :param kwargs: Any kwargs that need to be passed on to the parent class.
    """
    super().__init__(scope, stack_id, **kwargs)

    # The VPC that all components of the render farm will be created in.
    farm_vpc = Vpc(self, 'Vpc', max_azs=2)

    docker_recipes = ThinkboxDockerRecipes(
        self,
        'Image',
        stage=Stage.from_directory(props.docker_recipes_stage_path),
    )

    deadline_repository = Repository(
        self,
        'Repository',
        vpc=farm_vpc,
        version=docker_recipes.version,
        repository_installation_timeout=Duration.minutes(20),
        # TODO - Evaluate deletion protection for your own needs. These properties are set to
        # RemovalPolicy.DESTROY to cleanly remove everything when this stack is destroyed. If you
        # would like to ensure that these resources are not accidentally deleted, you should set
        # these properties to RemovalPolicy.RETAIN or just remove the removal_policy parameter.
        removal_policy=RepositoryRemovalPolicies(
            database=RemovalPolicy.DESTROY,
            filesystem=RemovalPolicy.DESTROY,
        ),
    )

    host = 'renderqueue'
    zone_name = 'deadline-test.internal'

    # Internal DNS zone for the VPC.
    private_zone = PrivateHostedZone(
        self,
        'DnsZone',
        vpc=farm_vpc,
        zone_name=zone_name,
    )

    # Self-signed root CA used to sign the render queue's server certificate.
    root_ca = X509CertificatePem(
        self,
        'RootCA',
        subject=DistinguishedName(cn='SampleRootCA'),
    )

    rq_cert = X509CertificatePem(
        self,
        'RQCert',
        subject=DistinguishedName(
            cn=f'{host}.{private_zone.zone_name}',
            o='RFDK-Sample',
            ou='RenderQueueExternal',
        ),
        signing_certificate=root_ca,
    )

    queue = RenderQueue(
        self,
        'RenderQueue',
        vpc=farm_vpc,
        version=docker_recipes.version,
        images=docker_recipes.render_queue_images,
        repository=deadline_repository,
        # TODO - Evaluate deletion protection for your own needs. This is set to false to
        # cleanly remove everything when this stack is destroyed. If you would like to ensure
        # that this resource is not accidentally deleted, you should set this to true.
        deletion_protection=False,
        hostname=RenderQueueHostNameProps(
            hostname=host,
            zone=private_zone,
        ),
        traffic_encryption=RenderQueueTrafficEncryptionProps(
            external_tls=RenderQueueExternalTLSProps(
                rfdk_certificate=rq_cert,
            ),
            internal_protocol=ApplicationProtocol.HTTPS,
        ),
    )

    if props.create_resource_tracker_role:
        # Creates the Resource Tracker Access role. This role is required to exist in your
        # account so the resource tracker will work properly.
        Role(
            self,
            'ResourceTrackerRole',
            assumed_by=ServicePrincipal('lambda.amazonaws.com'),
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('AWSThinkboxDeadlineResourceTrackerAccessPolicy'),
            ],
            role_name='DeadlineResourceTrackerAccessRole',
        )

    spot_fleet = SpotEventPluginFleet(
        self,
        'SpotEventPluginFleet',
        vpc=farm_vpc,
        render_queue=queue,
        deadline_groups=['group_name'],
        instance_types=[InstanceType.of(InstanceClass.BURSTABLE3, InstanceSize.LARGE)],
        worker_machine_image=props.worker_machine_image,
        max_capacity=1,
    )

    # Optional: Add additional tags to both spot fleet request and spot instances.
    Tags.of(spot_fleet).add('name', 'SEPtest')

    ConfigureSpotEventPlugin(
        self,
        'ConfigureSpotEventPlugin',
        vpc=farm_vpc,
        render_queue=queue,
        spot_fleets=[spot_fleet],
        configuration=SpotEventPluginSettings(
            enable_resource_tracker=True,
        ),
    )
def __init__(
    self,
    scope: App,
    id: str,
    envs: EnvSettings,
    components: ComponentsStack,
    base_resources: BaseResources,
):
    """
    Stand up the API tier: S3 buckets, secrets, ECR-backed images, a
    load-balanced Fargate service, and the IAM policies its task role needs.

    :param scope: The CDK app this stack belongs to.
    :param id: The construct ID of this stack.
    :param envs: Environment/project settings used to derive names and exports.
    :param components: Stack providing the data-processing SQS queues.
    :param base_resources: Stack providing the shared VPC and database.
    """
    super().__init__(scope, id)
    # ARN of the database credentials secret, imported from the base stack's export.
    self.db_secret_arn = Fn.import_value(
        BaseResources.get_database_secret_arn_output_export_name(envs))
    self.job_processing_queues = components.data_processing_queues
    self.vpc = base_resources.vpc
    self.db = base_resources.db
    # Versioned bucket for application file storage.
    self.app_bucket = Bucket(self, "App", versioned=True)
    # NOTE(review): bucket_arn is a CDK token and presumably always truthy at
    # synth time, so this guard likely never skips — confirm intent.
    if self.app_bucket.bucket_arn:
        CfnOutput(
            self,
            id="AppBucketOutput",
            export_name=self.get_app_bucket_arn_output_export_name(envs),
            value=self.app_bucket.bucket_arn,
        )
    # Publicly readable bucket for rendered pages.
    self.pages_bucket = Bucket(self, "Pages", public_read_access=True)
    # Domain/certificate configuration is read from SSM parameters.
    self.domain_name = StringParameter.from_string_parameter_name(
        self,
        "DomainNameParameter",
        string_parameter_name="/schema-cms-app/DOMAIN_NAME").string_value
    self.certificate_arn = StringParameter.from_string_parameter_name(
        self,
        "CertificateArnParameter",
        string_parameter_name="/schema-cms-app/CERTIFICATE_ARN"
    ).string_value
    django_secret = Secret(self, "DjangoSecretKey", secret_name="SCHEMA_CMS_DJANGO_SECRET_KEY")
    lambda_auth_token_secret = Secret(
        self,
        "LambdaAuthToken",
        secret_name="SCHEMA_CMS_LAMBDA_AUTH_TOKEN")
    # NOTE(review): same token-truthiness caveat as the bucket ARN check above.
    if lambda_auth_token_secret.secret_arn:
        CfnOutput(
            self,
            id="lambdaAuthTokenArnOutput",
            export_name=self.get_lambda_auth_token_arn_output_export_name(envs),
            value=lambda_auth_token_secret.secret_arn,
        )
    self.django_secret_key = EcsSecret.from_secrets_manager(django_secret)
    self.lambda_auth_token = EcsSecret.from_secrets_manager(
        lambda_auth_token_secret)
    # Image tag comes from CDK context; the literal string "undefined" means
    # "not provided", in which case the default (latest) tag is used.
    tag_from_context = self.node.try_get_context("app_image_tag")
    tag = tag_from_context if tag_from_context != "undefined" else None
    api_image = ContainerImage.from_ecr_repository(
        repository=Repository.from_repository_name(
            self,
            id="BackendRepository",
            repository_name=BaseECR.get_backend_repository_name(envs)),
        tag=tag,
    )
    nginx_image = ContainerImage.from_ecr_repository(
        repository=Repository.from_repository_name(
            self,
            id="NginxRepository",
            repository_name=BaseECR.get_nginx_repository_name(envs)),
        tag=tag,
    )
    # Public-facing service: nginx is the ALB-fronted container; the backend
    # container is added to the same task definition below.
    self.api = ApplicationLoadBalancedFargateService(
        self,
        "ApiService",
        service_name=f"{envs.project_name}-api-service",
        cluster=Cluster.from_cluster_attributes(
            self,
            id="WorkersCluster",
            cluster_name="schema-ecs-cluster",
            vpc=self.vpc,
            security_groups=[],
        ),
        task_image_options=ApplicationLoadBalancedTaskImageOptions(
            image=nginx_image,
            container_name="nginx",
            container_port=80,
            enable_logging=True,
        ),
        desired_count=1,
        cpu=512,
        memory_limit_mib=1024,
        certificate=Certificate.from_certificate_arn(
            self, "Cert", certificate_arn=self.certificate_arn),
        domain_name=self.domain_name,
        domain_zone=PrivateHostedZone(
            self,
            "zone",
            vpc=self.vpc,
            zone_name=self.domain_name,
        ),
    )
    # Backend Django container; chamber injects SSM-stored config at startup.
    self.api.task_definition.add_container(
        "backend",
        image=api_image,
        command=[
            "sh",
            "-c",
            "/bin/chamber exec $CHAMBER_SERVICE_NAME -- ./scripts/run.sh"
        ],
        logging=AwsLogDriver(stream_prefix="backend-container"),
        environment={
            "POSTGRES_DB": envs.data_base_name,
            "AWS_STORAGE_BUCKET_NAME": self.app_bucket.bucket_name,
            "AWS_STORAGE_PAGES_BUCKET_NAME": self.pages_bucket.bucket_name,
            "SQS_WORKER_QUEUE_URL": self.job_processing_queues[0].queue_url,
            "SQS_WORKER_EXT_QUEUE_URL": self.job_processing_queues[1].queue_url,
            "SQS_WORKER_MAX_QUEUE_URL": self.job_processing_queues[2].queue_url,
            "CHAMBER_SERVICE_NAME": "schema-cms-app",
            "CHAMBER_KMS_KEY_ALIAS": envs.project_name,
        },
        secrets={
            "DB_CONNECTION": EcsSecret.from_secrets_manager(
                Secret.from_secret_arn(self, id="DbSecret", secret_arn=self.db_secret_arn)),
            "DJANGO_SECRET_KEY": self.django_secret_key,
            "LAMBDA_AUTH_TOKEN": self.lambda_auth_token,
        },
        cpu=512,
        memory_limit_mib=1024,
    )
    # Grant the task role access to the secrets and buckets it uses at runtime.
    self.django_secret_key.grant_read(
        self.api.service.task_definition.task_role)
    self.app_bucket.grant_read_write(
        self.api.service.task_definition.task_role)
    self.pages_bucket.grant_read_write(
        self.api.service.task_definition.task_role)
    for queue in self.job_processing_queues:
        queue.grant_send_messages(
            self.api.service.task_definition.task_role)
    # Allow the service to reach PostgreSQL in the base-resources stack.
    self.api.service.connections.allow_to(self.db.connections, Port.tcp(5432))
    # SES permissions for outbound email.
    self.api.task_definition.add_to_task_role_policy(
        PolicyStatement(
            actions=["ses:SendRawEmail", "ses:SendBulkTemplatedEmail"],
            resources=["*"],
        ))
    # Decrypt access restricted to the project's KMS key (imported by ARN).
    self.api.task_definition.add_to_task_role_policy(
        PolicyStatement(
            actions=[
                "kms:Get*", "kms:Describe*", "kms:List*", "kms:Decrypt"
            ],
            resources=[
                Fn.import_value(
                    BaseKMS.get_kms_arn_output_export_name(envs))
            ],
        ))
    self.api.task_definition.add_to_task_role_policy(
        PolicyStatement(actions=["ssm:DescribeParameters"], resources=["*"]))
    # Parameter reads are scoped to this app's SSM namespace.
    self.api.task_definition.add_to_task_role_policy(
        PolicyStatement(
            actions=["ssm:GetParameters*"],
            resources=[
                f"arn:aws:ssm:{self.region}:{self.account}:parameter/schema-cms-app/*"
            ],
        ))