def create_vpc(self, scope: BaseApp, eks_enabled: bool = True) -> Vpc:
    vpc = Vpc(
        self,
        scope.prefixed_str(scope.environment_config.get('vpc', {}).get('name')),
        cidr=scope.environment_config.get('vpc', {}).get('cidr'),
        max_azs=scope.environment_config.get('vpc', {}).get('maxAZs'),
        enable_dns_hostnames=True,
        enable_dns_support=True,
        subnet_configuration=self._get_subnet_configuration(scope),
    )

    if eks_enabled:
        # Tag the subnets so that Kubernetes can discover them when placing
        # public and internal load balancers for the cluster.
        cluster_name = scope.prefixed_str(
            scope.environment_config.get('eks', {}).get('clusterName'))
        for subnet in vpc.public_subnets:
            Tag.add(subnet, "kubernetes.io/role/elb", "1")
            Tag.add(subnet, f"kubernetes.io/cluster/{cluster_name}", "shared")
        for subnet in vpc.private_subnets:
            Tag.add(subnet, "kubernetes.io/role/internal-elb", "1")
            Tag.add(subnet, f"kubernetes.io/cluster/{cluster_name}", "shared")

    if scope.environment_config.get('vpc', {}).get('bastionHost', {}).get('enabled'):
        BastionHostLinux(
            self,
            scope.prefixed_str('BastionHost'),
            vpc=vpc,
            instance_type=InstanceType(
                scope.environment_config.get('vpc', {}).get('bastionHost', {}).get('instanceType')),
            instance_name=scope.prefixed_str('BastionHost'))

    return vpc
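
# A minimal sketch, not from the source, of the environment configuration that
# create_vpc() above expects to find on `scope.environment_config`. Only the
# key names are taken from the lookups in the function; every value below is an
# illustrative placeholder.
EXAMPLE_ENVIRONMENT_CONFIG = {
    'vpc': {
        'name': 'app-vpc',             # becomes the prefixed VPC construct id
        'cidr': '10.0.0.0/16',         # address space for the VPC
        'maxAZs': 3,                   # spread subnets across up to 3 AZs
        'bastionHost': {
            'enabled': True,           # provision the optional bastion host
            'instanceType': 't3.micro',
        },
    },
    'eks': {
        'clusterName': 'app-cluster',  # feeds the kubernetes.io/cluster/* tags
    },
}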
def __init__(self, scope: Construct, stack_id: str, *, props: ServiceTierProps, **kwargs):
    """
    Initialize a new instance of ServiceTier

    :param scope: The scope of this construct.
    :param stack_id: The ID of this construct.
    :param props: The properties for this construct.
    :param kwargs: Any kwargs that need to be passed on to the parent class.
    """
    super().__init__(scope, stack_id, **kwargs)

    # Bastion instance for convenience (e.g. SSH into RenderQueue and WorkerFleet instances).
    # Not a critical component of the farm, so this can be safely removed. An alternative way
    # to access your hosts is provided by the Session Manager, which is also configured
    # later in this example.
    self.bastion = BastionHostLinux(
        self,
        'Bastion',
        vpc=props.vpc,
        subnet_selection=SubnetSelection(
            subnet_group_name=subnets.PUBLIC.name),
        block_devices=[
            BlockDevice(device_name='/dev/xvda',
                        volume=BlockDeviceVolume.ebs(50, encrypted=True))
        ])

    # Mount the root of the EFS file-system onto the bastion for convenience.
    # This can safely be removed.
    MountableEfs(
        self,
        filesystem=props.mountable_file_system.file_system
    ).mount_to_linux_instance(self.bastion.instance, location='/mnt/efs')

    self.version = VersionQuery(self, 'Version', version=props.deadline_version)

    secrets_management_settings = SecretsManagementProps(
        enabled=props.enable_secrets_management)
    if props.enable_secrets_management and props.secrets_management_secret_arn is not None:
        secrets_management_settings["credentials"] = Secret.from_secret_arn(
            self, 'SMAdminUser', props.secrets_management_secret_arn)

    repository = Repository(
        self,
        'Repository',
        vpc=props.vpc,
        vpc_subnets=SubnetSelection(
            subnet_group_name=subnets.INFRASTRUCTURE.name),
        database=props.database,
        file_system=props.mountable_file_system,
        repository_installation_timeout=Duration.minutes(20),
        repository_installation_prefix='/',
        version=self.version,
        secrets_management_settings=secrets_management_settings)

    images = ThinkboxDockerImages(
        self,
        'Images',
        version=self.version,
        user_aws_thinkbox_eula_acceptance=props.accept_aws_thinkbox_eula)

    server_cert = X509CertificatePem(
        self,
        'RQCert',
        subject=DistinguishedName(
            cn=f'renderqueue.{props.dns_zone.zone_name}',
            o='RFDK-Sample',
            ou='RenderQueueExternal'),
        signing_certificate=props.root_ca)

    self.render_queue = RenderQueue(
        self,
        'RenderQueue',
        vpc=props.vpc,
        vpc_subnets=SubnetSelection(
            subnet_group_name=subnets.INFRASTRUCTURE.name),
        # It is considered good practice to put the Render Queue's load balancer in dedicated subnets because:
        #
        # 1. Deadline Secrets Management identity registration settings will be scoped down to least-privilege
        #    (see https://github.com/aws/aws-rfdk/blob/release/packages/aws-rfdk/lib/deadline/README.md#render-queue-subnet-placement)
        # 2. The load balancer can scale to use IP addresses in the subnet without conflicts from other AWS
        #    resources
        #    (see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#subnets-load-balancer)
        vpc_subnets_alb=SubnetSelection(
            subnet_group_name=subnets.RENDER_QUEUE_ALB.name),
        images=images,
        repository=repository,
        hostname=RenderQueueHostNameProps(hostname='renderqueue',
                                          zone=props.dns_zone),
        traffic_encryption=RenderQueueTrafficEncryptionProps(
            external_tls=RenderQueueExternalTLSProps(
                rfdk_certificate=server_cert),
            internal_protocol=ApplicationProtocol.HTTPS),
        version=self.version,
        # TODO - Evaluate deletion protection for your own needs. This is set to false to
        # cleanly remove everything when this stack is destroyed. If you would like to ensure
        # that this resource is not accidentally deleted, you should set this to true.
        deletion_protection=False,
        # Enable a local transparent filesystem cache of the Repository filesystem to reduce
        # data traffic from the Repository's filesystem.
        # For an EFS or NFS filesystem, this requires the 'fsc' mount option.
        enable_local_file_caching=True,
    )
    self.render_queue.connections.allow_default_port_from(self.bastion)

    # This is an optional feature that will set up your EC2 instances to be enabled for use with
    # the Session Manager. RFDK deploys EC2 instances that aren't available through a public subnet,
    # so connecting to them by SSH isn't easy. This is an option to quickly access hosts without
    # using a bastion instance.
    # It's important to note that the permissions need to be granted to the render queue's ASG,
    # rather than the render queue itself.
    SessionManagerHelper.grant_permissions_to(self.render_queue.asg)

    if props.ubl_licenses:
        if not props.ubl_certs_secret_arn:
            raise ValueError(
                'UBL certificates secret ARN is required when using UBL but was not specified.')
        ubl_cert_secret = Secret.from_secret_arn(
            self, 'ublcertssecret', props.ubl_certs_secret_arn)
        self.ubl_licensing = UsageBasedLicensing(
            self,
            'UsageBasedLicensing',
            vpc=props.vpc,
            vpc_subnets=SubnetSelection(
                subnet_group_name=subnets.USAGE_BASED_LICENSING.name),
            images=images,
            licenses=props.ubl_licenses,
            render_queue=self.render_queue,
            certificate_secret=ubl_cert_secret,
        )

        # Another optional usage of the SessionManagerHelper that demonstrates how to configure the UBL
        # construct's ASG for access. Note that this construct also requires you to apply the permissions
        # to its ASG property.
        SessionManagerHelper.grant_permissions_to(self.ubl_licensing.asg)
    else:
        self.ubl_licensing = None
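
# A hypothetical wiring sketch, not from the source, showing how a ServiceTier
# like the one above is typically instantiated from an app entry point. The
# ServiceTierProps fields mirror the attributes read in __init__ above, while
# `app`, `env`, `network`, `storage`, and `security` are assumed to exist
# (an App, an Environment, and upstream network/storage/security stacks).
service_tier = ServiceTier(
    app, 'ServiceTier',
    props=ServiceTierProps(
        vpc=network.vpc,
        database=storage.database,
        mountable_file_system=storage.mountable_file_system,
        root_ca=security.root_ca,
        dns_zone=network.dns_zone,
        deadline_version='10.1.19',  # placeholder Deadline version string
        accept_aws_thinkbox_eula=AwsThinkboxEulaAcceptance.USER_ACCEPTS_AWS_THINKBOX_EULA,
        ubl_licenses=[],             # no usage-based licensing in this sketch
        ubl_certs_secret_arn=None,
        enable_secrets_management=True,
        secrets_management_secret_arn=None,  # let RFDK generate the credentials
    ),
    env=env)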
def __init__(self, scope: Construct, stack_id: str, *, props: ServiceTierProps, **kwargs):
    """
    Initialize a new instance of ServiceTier

    :param scope: The scope of this construct.
    :param stack_id: The ID of this construct.
    :param props: The properties for this construct.
    :param kwargs: Any kwargs that need to be passed on to the parent class.
    """
    super().__init__(scope, stack_id, **kwargs)

    # Bastion instance for convenience (e.g. SSH into RenderQueue and WorkerFleet instances).
    # Not a critical component of the farm, so this can be safely removed. An alternative way
    # to access your hosts is provided by the Session Manager, which is also configured
    # later in this example.
    self.bastion = BastionHostLinux(
        self,
        'Bastion',
        vpc=props.vpc,
        subnet_selection=SubnetSelection(subnet_type=SubnetType.PUBLIC),
        block_devices=[
            BlockDevice(device_name='/dev/xvda',
                        volume=BlockDeviceVolume.ebs(50, encrypted=True))
        ])

    # Granting the bastion access to the file system mount for convenience.
    # This can also safely be removed.
    props.file_system.mount_to_linux_instance(self.bastion.instance,
                                              location='/mnt/efs')

    recipes = ThinkboxDockerRecipes(
        self,
        'Image',
        stage=Stage.from_directory(props.docker_recipes_stage_path))

    repository = Repository(
        self,
        'Repository',
        vpc=props.vpc,
        database=props.database,
        file_system=props.file_system,
        repository_installation_timeout=Duration.minutes(20),
        version=recipes.version,
    )

    server_cert = X509CertificatePem(
        self,
        'RQCert',
        subject=DistinguishedName(
            cn=f'renderqueue.{props.dns_zone.zone_name}',
            o='RFDK-Sample',
            ou='RenderQueueExternal'),
        signing_certificate=props.root_ca)

    self.render_queue = RenderQueue(
        self,
        'RenderQueue',
        vpc=props.vpc,
        images=recipes.render_queue_images,
        repository=repository,
        hostname=RenderQueueHostNameProps(hostname='renderqueue',
                                          zone=props.dns_zone),
        traffic_encryption=RenderQueueTrafficEncryptionProps(
            external_tls=RenderQueueExternalTLSProps(
                rfdk_certificate=server_cert),
            internal_protocol=ApplicationProtocol.HTTPS),
        version=recipes.version,
        # TODO - Evaluate deletion protection for your own needs. This is set to false to
        # cleanly remove everything when this stack is destroyed. If you would like to ensure
        # that this resource is not accidentally deleted, you should set this to true.
        deletion_protection=False)
    self.render_queue.connections.allow_default_port_from(self.bastion)

    # This is an optional feature that will set up your EC2 instances to be enabled for use with
    # the Session Manager. RFDK deploys EC2 instances that aren't available through a public subnet,
    # so connecting to them by SSH isn't easy. This is an option to quickly access hosts without
    # using a bastion instance.
    # It's important to note that the permissions need to be granted to the render queue's ASG,
    # rather than the render queue itself.
    SessionManagerHelper.grant_permissions_to(self.render_queue.asg)

    if props.ubl_licenses:
        if not props.ubl_certs_secret_arn:
            raise ValueError(
                'UBL certificates secret ARN is required when using UBL but was not specified.')
        ubl_cert_secret = Secret.from_secret_arn(
            self, 'ublcertssecret', props.ubl_certs_secret_arn)
        self.ubl_licensing = UsageBasedLicensing(
            self,
            'usagebasedlicensing',
            vpc=props.vpc,
            images=recipes.ubl_images,
            licenses=props.ubl_licenses,
            render_queue=self.render_queue,
            certificate_secret=ubl_cert_secret,
        )

        # Another optional usage of the SessionManagerHelper that demonstrates how to configure the UBL
        # construct's ASG for access. Note that this construct also requires you to apply the permissions
        # to its ASG property.
        SessionManagerHelper.grant_permissions_to(self.ubl_licensing.asg)
    else:
        self.ubl_licensing = None
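
# A hypothetical compute-tier sketch, not from the source: the comments above
# mention SSH-ing into "WorkerFleet instances", which in RFDK corresponds to a
# WorkerInstanceFleet attached to the render queue. `props.worker_machine_image`
# is an assumed prop naming an AMI with the Deadline client installed.
worker_fleet = WorkerInstanceFleet(
    self, 'WorkerFleet',
    vpc=props.vpc,
    render_queue=self.render_queue,
    worker_machine_image=props.worker_machine_image,
)
# Let the bastion reach the workers' listener port, mirroring the
# allow_default_port_from() call on the render queue above.
worker_fleet.allow_listener_port_from(self.bastion)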
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    # Create two VPCs - one to host our private website, the other to act as a client
    website_vpc = Vpc(
        self,
        "WEBSITEVPC",
        cidr="10.0.0.0/16",
    )
    client_vpc = Vpc(
        self,
        "ClientVPC",
        cidr="10.1.0.0/16",
    )

    # Create a bastion host in the client VPC which will act as our client workstation
    bastion = BastionHostLinux(
        self,
        "WEBClient",
        vpc=client_vpc,
        instance_name='my-bastion',
        instance_type=InstanceType('t3.micro'),
        machine_image=AmazonLinuxImage(),
        subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE),
        security_group=SecurityGroup(
            scope=self,
            id='bastion-sg',
            security_group_name='bastion-sg',
            description='Security group for the bastion. No inbound rules are open '
                        'because we access the bastion via AWS SSM.',
            vpc=client_vpc,
            allow_all_outbound=True))

    # Set up a VPC peering connection between the client and website VPCs, and adjust
    # the routing tables to allow connections back and forth
    VpcPeeringHelper(self, 'Peering', website_vpc, client_vpc)

    # Create a VPC endpoint for API Gateway in the website VPC
    vpc_endpoint = InterfaceVpcEndpoint(
        self,
        'APIGWVpcEndpoint',
        vpc=website_vpc,
        service=InterfaceVpcEndpointAwsService.APIGATEWAY,
        private_dns_enabled=True,
    )
    vpc_endpoint.connections.allow_from(bastion, Port.tcp(443))
    endpoint_id = vpc_endpoint.vpc_endpoint_id

    # Restrict API invocations to traffic that arrives through our VPC endpoint
    api_policy = iam.PolicyDocument(statements=[
        iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                            actions=['execute-api:Invoke'],
                            resources=['execute-api:/*'],
                            effect=iam.Effect.DENY,
                            conditions={
                                "StringNotEquals": {
                                    "aws:SourceVpce": endpoint_id
                                }
                            }),
        iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                            actions=['execute-api:Invoke'],
                            resources=['execute-api:/*'],
                            effect=iam.Effect.ALLOW)
    ])

    # Create an S3 bucket to hold the content
    content_bucket = s3.Bucket(self, "ContentBucket",
                               removal_policy=core.RemovalPolicy.DESTROY)

    # Upload our static content to the bucket
    s3dep.BucketDeployment(self, "DeployWithInvalidation",
                           sources=[s3dep.Source.asset('website')],
                           destination_bucket=content_bucket)

    # Create a private API Gateway in the website VPC
    api = apigw.RestApi(self, 'PrivateS3Api',
                        endpoint_configuration=apigw.EndpointConfiguration(
                            types=[apigw.EndpointType.PRIVATE],
                            vpc_endpoints=[vpc_endpoint]),
                        policy=api_policy)

    # Create a role that allows API Gateway to read our S3 bucket contents
    role = iam.Role(
        self, "Role",
        assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"))
    role.add_to_policy(
        iam.PolicyStatement(effect=iam.Effect.ALLOW,
                            resources=[
                                content_bucket.bucket_arn,
                                content_bucket.bucket_arn + '/*'
                            ],
                            actions=["s3:Get*"]))

    # Create a proxy resource that captures all non-root resource requests
    resource = api.root.add_resource("{proxy+}")

    # Create an integration with S3
    resource_integration = apigw.Integration(
        type=apigw.IntegrationType.AWS,
        integration_http_method='GET',
        options=apigw.IntegrationOptions(
            # Map the proxy path parameter so we can pass the request path to S3
            request_parameters={
                "integration.request.path.proxy": "method.request.path.proxy"
            },
            integration_responses=[
                apigw.IntegrationResponse(
                    status_code='200',
                    # Map the content type of the S3 object back to the HTTP response
                    response_parameters={
                        "method.response.header.Content-Type":
                            "integration.response.header.Content-Type"
                    })
            ],
            credentials_role=role),
        # Reference the bucket content we want to retrieve (note the hard-coded region)
        uri='arn:aws:apigateway:eu-west-1:s3:path/%s/{proxy}' %
            (content_bucket.bucket_name))

    # Handle the GET request and map it to our new integration
    resource.add_method(
        "GET",
        resource_integration,
        method_responses=[
            apigw.MethodResponse(status_code='200',
                                 response_parameters={
                                     "method.response.header.Content-Type": False
                                 })
        ],
        request_parameters={"method.request.path.proxy": True})

    # Handle requests to the root of our site.
    # Create another integration with S3 - this time with no proxy parameter
    resource_integration = apigw.Integration(
        type=apigw.IntegrationType.AWS,
        integration_http_method='GET',
        options=apigw.IntegrationOptions(
            integration_responses=[
                apigw.IntegrationResponse(
                    status_code='200',
                    # Map the content type of the S3 object back to the HTTP response
                    response_parameters={
                        "method.response.header.Content-Type":
                            "integration.response.header.Content-Type"
                    })
            ],
            credentials_role=role),
        # Reference the bucket content we want to retrieve
        uri='arn:aws:apigateway:eu-west-1:s3:path/%s/index.html' %
            (content_bucket.bucket_name))

    # Handle the GET request and map it to our new integration
    api.root.add_method("GET",
                        resource_integration,
                        method_responses=[
                            apigw.MethodResponse(
                                status_code='200',
                                response_parameters={
                                    "method.response.header.Content-Type": False
                                })
                        ])
def __init__(self, scope: Construct, stack_id: str, *, props: ServiceTierProps, **kwargs):
    """
    Initialize a new instance of ServiceTier

    :param scope: The scope of this construct.
    :param stack_id: The ID of this construct.
    :param props: The properties for this construct.
    :param kwargs: Any kwargs that need to be passed on to the parent class.
    """
    super().__init__(scope, stack_id, **kwargs)

    # A bastion host to connect to the render farm with.
    # The bastion host is for convenience (e.g. SSH into RenderQueue and WorkerFleet instances).
    # This is not a critical component of the farm, so can safely be removed.
    self.bastion = BastionHostLinux(
        self,
        'Bastion',
        vpc=props.vpc,
        subnet_selection=SubnetSelection(subnet_type=SubnetType.PUBLIC),
        block_devices=[
            BlockDevice(device_name='/dev/xvda',
                        volume=BlockDeviceVolume.ebs(50, encrypted=True))
        ])

    # Granting the bastion access to the file system mount for convenience.
    # This can also safely be removed.
    props.file_system.mount_to_linux_instance(self.bastion.instance,
                                              location='/mnt/efs')

    recipes = ThinkboxDockerRecipes(
        self,
        'Image',
        stage=Stage.from_directory(props.docker_recipes_stage_path))

    repository = Repository(
        self,
        'Repository',
        vpc=props.vpc,
        version=recipes.version,
        database=props.database,
        file_system=props.file_system,
        repository_installation_timeout=Duration.minutes(20))

    server_cert = X509CertificatePem(
        self,
        'RQCert',
        subject=DistinguishedName(
            cn=f'renderqueue.{props.dns_zone.zone_name}',
            o='RFDK-Sample',
            ou='RenderQueueExternal'),
        signing_certificate=props.root_ca)

    self.render_queue = RenderQueue(
        self,
        'RenderQueue',
        vpc=props.vpc,
        version=recipes.version,
        images=recipes.render_queue_images,
        repository=repository,
        hostname=RenderQueueHostNameProps(hostname='renderqueue',
                                          zone=props.dns_zone),
        traffic_encryption=RenderQueueTrafficEncryptionProps(
            external_tls=RenderQueueExternalTLSProps(
                rfdk_certificate=server_cert),
            internal_protocol=ApplicationProtocol.HTTPS),
        # TODO - Evaluate deletion protection for your own needs. This is set to false to
        # cleanly remove everything when this stack is destroyed. If you would like to ensure
        # that this resource is not accidentally deleted, you should set this to true.
        deletion_protection=False)
    self.render_queue.connections.allow_default_port_from(self.bastion)

    if props.ubl_licenses:
        if not props.ubl_certs_secret_arn:
            raise ValueError(
                'UBL certificates secret ARN is required when using UBL but was not specified.')
        ubl_cert_secret = Secret.from_secret_arn(
            self, 'ublcertssecret', props.ubl_certs_secret_arn)
        self.ubl_licensing = UsageBasedLicensing(
            self,
            'usagebasedlicensing',
            vpc=props.vpc,
            images=recipes.ubl_images,
            licenses=props.ubl_licenses,
            render_queue=self.render_queue,
            certificate_secret=ubl_cert_secret,
        )