def __init__(self, scope: Construct, stack_id: str, *, props: StorageTierProps, **kwargs):
    """
    Initializes a new instance of StorageTier.

    :param scope: The scope of this construct.
    :param stack_id: The ID of this construct.
    :param props: The properties for the storage tier.
    :param kwargs: Any kwargs that need to be passed on to the parent class.
    """
    super().__init__(scope, stack_id, **kwargs)

    # The file system to use (e.g. to install Deadline Repository onto).
    self.file_system = MountableEfs(
        self,
        filesystem=FileSystem(
            self,
            'EfsFileSystem',
            vpc=props.vpc,
            # Encrypt data at rest. This matches the other StorageTier variants in this
            # file and is the recommended default for an EFS holding repository data.
            encrypted=True,
            # TODO - Evaluate this removal policy for your own needs. This is set to DESTROY to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that your data is not accidentally deleted, you should modify this value.
            removal_policy=RemovalPolicy.DESTROY
        )
    )

    # The database to connect Deadline to.
    self.database: Optional[DatabaseConnection] = None
def get_file_system(scope: Construct) -> FileSystem:
    """Look up the shared EFS file-system exported by the volume stack.

    Resolves the file-system ID and the NFS security-group ID from the
    volume stack's CloudFormation exports, and wraps them in an imported
    ``FileSystem`` construct.

    :param scope: Construct scope in which the imported resources are created.
    :return: The imported EFS file-system.
    """
    volume_config = get_volume_config()
    exporting_stack = volume_config.stack_name

    nfs_sg = SecurityGroup.from_security_group_id(
        scope,
        'nfs_security_group',
        security_group_id=Fn.import_value(f'{exporting_stack}SecurityGroupId'),
    )
    return FileSystem.from_file_system_attributes(
        scope,
        'filesystem',
        file_system_id=Fn.import_value(f'{exporting_stack}FileSystemId'),
        security_group=nfs_sg,
    )
def __init__(self, scope: Construct, stack_id: str, *, props: StorageTierProps, **kwargs):
    """
    Initializes a new instance of StorageTier.

    :param scope: The scope of this construct.
    :param stack_id: The ID of this construct.
    :param props: The properties for the storage tier.
    :param kwargs: Any kwargs that need to be passed on to the parent class.
    """
    super().__init__(scope, stack_id, **kwargs)

    # The encrypted EFS file-system that will hold the Deadline Repository.
    # TODO - Evaluate this removal policy for your own needs. DESTROY removes
    # everything along with the stack; change it if the data must survive
    # accidental stack deletion.
    efs = FileSystem(
        self,
        'EfsFileSystem',
        vpc=props.vpc,
        encrypted=True,
        removal_policy=RemovalPolicy.DESTROY)

    # An EFS access point through which the Repository and RenderQueue get
    # write access to the Deadline Repository directory in the file-system.
    repository_access_point = AccessPoint(
        self,
        'AccessPoint',
        file_system=efs,
        # If the directory at "path" does not exist, the access point creates
        # it owned by this UID/GID. These must grant read and write access to
        # the UID/GID configured in the "posix_user" property below.
        create_acl=Acl(
            owner_uid='10000',
            owner_gid='10000',
            permissions='750',
        ),
        # Mounts made through this access point are rooted at this EFS path.
        path='/DeadlineRepository',
        # TODO - All file-system operations performed through the access point
        # run as this UID/GID instead of the user on the mounting host. If the
        # same EFS also serves other purposes (e.g. render assets, plug-in
        # storage), re-evaluate these values for your requirements.
        posix_user=PosixUser(uid='10000', gid='10000'))

    self.mountable_file_system = MountableEfs(self, filesystem=efs, access_point=repository_access_point)

    # The database to connect Deadline to.
    self.database: Optional[DatabaseConnection] = None
def do_it(self):
    """Create the EFS volume, export its identifiers, and tag it.

    Builds an EFS file-system in the stack's VPC, guarded by the NFS
    security group, with the 90-day infrequent-access lifecycle policy.
    Exports the file-system and security-group IDs for other stacks to
    import, then applies the standard tags.

    :return: The newly created EFS file-system construct.
    """
    network = get_vpc(self._stack)
    sg = self.get_nfs_security_group(network)

    volume = FileSystem(
        self._stack,
        self._config.volume_name,
        vpc=network,
        security_group=sg,
        # Transition files not accessed for 90 days to the cheaper IA class.
        lifecycle_policy=LifecyclePolicy.AFTER_90_DAYS,
    )

    self._export('FileSystemId', volume.file_system_id)
    self._export('SecurityGroupId', sg.security_group_id)
    self._tag_it(volume)
    return volume
def __init__(self, scope: Construct, stack_id: str, *, props: StorageTierProps, **kwargs):
    """
    Initializes a new instance of StorageTier

    :param scope: The scope of this construct.
    :param stack_id: The ID of this construct.
    :param props: The properties for the storage tier.
    :param kwargs: Any kwargs that need to be passed on to the parent class.
    """
    super().__init__(scope, stack_id, **kwargs)

    # The file-system to use (e.g. to install Deadline Repository onto).
    file_system = FileSystem(
        self,
        'EfsFileSystem',
        vpc=props.vpc,
        encrypted=True,
        # TODO - Evaluate this removal policy for your own needs. This is set to DESTROY to
        # cleanly remove everything when this stack is destroyed. If you would like to ensure
        # that your data is not accidentally deleted, you should modify this value.
        removal_policy=RemovalPolicy.DESTROY)

    # Create an EFS access point that is used to grant the Repository and RenderQueue with write access to the
    # Deadline Repository directory in the EFS file-system.
    access_point = AccessPoint(
        self,
        'AccessPoint',
        file_system=file_system,
        # The AccessPoint will create the directory (denoted by the path property below) if it doesn't exist with
        # the owning UID/GID set as specified here. These should be set up to grant read and write access to the
        # UID/GID configured in the "posix_user" property below.
        create_acl=Acl(
            owner_uid='10000',
            owner_gid='10000',
            permissions='750',
        ),
        # When you mount the EFS via the access point, the mount will be rooted at this path in the EFS file-system
        path='/DeadlineRepository',
        # TODO - When you mount the EFS via the access point, all file-system operations will be performed using
        # these UID/GID values instead of those from the user on the system where the EFS is mounted. If you intend
        # to use the same EFS file-system for other purposes (e.g. render assets, plug-in storage), you may want to
        # evaluate the UID/GID permissions based on your requirements.
        posix_user=PosixUser(uid='10000', gid='10000'))

    self.mountable_file_system = MountableEfs(
        self,
        filesystem=file_system,
        access_point=access_point,
        # We have enable_local_file_caching set to True on the RenderQueue in the
        # Service Tier. EFS requires the 'fsc' mount option to take advantage of
        # that.
        extra_mount_options=['fsc'])

    # The database to connect Deadline to.
    self.database: Optional[DatabaseConnection] = None

    # The Amazon EFS filesystem deployed above has been deployed in bursting throughput
    # mode. This means that it can burst throughput up to 100 MiB/s (with reads counting as
    # 1/3 of their actual throughput for this purpose). However, the baseline throughput of the EFS
    # is 50 KiB/s per 1 GiB stored in the filesystem and exceeding this throughput consumes burst credits.
    # An EFS starts with a large amount of burst credits, and regains credits when throughput is below
    # the baseline throughput threshold.
    #
    # The Deadline Repository is approximately 1 GiB in size; resulting in 50 KiB/s baseline throughput, which is
    # not sufficient for the operation of Deadline.
    #
    # The following:
    # 1) Sets up a series of AWS CloudWatch Alarms that will send you an email to alert you to take action
    # to increase the data stored in the filesystem when the burst credits have decreased below certain thresholds.
    # If you run out of burst credits on the filesystem, then Deadline will start timing-out on requests and your
    # render farm may become unstable.
    # 2) Uses RFDK's PadEfsStorage construct to add data to the EFS for the purpose of increasing the amount
    # of stored data to increase the baseline throughput.
    #
    # See: https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html
    # for more information on AWS CloudWatch Alarms.
    # See: https://docs.aws.amazon.com/efs/latest/ug/performance.html#throughput-modes
    # for more information on Amazon EFS throughput modes.
    if props.alarm_email:
        self.add_low_efs_burst_credit_alarms(file_system, props.alarm_email)

    # Add padding files to the filesystem to increase baseline throughput. We add files to the filesystem to
    # increase this baseline throughput, while retaining the ability to burst throughput. See RFDK's PadEfsStorage
    # documentation for additional details.
    pad_access_point = AccessPoint(
        self,
        'PaddingAccessPoint',
        file_system=file_system,
        path='/RFDK_PaddingFiles',
        # TODO - We set the padding files to be owned by root (uid/gid = 0) by default. You may wish to change this.
        create_acl=Acl(
            owner_gid='0',
            owner_uid='0',
            permissions='700',
        ),
        posix_user=PosixUser(
            uid='0',
            gid='0',
        ),
    )
    PadEfsStorage(
        self,
        'PadEfsStorage',
        vpc=props.vpc,
        access_point=pad_access_point,
        desired_padding=Size.gibibytes(
            40
        ),  # Provides 2 MiB/s of baseline throughput. Costs $12/month.
    )
def __init__(
    self,
    scope: Construct,
    id: str,
    *,
    deployment: Deployment,
    policy: Policy,
    cluster: ICluster,
    vpc: IVpc,
    **kwargs,
) -> None:
    """Deploy the TrueWiki service as an HTTPS container on the ECS cluster.

    Creates an EFS file-system for the wiki cache, wires the service's
    secrets from SSM Parameter Store, and launches the container with
    deployment-specific settings (counts, priority, memory, GitHub repos).

    :param scope: The scope of this construct.
    :param id: The ID of this construct.
    :param deployment: Which deployment (production/staging) is being built.
    :param policy: Policy object that this stack registers itself with.
    :param cluster: The ECS cluster to run the container on.
    :param vpc: The VPC hosting the cluster and the EFS.
    :param kwargs: Any kwargs to pass on to the parent class.
    """
    super().__init__(scope, id, **kwargs)

    Tags.of(self).add("Application", self.application_name)
    Tags.of(self).add("Deployment", deployment.value)

    policy.add_stack(self)

    # EFS volume backing the wiki's on-disk cache.
    efs_cache = FileSystem(
        self,
        "WikiCacheEFS",
        vpc=vpc,
    )
    # Allow NFS traffic from the ECS cluster to the file-system.
    efs_cache.connections.allow_default_port_from(cluster)

    # Deployment-specific settings: production and staging differ in listener
    # priority, memory, and which wiki-data repositories/URLs are used.
    if deployment == Deployment.PRODUCTION:
        desired_count = 1  # Currently this pod is stateful, and as such cannot be run more than once
        priority = 80
        memory = 384
        github_url = "[email protected]:OpenTTD/wiki-data.git"
        github_history_url = "https://github.com/OpenTTD/wiki-data"
        frontend_url = "https://wiki.openttd.org"
    else:
        desired_count = 1
        priority = 180
        memory = 128
        github_url = "[email protected]:OpenTTD/wiki-data-staging.git"
        github_history_url = "https://github.com/OpenTTD/wiki-data-staging"
        frontend_url = "https://wiki.staging.openttd.org"

    # Secure strings in SSM Parameter Store, injected into the container as secrets below.
    sentry_dsn = parameter_store.add_secure_string(f"/Wiki/{deployment.value}/SentryDSN").parameter
    user_github_client_id = parameter_store.add_secure_string(f"/Wiki/{deployment.value}/UserGithubClientId").parameter
    user_github_client_secret = parameter_store.add_secure_string(f"/Wiki/{deployment.value}/UserGithubClientSecret").parameter
    storage_github_private_key = parameter_store.add_secure_string(f"/Wiki/{deployment.value}/StorageGithubPrivateKey").parameter
    reload_secret = parameter_store.add_secure_string(f"/Wiki/{deployment.value}/ReloadSecret").parameter

    self.container = ECSHTTPSContainer(
        self,
        self.application_name,
        subdomain_name=self.subdomain_name,
        deployment=deployment,
        policy=policy,
        application_name=self.application_name,
        image_name="ghcr.io/truebrain/truewiki",
        port=80,
        memory_limit_mib=memory,
        desired_count=desired_count,
        cluster=cluster,
        priority=priority,
        command=[
            "--storage",
            "github",
            "--storage-github-url",
            github_url,
            "--storage-github-history-url",
            github_history_url,
            "--storage-folder",
            "/data",
            "--user",
            "github",
            "--frontend-url",
            frontend_url,
            # NOTE(review): only "/cache" is EFS-backed (see volumes below);
            # "/cache-pages" appears to live on container-local storage — confirm
            # that this is intentional.
            "--cache-metadata-file",
            "/cache/metadata.json",
            "--cache-page-folder",
            "/cache-pages",
            "--bind",
            "0.0.0.0",
        ],
        environment={
            "TRUEWIKI_SENTRY_ENVIRONMENT": deployment.value.lower(),
        },
        secrets={
            "TRUEWIKI_SENTRY_DSN": Secret.from_ssm_parameter(sentry_dsn),
            "TRUEWIKI_USER_GITHUB_CLIENT_ID": Secret.from_ssm_parameter(user_github_client_id),
            "TRUEWIKI_USER_GITHUB_CLIENT_SECRET": Secret.from_ssm_parameter(user_github_client_secret),
            "TRUEWIKI_STORAGE_GITHUB_PRIVATE_KEY": Secret.from_ssm_parameter(storage_github_private_key),
            "TRUEWIKI_RELOAD_SECRET": Secret.from_ssm_parameter(reload_secret),
        },
        volumes={
            "/cache": Volume(
                name="cache",
                efs_volume_configuration=EfsVolumeConfiguration(
                    file_system_id=efs_cache.file_system_id,
                ),
            ),
        },
    )
def __init__(
    self,
    scope: Construct,
    id: str,
    *,
    deployment: Deployment,
    policy: Policy,
    cluster: ICluster,
    vpc: IVpc,
    **kwargs,
) -> None:
    """Deploy the Eints translation service as an HTTPS container on the ECS cluster.

    Creates an EFS file-system for Eints' data, wires secrets from SSM
    Parameter Store, and launches the container with deployment-specific
    settings (priority, memory).

    :param scope: The scope of this construct.
    :param id: The ID of this construct.
    :param deployment: Which deployment (production/staging) is being built.
    :param policy: Policy object that this stack registers itself with.
    :param cluster: The ECS cluster to run the container on.
    :param vpc: The VPC hosting the cluster and the EFS.
    :param kwargs: Any kwargs to pass on to the parent class.
    """
    super().__init__(scope, id, **kwargs)

    Tags.of(self).add("Application", self.application_name)
    Tags.of(self).add("Deployment", deployment.value)

    policy.add_stack(self)

    # EFS volume backing Eints' persistent data (mounted at /data below).
    efs = FileSystem(
        self,
        "EintsEFS",
        vpc=vpc,
    )
    # Allow NFS traffic from the ECS cluster to the file-system.
    efs.connections.allow_default_port_from(cluster)

    if deployment == Deployment.PRODUCTION:
        desired_count = 1  # Currently this pod is stateful, and as such cannot be run more than once
        priority = 70
        memory = 512
    else:
        desired_count = 1
        priority = 170
        memory = 128

    # Secure strings in SSM Parameter Store, injected into the container as secrets below.
    github_org_api_token = parameter_store.add_secure_string(
        f"/Eints/{deployment.value}/GithubOrgApiToken").parameter
    github_oauth2_client_id = parameter_store.add_secure_string(
        f"/Eints/{deployment.value}/GithubOauth2ClientId").parameter
    github_oauth2_client_secret = parameter_store.add_secure_string(
        f"/Eints/{deployment.value}/GithubOauth2ClientSecret").parameter
    translators_password = parameter_store.add_secure_string(
        f"/Eints/{deployment.value}/TranslatorsPassword").parameter
    sentry_dsn = parameter_store.add_secure_string(
        f"/Eints/{deployment.value}/SentryDSN").parameter

    ECSHTTPSContainer(
        self,
        self.application_name,
        subdomain_name=self.subdomain_name,
        deployment=deployment,
        policy=policy,
        application_name=self.application_name,
        image_name="ghcr.io/openttd/eints-openttd-github",
        port=80,
        memory_limit_mib=memory,
        desired_count=desired_count,
        cluster=cluster,
        priority=priority,
        command=[
            "--server-host",
            "0.0.0.0",
            "--server-port",
            "80",
            "--server-mode",
            "production",
            "--authentication",
            "github",
            "--stable-languages",
            "stable_languages",
            "--unstable-languages",
            "unstable_languages",
            "--project-cache",
            "1",
            "--project-types",
            "openttd",
            "--storage-format",
            "split-languages",
            "--data-format",
            "json",
            "--language-file-size",
            "10000000",
            "--num-backup-files",
            "1",
            "--max-num-changes",
            "5",
            "--min-num-changes",
            "2",
            "--change-stable-age",
            "600",
            "--github-organization",
            "OpenTTD",
        ],
        environment={
            "EINTS_SENTRY_ENVIRONMENT": deployment.value.lower(),
        },
        secrets={
            "EINTS_GITHUB_ORG_API_TOKEN": Secret.from_ssm_parameter(github_org_api_token),
            "EINTS_GITHUB_OAUTH2_CLIENT_ID": Secret.from_ssm_parameter(github_oauth2_client_id),
            "EINTS_GITHUB_OAUTH2_CLIENT_SECRET": Secret.from_ssm_parameter(github_oauth2_client_secret),
            "EINTS_TRANSLATORS_PASSWORD": Secret.from_ssm_parameter(translators_password),
            "EINTS_SENTRY_DSN": Secret.from_ssm_parameter(sentry_dsn),
        },
        volumes={
            "/data": Volume(
                name="data",
                efs_volume_configuration=EfsVolumeConfiguration(
                    file_system_id=efs.file_system_id,
                ),
            )
        },
    )
def __init__(
    self,
    scope: Construct,
    id: str,
    *,
    deployment: Deployment,
    policy: Policy,
    cluster: ICluster,
    vpc: IVpc,
    **kwargs,
) -> None:
    """Deploy the DorpsGek IRC bot as an HTTPS container on the ECS cluster.

    Creates two EFS file-systems (seen-state and channel logs), wires
    secrets from SSM Parameter Store, and launches the container with
    deployment-specific IRC settings (nickname, channels, addressing).

    :param scope: The scope of this construct.
    :param id: The ID of this construct.
    :param deployment: Which deployment (production/staging) is being built.
    :param policy: Policy object that this stack registers itself with.
    :param cluster: The ECS cluster to run the container on.
    :param vpc: The VPC hosting the cluster and the EFS volumes.
    :param kwargs: Any kwargs to pass on to the parent class.
    """
    super().__init__(scope, id, **kwargs)

    Tags.of(self).add("Application", self.application_name)
    Tags.of(self).add("Deployment", deployment.value)

    policy.add_stack(self)

    # EFS volume for the bot's "seen" state (mounted at /code/data below).
    efs_seen = FileSystem(
        self,
        "DorpsGekSeenEFS",
        vpc=vpc,
    )
    efs_seen.connections.allow_default_port_from(cluster)

    # EFS volume for the channel logs (mounted at /code/logs/ChannelLogger below).
    efs_logs = FileSystem(
        self,
        "DorpsGekLogsEFS",
        vpc=vpc,
    )
    efs_logs.connections.allow_default_port_from(cluster)

    # NOTE(review): the irc_username values below are literal "******" strings —
    # presumably redacted before this code was shared; verify the real values are
    # restored (or sourced from configuration) before deploying.
    if deployment == Deployment.PRODUCTION:
        desired_count = 1
        priority = 30
        addressed_by = "@"
        irc_username = "******"
        channels = [
            "--channel",
            "dorpsgek",
            "--channel",
            "openttd,public",
            "--channel",
            "openttd.dev,public",
            "--channel",
            "openttd.notice",
            "--channel",
            "openttd.tgp",
            "--channel",
            "opendune,public",
        ]
    else:
        desired_count = 1
        priority = 130
        addressed_by = "%"
        irc_username = "******"
        channels = [
            "--channel",
            "dorpsgek",
            "--channel",
            "dorpsgek-test,public",
        ]

    # Secure strings in SSM Parameter Store, injected into the container as secrets below.
    sentry_dsn = parameter_store.add_secure_string(
        f"/Dorpsgek/{deployment.value}/SentryDSN").parameter
    github_app_id = parameter_store.add_secure_string(
        f"/Dorpsgek/{deployment.value}/GithubAppId").parameter
    github_app_private_key = parameter_store.add_secure_string(
        f"/Dorpsgek/{deployment.value}/GithubAppPrivateKey").parameter
    github_app_secret = parameter_store.add_secure_string(
        f"/Dorpsgek/{deployment.value}/GithubAppSecret").parameter
    nickserv_password = parameter_store.add_secure_string(
        f"/Dorpsgek/{deployment.value}/NickservPassword").parameter

    ECSHTTPSContainer(
        self,
        self.application_name,
        subdomain_name=self.subdomain_name,
        deployment=deployment,
        policy=policy,
        application_name=self.application_name,
        image_name="ghcr.io/openttd/dorpsgek",
        port=80,
        memory_limit_mib=96,
        desired_count=desired_count,
        cluster=cluster,
        priority=priority,
        # The per-deployment channel list is appended to the fixed arguments.
        command=[
            "--irc-username",
            irc_username,
            "--nickserv-username",
            irc_username,
            "--addressed-by",
            addressed_by,
        ] + channels,
        environment={
            "DORPSGEK_SENTRY_ENVIRONMENT": deployment.value.lower(),
        },
        secrets={
            "DORPSGEK_SENTRY_DSN": Secret.from_ssm_parameter(sentry_dsn),
            "DORPSGEK_GITHUB_APP_ID": Secret.from_ssm_parameter(github_app_id),
            "DORPSGEK_GITHUB_APP_PRIVATE_KEY": Secret.from_ssm_parameter(github_app_private_key),
            "DORPSGEK_GITHUB_APP_SECRET": Secret.from_ssm_parameter(github_app_secret),
            "DORPSGEK_NICKSERV_PASSWORD": Secret.from_ssm_parameter(nickserv_password),
        },
        volumes={
            "/code/data": Volume(
                name="data",
                efs_volume_configuration=EfsVolumeConfiguration(
                    file_system_id=efs_seen.file_system_id,
                ),
            ),
            "/code/logs/ChannelLogger": Volume(
                name="logs",
                efs_volume_configuration=EfsVolumeConfiguration(
                    file_system_id=efs_logs.file_system_id,
                ),
            ),
        },
    )