def __get_block_device_list(self):
    block_device_list = [
        BlockDevice(device_name="/dev/sda1", volume=BlockDeviceVolume.ebs(self.__storage_size)),
        BlockDevice(device_name="/dev/sdb", volume=BlockDeviceVolume.ebs(self.__storage_size))
    ]
    return block_device_list
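# A minimal sketch of how the helper above might be consumed, assuming it sits on a construct
# class that also creates the EC2 instance. The attribute names (__vpc, __instance_type,
# __machine_image), the method name, and the construct ID 'Server' are illustrative assumptions,
# not part of the original code.
from aws_cdk.aws_ec2 import Instance

def __create_server_instance(self) -> Instance:
    return Instance(
        self, 'Server',  # hypothetical construct ID
        vpc=self.__vpc,                      # assumed attribute set in __init__
        instance_type=self.__instance_type,  # assumed attribute set in __init__
        machine_image=self.__machine_image,  # assumed attribute set in __init__
        # Root and data volumes, both sized by self.__storage_size via the helper above.
        block_devices=self.__get_block_device_list(),
    )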
def __init__(self, scope: Construct, stack_id: str, *, props: ServiceTierProps, **kwargs):
    """
    Initialize a new instance of ServiceTier

    :param scope: The scope of this construct.
    :param stack_id: The ID of this construct.
    :param props: The properties for this construct.
    :param kwargs: Any kwargs that need to be passed on to the parent class.
    """
    super().__init__(scope, stack_id, **kwargs)

    # Bastion instance for convenience (e.g. SSH into RenderQueue and WorkerFleet instances).
    # Not a critical component of the farm, so this can be safely removed. An alternative way
    # to access your hosts is also provided by the Session Manager, which is also configured
    # later in this example.
    self.bastion = BastionHostLinux(
        self, 'Bastion',
        vpc=props.vpc,
        subnet_selection=SubnetSelection(
            subnet_group_name=subnets.PUBLIC.name),
        block_devices=[
            BlockDevice(device_name='/dev/xvda',
                        volume=BlockDeviceVolume.ebs(50, encrypted=True))
        ])

    # Mounting the root of the EFS file-system to the bastion for convenient access.
    # This can safely be removed.
    MountableEfs(
        self,
        filesystem=props.mountable_file_system.file_system
    ).mount_to_linux_instance(self.bastion.instance, location='/mnt/efs')

    self.version = VersionQuery(
        self, 'Version',
        version=props.deadline_version)

    secrets_management_settings = SecretsManagementProps(
        enabled=props.enable_secrets_management)
    if props.enable_secrets_management and props.secrets_management_secret_arn is not None:
        secrets_management_settings["credentials"] = Secret.from_secret_arn(
            self, 'SMAdminUser', props.secrets_management_secret_arn)

    repository = Repository(
        self, 'Repository',
        vpc=props.vpc,
        vpc_subnets=SubnetSelection(
            subnet_group_name=subnets.INFRASTRUCTURE.name),
        database=props.database,
        file_system=props.mountable_file_system,
        repository_installation_timeout=Duration.minutes(20),
        repository_installation_prefix='/',
        version=self.version,
        secrets_management_settings=secrets_management_settings)

    images = ThinkboxDockerImages(
        self, 'Images',
        version=self.version,
        user_aws_thinkbox_eula_acceptance=props.accept_aws_thinkbox_eula)

    server_cert = X509CertificatePem(
        self, 'RQCert',
        subject=DistinguishedName(
            cn=f'renderqueue.{props.dns_zone.zone_name}',
            o='RFDK-Sample',
            ou='RenderQueueExternal'),
        signing_certificate=props.root_ca)

    self.render_queue = RenderQueue(
        self, 'RenderQueue',
        vpc=props.vpc,
        vpc_subnets=SubnetSelection(
            subnet_group_name=subnets.INFRASTRUCTURE.name),
        # It is considered good practice to put the Render Queue's load balancer in dedicated subnets because:
        #
        # 1. Deadline Secrets Management identity registration settings will be scoped down to least-privilege
        #    (see https://github.com/aws/aws-rfdk/blob/release/packages/aws-rfdk/lib/deadline/README.md#render-queue-subnet-placement)
        #
        # 2. The load balancer can scale to use IP addresses in the subnet without conflicts from other AWS
        #    resources
        #    (see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#subnets-load-balancer)
        vpc_subnets_alb=SubnetSelection(
            subnet_group_name=subnets.RENDER_QUEUE_ALB.name),
        images=images,
        repository=repository,
        hostname=RenderQueueHostNameProps(
            hostname='renderqueue',
            zone=props.dns_zone),
        traffic_encryption=RenderQueueTrafficEncryptionProps(
            external_tls=RenderQueueExternalTLSProps(
                rfdk_certificate=server_cert),
            internal_protocol=ApplicationProtocol.HTTPS),
        version=self.version,
        # TODO - Evaluate deletion protection for your own needs. This is set to false to
        # cleanly remove everything when this stack is destroyed. If you would like to ensure
        # that this resource is not accidentally deleted, you should set this to true.
        deletion_protection=False,
        # Enable a local transparent filesystem cache of the Repository filesystem to reduce
        # data traffic from the Repository's filesystem.
        # For an EFS or NFS filesystem, this requires the 'fsc' mount option.
        enable_local_file_caching=True,
    )
    self.render_queue.connections.allow_default_port_from(self.bastion)

    # This is an optional feature that will set up your EC2 instances to be enabled for use with
    # the Session Manager. RFDK deploys EC2 instances that aren't available through a public subnet,
    # so connecting to them by SSH isn't easy. This is an option to quickly access hosts without
    # using a bastion instance.
    # It's important to note that the permissions need to be granted to the render queue's ASG,
    # rather than the render queue itself.
    SessionManagerHelper.grant_permissions_to(self.render_queue.asg)

    if props.ubl_licenses:
        if not props.ubl_certs_secret_arn:
            raise ValueError(
                'UBL certificates secret ARN is required when using UBL but was not specified.'
            )
        ubl_cert_secret = Secret.from_secret_arn(
            self, 'ublcertssecret', props.ubl_certs_secret_arn)

        self.ubl_licensing = UsageBasedLicensing(
            self, 'UsageBasedLicensing',
            vpc=props.vpc,
            vpc_subnets=SubnetSelection(
                subnet_group_name=subnets.USAGE_BASED_LICENSING.name),
            images=images,
            licenses=props.ubl_licenses,
            render_queue=self.render_queue,
            certificate_secret=ubl_cert_secret,
        )

        # Another optional usage of the SessionManagerHelper that demonstrates how to configure the UBL
        # construct's ASG for access. Note that this construct also requires you to apply the permissions
        # to its ASG property.
        SessionManagerHelper.grant_permissions_to(self.ubl_licensing.asg)
    else:
        self.ubl_licensing = None
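# A minimal sketch of how this tier might be wired into a CDK app, assuming ServiceTierProps is a
# simple keyword container and that 'app', 'config', 'network', 'storage', and 'security' come
# from surrounding tiers; all of those names are illustrative assumptions, not part of the
# original sample.
service_tier = ServiceTier(
    app, 'ServiceTier',
    props=ServiceTierProps(
        vpc=network.vpc,                                      # assumed network-tier output
        database=storage.database,                            # assumed storage-tier output
        mountable_file_system=storage.mountable_file_system,  # assumed storage-tier output
        root_ca=security.root_ca,                             # assumed security-tier output
        dns_zone=network.dns_zone,                            # assumed network-tier output
        deadline_version=config.deadline_version,
        accept_aws_thinkbox_eula=config.accept_aws_thinkbox_eula,
        enable_secrets_management=config.enable_secrets_management,
        secrets_management_secret_arn=config.secrets_management_secret_arn,
        ubl_licenses=config.ubl_licenses,
        ubl_certs_secret_arn=config.ubl_certs_secret_arn,
    ),
)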
def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, **kwargs) -> None:
    super().__init__(scope, construct_id, **kwargs)

    self.neo4j_user_secret = Secret(self, 'secretsmanager-secret-neo4j-user',
        secret_name=NEO4J_USER_SECRET_NAME
    )

    neo4j_server_instance_role = Role(self, 'iam-role-neo4j-server-instance',
        assumed_by=ServicePrincipal('ec2.amazonaws.com'),
        managed_policies=[
            ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'),  # Use SSM Session Manager rather than straight ssh
            ManagedPolicy.from_aws_managed_policy_name('CloudWatchAgentServerPolicy')
        ],
        inline_policies={
            "work-with-tags": PolicyDocument(
                statements=[
                    PolicyStatement(
                        actions=[
                            'ec2:CreateTags',
                            'ec2:Describe*',
                            'elasticloadbalancing:Describe*',
                            'cloudwatch:ListMetrics',
                            'cloudwatch:GetMetricStatistics',
                            'cloudwatch:Describe*',
                            'autoscaling:Describe*',
                        ],
                        resources=["*"]
                    )
                ]
            ),
            "access-neo4j-user-secret": PolicyDocument(
                statements=[
                    PolicyStatement(
                        actions=['secretsmanager:GetSecretValue'],
                        resources=[self.neo4j_user_secret.secret_arn]
                    )
                ]
            )
        }
    )

    instance_security_group = SecurityGroup(self, "ec2-sg-neo4j-server-instance",
        description="Altimeter Neo4j Server Instance",
        vpc=vpc
    )
    instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7687), 'Bolt from ANYWHERE')  # TESTING
    # instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7473), 'Bolt from ANYWHERE')  # TESTING

    # Prepare userdata script
    with open("./resources/neo4j-server-instance-userdata.sh", "r") as f:
        userdata_content = f.read()
    userdata_content = userdata_content.replace("[[NEO4J_USER_SECRET_NAME]]", NEO4J_USER_SECRET_NAME)
    user_data = UserData.for_linux()
    user_data.add_commands(userdata_content)

    instance_type = InstanceType.of(InstanceClass.BURSTABLE2, InstanceSize.MEDIUM)

    self.instance = Instance(self, 'ec2-instance-neo4j-server-instance',
        instance_name="instance-altimeter--neo4j-community-standalone-server",
        machine_image=MachineImage.generic_linux(
            ami_map={
                "eu-west-1": "ami-00c8631d384ad7c53"
            }
        ),
        instance_type=instance_type,
        role=neo4j_server_instance_role,
        vpc=vpc,
        # vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
        vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Public').subnets),
        security_group=instance_security_group,
        user_data=user_data,
        block_devices=[
            BlockDevice(
                device_name="/dev/sda1",
                volume=BlockDeviceVolume.ebs(
                    volume_size=10,
                    volume_type=EbsDeviceVolumeType.GP2,
                    encrypted=True,  # Just encrypt
                    delete_on_termination=True
                )
            ),
            BlockDevice(
                device_name="/dev/sdb",
                volume=BlockDeviceVolume.ebs(
                    volume_size=20,  # TODO: Check size requirements
                    volume_type=EbsDeviceVolumeType.GP2,
                    encrypted=True,
                    delete_on_termination=True  # ASSUMPTION: NO 'primary' data, only altimeter results.
                )
            )
        ]
    )

    cdk.Tags.of(self.instance).add("neo4j_mode", "SINGLE")
    cdk.Tags.of(self.instance).add("dbms_mode", "SINGLE")
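# A minimal, illustrative sketch (not part of the stack) of what the userdata script's secret
# lookup amounts to, shown with boto3 rather than the actual shell script in
# ./resources/neo4j-server-instance-userdata.sh. The "access-neo4j-user-secret" inline policy on
# the instance role is what authorizes this GetSecretValue call; the function name is hypothetical.
import boto3

def fetch_neo4j_user_secret(secret_name: str) -> str:
    # On the instance, secret_name is whatever [[NEO4J_USER_SECRET_NAME]] was replaced with;
    # credentials come from the instance profile, so no keys are configured here.
    client = boto3.client("secretsmanager")
    response = client.get_secret_value(SecretId=secret_name)
    return response["SecretString"]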
def __init__(self, scope: Construct, stack_id: str, *, props: ServiceTierProps, **kwargs):
    """
    Initialize a new instance of ServiceTier

    :param scope: The scope of this construct.
    :param stack_id: The ID of this construct.
    :param props: The properties for this construct.
    :param kwargs: Any kwargs that need to be passed on to the parent class.
    """
    super().__init__(scope, stack_id, **kwargs)

    # Bastion instance for convenience (e.g. SSH into RenderQueue and WorkerFleet instances).
    # Not a critical component of the farm, so this can be safely removed. An alternative way
    # to access your hosts is also provided by the Session Manager, which is also configured
    # later in this example.
    self.bastion = BastionHostLinux(
        self, 'Bastion',
        vpc=props.vpc,
        subnet_selection=SubnetSelection(subnet_type=SubnetType.PUBLIC),
        block_devices=[
            BlockDevice(device_name='/dev/xvda',
                        volume=BlockDeviceVolume.ebs(50, encrypted=True))
        ])

    # Granting the bastion access to the file system mount for convenience.
    # This can also safely be removed.
    props.file_system.mount_to_linux_instance(self.bastion.instance,
                                              location='/mnt/efs')

    recipes = ThinkboxDockerRecipes(
        self, 'Image',
        stage=Stage.from_directory(props.docker_recipes_stage_path))

    repository = Repository(
        self, 'Repository',
        vpc=props.vpc,
        database=props.database,
        file_system=props.file_system,
        repository_installation_timeout=Duration.minutes(20),
        version=recipes.version,
    )

    server_cert = X509CertificatePem(
        self, 'RQCert',
        subject=DistinguishedName(
            cn=f'renderqueue.{props.dns_zone.zone_name}',
            o='RFDK-Sample',
            ou='RenderQueueExternal'),
        signing_certificate=props.root_ca)

    self.render_queue = RenderQueue(
        self, 'RenderQueue',
        vpc=props.vpc,
        images=recipes.render_queue_images,
        repository=repository,
        hostname=RenderQueueHostNameProps(
            hostname='renderqueue',
            zone=props.dns_zone),
        traffic_encryption=RenderQueueTrafficEncryptionProps(
            external_tls=RenderQueueExternalTLSProps(
                rfdk_certificate=server_cert),
            internal_protocol=ApplicationProtocol.HTTPS),
        version=recipes.version,
        # TODO - Evaluate deletion protection for your own needs. This is set to false to
        # cleanly remove everything when this stack is destroyed. If you would like to ensure
        # that this resource is not accidentally deleted, you should set this to true.
        deletion_protection=False)
    self.render_queue.connections.allow_default_port_from(self.bastion)

    # This is an optional feature that will set up your EC2 instances to be enabled for use with
    # the Session Manager. RFDK deploys EC2 instances that aren't available through a public subnet,
    # so connecting to them by SSH isn't easy. This is an option to quickly access hosts without
    # using a bastion instance.
    # It's important to note that the permissions need to be granted to the render queue's ASG,
    # rather than the render queue itself.
    SessionManagerHelper.grant_permissions_to(self.render_queue.asg)

    if props.ubl_licenses:
        if not props.ubl_certs_secret_arn:
            raise ValueError(
                'UBL certificates secret ARN is required when using UBL but was not specified.'
            )
        ubl_cert_secret = Secret.from_secret_arn(
            self, 'ublcertssecret', props.ubl_certs_secret_arn)

        self.ubl_licensing = UsageBasedLicensing(
            self, 'usagebasedlicensing',
            vpc=props.vpc,
            images=recipes.ubl_images,
            licenses=props.ubl_licenses,
            render_queue=self.render_queue,
            certificate_secret=ubl_cert_secret,
        )

        # Another optional usage of the SessionManagerHelper that demonstrates how to configure the UBL
        # construct's ASG for access. Note that this construct also requires you to apply the permissions
        # to its ASG property.
        SessionManagerHelper.grant_permissions_to(self.ubl_licensing.asg)
    else:
        self.ubl_licensing = None
def __init__(self, scope: Construct, stack_id: str, *, props: ServiceTierProps, **kwargs):
    """
    Initialize a new instance of ServiceTier

    :param scope: The scope of this construct.
    :param stack_id: The ID of this construct.
    :param props: The properties for this construct.
    :param kwargs: Any kwargs that need to be passed on to the parent class.
    """
    super().__init__(scope, stack_id, **kwargs)

    # A bastion host to connect to the render farm with.
    # The bastion host is for convenience (e.g. SSH into RenderQueue and WorkerFleet instances).
    # This is not a critical component of the farm, so can safely be removed.
    self.bastion = BastionHostLinux(
        self, 'Bastion',
        vpc=props.vpc,
        subnet_selection=SubnetSelection(subnet_type=SubnetType.PUBLIC),
        block_devices=[
            BlockDevice(device_name='/dev/xvda',
                        volume=BlockDeviceVolume.ebs(50, encrypted=True))
        ])

    # Granting the bastion access to the file system mount for convenience.
    # This can also safely be removed.
    props.file_system.mount_to_linux_instance(self.bastion.instance,
                                              location='/mnt/efs')

    recipes = ThinkboxDockerRecipes(
        self, 'Image',
        stage=Stage.from_directory(props.docker_recipes_stage_path))

    repository = Repository(
        self, 'Repository',
        vpc=props.vpc,
        version=recipes.version,
        database=props.database,
        file_system=props.file_system,
        repository_installation_timeout=Duration.minutes(20))

    server_cert = X509CertificatePem(
        self, 'RQCert',
        subject=DistinguishedName(
            cn=f'renderqueue.{props.dns_zone.zone_name}',
            o='RFDK-Sample',
            ou='RenderQueueExternal'),
        signing_certificate=props.root_ca)

    self.render_queue = RenderQueue(
        self, 'RenderQueue',
        vpc=props.vpc,
        version=recipes.version,
        images=recipes.render_queue_images,
        repository=repository,
        hostname=RenderQueueHostNameProps(
            hostname='renderqueue',
            zone=props.dns_zone),
        traffic_encryption=RenderQueueTrafficEncryptionProps(
            external_tls=RenderQueueExternalTLSProps(
                rfdk_certificate=server_cert),
            internal_protocol=ApplicationProtocol.HTTPS),
        # TODO - Evaluate deletion protection for your own needs. This is set to false to
        # cleanly remove everything when this stack is destroyed. If you would like to ensure
        # that this resource is not accidentally deleted, you should set this to true.
        deletion_protection=False)
    self.render_queue.connections.allow_default_port_from(self.bastion)

    if props.ubl_licenses:
        if not props.ubl_certs_secret_arn:
            raise ValueError(
                'UBL certificates secret ARN is required when using UBL but was not specified.'
            )
        ubl_cert_secret = Secret.from_secret_arn(
            self, 'ublcertssecret', props.ubl_certs_secret_arn)

        self.ubl_licensing = UsageBasedLicensing(
            self, 'usagebasedlicensing',
            vpc=props.vpc,
            images=recipes.ubl_images,
            licenses=props.ubl_licenses,
            render_queue=self.render_queue,
            certificate_secret=ubl_cert_secret,
        )
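# A minimal sketch of the ServiceTierProps container that the recipes-based constructors above
# expect, inferred only from the attributes they read (props.vpc, props.database,
# props.file_system, props.docker_recipes_stage_path, props.dns_zone, props.root_ca,
# props.ubl_certs_secret_arn, props.ubl_licenses). The @dataclass layout and the type
# annotations are assumptions; the actual sample defines this class elsewhere.
from dataclasses import dataclass
from typing import List, Optional

from aws_cdk.aws_ec2 import IVpc
from aws_cdk.aws_route53 import IPrivateHostedZone
from aws_rfdk import IMountableLinuxFilesystem, X509CertificatePem
from aws_rfdk.deadline import DatabaseConnection, UsageBasedLicense


@dataclass
class ServiceTierProps:
    # The VPC to deploy service tier resources into.
    vpc: IVpc
    # The database connection used by the Deadline Repository.
    database: DatabaseConnection
    # The file system mounted by the Repository and the bastion.
    file_system: IMountableLinuxFilesystem
    # Local path of the staged Deadline Docker recipes.
    docker_recipes_stage_path: str
    # Private zone used to build the render queue hostname.
    dns_zone: IPrivateHostedZone
    # Root CA that signs the render queue's external TLS certificate.
    root_ca: X509CertificatePem
    # Secret ARN holding the UBL certificates (required when ubl_licenses is non-empty).
    ubl_certs_secret_arn: Optional[str]
    # Usage-based licenses to forward to the license forwarder, if any.
    ubl_licenses: List[UsageBasedLicense]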