Example #1
    def __init__(self, scope: Construct, stack_id: str, *,
                 props: ComputeStackProps, **kwargs):
        super().__init__(scope, stack_id, **kwargs)

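        # The stack's region keys the single-entry AMI maps passed to generic_linux/generic_windows below.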
        region = Stack.of(self).region

        version = VersionQuery(self, 'Version', version=props.deadline_version)

        # Take a Linux image and install Deadline on it to create a new image
        linux_image = DeadlineMachineImage(
            self,
            "LinuxImage",
            props=ImageBuilderProps(
                # We use the linux full version string here as there is no Windows equivalent available on the
                # VersionQuery right now, since it is only exposing Linux installers.
                deadline_version=version.linux_full_version_string(),
                parent_ami=MachineImage.latest_amazon_linux(),
                image_version=props.image_recipe_version))
        # Set up a worker fleet that uses the image we just created
        worker_fleet_linux = WorkerInstanceFleet(
            self,
            "WorkerFleetLinux",
            vpc=props.vpc,
            render_queue=props.render_queue,
            worker_machine_image=MachineImage.generic_linux(
                {region: linux_image.ami_id}))
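        # Make CloudFormation wait for the image build before creating the fleet that consumes its AMI ID.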
        worker_fleet_linux.fleet.node.default_child.node.add_dependency(
            linux_image.node.default_child)

        # Take a Windows image and install Deadline on it to create a new image
        windows_image = DeadlineMachineImage(
            self,
            "WindowsImage",
            props=ImageBuilderProps(
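                # The Linux full version string is reused for the Windows recipe too; see the note on the Linux image above.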
                deadline_version=version.linux_full_version_string(),
                parent_ami=MachineImage.latest_windows(
                    WindowsVersion.WINDOWS_SERVER_2019_ENGLISH_CORE_BASE),
                image_version=props.image_recipe_version))
        # Set up a worker fleet that uses the image we just created
        worker_fleet_windows = WorkerInstanceFleet(
            self,
            "WorkerFleetWindows",
            vpc=props.vpc,
            render_queue=props.render_queue,
            worker_machine_image=MachineImage.generic_windows(
                {region: windows_image.ami_id}))
        worker_fleet_windows.fleet.node.default_child.node.add_dependency(
            windows_image.node.default_child)
Example #2
def main():
    # ------------------------------
    # Validate Config Values
    # ------------------------------

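    # A literal 'region' key means the placeholder AMI map was never filled in with a real region/AMI pair.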
    if 'region' in config.deadline_client_linux_ami_map:
        raise ValueError('Deadline Client Linux AMI map is required but was not specified.')

    # ------------------------------
    # Application
    # ------------------------------
    app = App()

    if 'CDK_DEPLOY_ACCOUNT' not in os.environ and 'CDK_DEFAULT_ACCOUNT' not in os.environ:
        raise ValueError('You must define either CDK_DEPLOY_ACCOUNT or CDK_DEFAULT_ACCOUNT in the environment.')
    if 'CDK_DEPLOY_REGION' not in os.environ and 'CDK_DEFAULT_REGION' not in os.environ:
        raise ValueError('You must define either CDK_DEPLOY_REGION or CDK_DEFAULT_REGION in the environment.')
    env = Environment(
        account=os.environ.get('CDK_DEPLOY_ACCOUNT', os.environ.get('CDK_DEFAULT_ACCOUNT')),
        region=os.environ.get('CDK_DEPLOY_REGION', os.environ.get('CDK_DEFAULT_REGION'))
    )

    sep_props = sep_stack.SEPStackProps(
        docker_recipes_stage_path=os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'stage'),
        worker_machine_image=MachineImage.generic_linux(config.deadline_client_linux_ami_map),
        create_resource_tracker_role=config.create_resource_tracker_role,
    )
    service = sep_stack.SEPStack(app, 'SEPStack', props=sep_props, env=env)

    app.synth()
Example #3
    def __get_instance_without_default_key(self, block_device_list):
        return Instance(scope=self.__scope,
                        id=self.__instance_id,
                        instance_type=InstanceType(self.__instance_type),
                        machine_image=MachineImage.lookup(
                            name=self.__image_id, owners=[self.__image_owner]),
                        vpc=self.__vpc,
                        block_devices=block_device_list,
                        vpc_subnets=self.__selected_subnet)
Example #4
def main():
    # ------------------------------
    # Validate Config Values
    # ------------------------------

    if not config.ubl_certificate_secret_arn and config.ubl_licenses:
        raise ValueError(
            'UBL certificates secret ARN is required when using UBL but was not specified.'
        )

    if not config.ubl_licenses:
        print('No UBL licenses specified. UBL Licensing will not be set up.')

    if not config.key_pair_name:
        print(
            'EC2 key pair name not specified. You will not have SSH access to the render farm.'
        )

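    # A literal 'region' key means the placeholder AMI map was never filled in.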
    if 'region' in config.deadline_client_linux_ami_map:
        raise ValueError(
            'Deadline Client Linux AMI map is required but was not specified.')

    # ------------------------------
    # Application
    # ------------------------------
    app = App()

    if 'CDK_DEPLOY_ACCOUNT' not in os.environ and 'CDK_DEFAULT_ACCOUNT' not in os.environ:
        raise ValueError(
            'You must define either CDK_DEPLOY_ACCOUNT or CDK_DEFAULT_ACCOUNT in the environment.'
        )
    if 'CDK_DEPLOY_REGION' not in os.environ and 'CDK_DEFAULT_REGION' not in os.environ:
        raise ValueError(
            'You must define either CDK_DEPLOY_REGION or CDK_DEFAULT_REGION in the environment.'
        )
    env = Environment(
        account=os.environ.get('CDK_DEPLOY_ACCOUNT',
                               os.environ.get('CDK_DEFAULT_ACCOUNT')),
        region=os.environ.get('CDK_DEPLOY_REGION',
                              os.environ.get('CDK_DEFAULT_REGION')))

    # ------------------------------
    # Network Tier
    # ------------------------------
    network = network_tier.NetworkTier(app, 'NetworkTier', env=env)

    # ------------------------------
    # Security Tier
    # ------------------------------
    security = security_tier.SecurityTier(app, 'SecurityTier', env=env)

    # ------------------------------
    # Storage Tier
    # ------------------------------
    if config.deploy_mongo_db:
        storage_props = storage_tier.StorageTierMongoDBProps(
            vpc=network.vpc,
            database_instance_type=InstanceType.of(InstanceClass.MEMORY5,
                                                   InstanceSize.LARGE),
            root_ca=security.root_ca,
            dns_zone=network.dns_zone,
            accept_sspl_license=config.accept_sspl_license,
            key_pair_name=config.key_pair_name)
        storage = storage_tier.StorageTierMongoDB(app,
                                                  'StorageTier',
                                                  props=storage_props,
                                                  env=env)
    else:
        storage_props = storage_tier.StorageTierDocDBProps(
            vpc=network.vpc,
            database_instance_type=InstanceType.of(InstanceClass.MEMORY5,
                                                   InstanceSize.LARGE),
        )
        storage = storage_tier.StorageTierDocDB(app,
                                                'StorageTier',
                                                props=storage_props,
                                                env=env)

    # ------------------------------
    # Service Tier
    # ------------------------------
    service_props = service_tier.ServiceTierProps(
        database=storage.database,
        file_system=storage.file_system,
        vpc=network.vpc,
        docker_recipes_stage_path=os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.pardir, 'stage'),
        ubl_certs_secret_arn=config.ubl_certificate_secret_arn,
        ubl_licenses=config.ubl_licenses,
        root_ca=security.root_ca,
        dns_zone=network.dns_zone)
    service = service_tier.ServiceTier(app,
                                       'ServiceTier',
                                       props=service_props,
                                       env=env)

    # ------------------------------
    # Compute Tier
    # ------------------------------
    deadline_client_image = MachineImage.generic_linux(
        config.deadline_client_linux_ami_map)
    compute_props = compute_tier.ComputeTierProps(
        vpc=network.vpc,
        render_queue=service.render_queue,
        worker_machine_image=deadline_client_image,
        key_pair_name=config.key_pair_name,
        usage_based_licensing=service.ubl_licensing,
        licenses=config.ubl_licenses)
    _compute = compute_tier.ComputeTier(app,
                                        'ComputeTier',
                                        props=compute_props,
                                        env=env)

    app.synth()
Example #5
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        attribute_tagged_group = Group(self, "Flexible Tagged")
        access_project = core.CfnTag(key="access-project", value="elysian")
        access_team = core.CfnTag(key="access-team", value="webdev")
        access_cost_center = core.CfnTag(key="cost-center", value="2600")

        flexible_boundary_policy = CfnManagedPolicy(
            self,
            "FlexiblePermissionBoundary",
            policy_document=json.loads(flexible_policy_permission_boundary),
        )

        CfnUser(
            self,
            "Developer",
            tags=[access_project, access_team, access_cost_center],
            groups=[attribute_tagged_group.group_name],
            permissions_boundary=flexible_boundary_policy.ref,
        )

        # Add AWS managed policy for EC2 Read Only access for the console.
        attribute_tagged_group.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonEC2ReadOnlyAccess"
            )
        )

        # Import a json policy and create CloudFormation Managed Policy
        CfnManagedPolicy(
            self,
            "FlexibleAttributePolicy",
            policy_document=json.loads(full_attribute_based_policy),
            groups=[attribute_tagged_group.group_name],
        )

        vpc = Vpc.from_lookup(self, "AttributeTaggedVPC", is_default=True)
        instance_type = InstanceType("t2.micro")
        ami = MachineImage.latest_amazon_linux()

        blocked_instance = Instance(
            self,
            "Blocked Instance",
            machine_image=ami,
            instance_type=instance_type,
            vpc=vpc,
        )
        # Re-use the AMI from the blocked instance above
        image_id = blocked_instance.instance.image_id

        # Can only add tags to CfnInstance as of cdk v1.31
        valid_instance = CfnInstance(
            self,
            "Valid Instance",
            image_id=image_id,
            instance_type="t2.micro",
            tags=[access_project, access_team, access_cost_center],
        )
        # Empty security group as it's not needed to complete our tests.
        test_security_group = SecurityGroup(
            self, "EmptySecurityGroup", vpc=vpc)

        core.CfnOutput(
            self,
            "BlockedInstance",
            value=blocked_instance.instance_id,
            export_name="elysian-blocked-instance",
        )

        core.CfnOutput(
            self,
            "ValidInstance",
            value=valid_instance.ref,
            export_name="elysian-valid-instance",
        )
        core.CfnOutput(
            self,
            "TestSecurityGroup",
            value=test_security_group.security_group_id,
            export_name="test-elysian-sg",
        )
        core.CfnOutput(
            self, "DefaultAMI", value=image_id, export_name="default-elysian-ami"
        )
Example #6
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        username_tagged = Group(self, "Username Tagged")

        developer = User(self, "Developer")
        developer.add_to_group(username_tagged)

        # Add AWS managed policy for EC2 Read Only access for the console.
        username_tagged.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonEC2ReadOnlyAccess"
            )
        )

        # Import a json policy and create CloudFormation Managed Policy
        CfnManagedPolicy(
            self,
            "UserTaggedPolicy",
            policy_document=json.loads(username_based_policy),
            groups=[username_tagged.group_name],
        )

        vpc = Vpc.from_lookup(self, "UsernameTaggedVPC", is_default=True)
        instance_type = InstanceType("t2.micro")
        ami = MachineImage.latest_amazon_linux()

        blocked_instance = Instance(
            self,
            "Blocked Instance",
            machine_image=ami,
            instance_type=instance_type,
            vpc=vpc,
        )
        # Re-use the AMI from the blocked instance above
        image_id = blocked_instance.instance.image_id

        # Can only add tags to CfnInstance as of 1.31
        dev_username_tag = core.CfnTag(
            key="username", value=developer.user_name)
        valid_instance = CfnInstance(
            self,
            "Valid Instance",
            image_id=image_id,
            instance_type="t2.micro",
            tags=[dev_username_tag],
        )
        # Empty security group as it's not needed to complete our tests.
        test_security_group = SecurityGroup(
            self, "EmptySecurityGroup", vpc=vpc)

        core.CfnOutput(
            self,
            "BlockedInstance",
            value=blocked_instance.instance_id,
            export_name="username-blocked-instance",
        )

        core.CfnOutput(
            self,
            "ValidInstance",
            value=valid_instance.ref,
            export_name="username-valid-instance",
        )
        core.CfnOutput(
            self,
            "TestSecurityGroup",
            value=test_security_group.security_group_id,
            export_name="test-username-sg",
        )
        core.CfnOutput(
            self, "DefaultAMI", value=image_id, export_name="default-username-ami"
        )
Example #7
def main():
    # ------------------------------
    # Validate Config Values
    # ------------------------------
    if not config.config.key_pair_name:
        print(
            'EC2 key pair name not specified. You will not have SSH access to the render farm.'
        )

    # ------------------------------
    # Application
    # ------------------------------
    app = App()

    if 'CDK_DEPLOY_ACCOUNT' not in os.environ and 'CDK_DEFAULT_ACCOUNT' not in os.environ:
        raise ValueError(
            'You must define either CDK_DEPLOY_ACCOUNT or CDK_DEFAULT_ACCOUNT in the environment.'
        )
    if 'CDK_DEPLOY_REGION' not in os.environ and 'CDK_DEFAULT_REGION' not in os.environ:
        raise ValueError(
            'You must define either CDK_DEPLOY_REGION or CDK_DEFAULT_REGION in the environment.'
        )
    env = Environment(
        account=os.environ.get('CDK_DEPLOY_ACCOUNT',
                               os.environ.get('CDK_DEFAULT_ACCOUNT')),
        region=os.environ.get('CDK_DEPLOY_REGION',
                              os.environ.get('CDK_DEFAULT_REGION')))

    # ------------------------------
    # Network Tier
    # ------------------------------
    network = network_tier.NetworkTier(app, 'NetworkTier', env=env)

    # ------------------------------
    # Security Tier
    # ------------------------------
    security = security_tier.SecurityTier(app, 'SecurityTier', env=env)

    # ------------------------------
    # Service Tier
    # ------------------------------
    service_props = service_tier.ServiceTierProps(
        vpc=network.vpc,
        availability_zones=config.config.availability_zones_standard,
        root_ca=security.root_ca,
        dns_zone=network.dns_zone,
        deadline_version=config.config.deadline_version,
        accept_aws_thinkbox_eula=config.config.accept_aws_thinkbox_eula)
    service = service_tier.ServiceTier(app,
                                       'ServiceTier',
                                       props=service_props,
                                       env=env)

    # ------------------------------
    # Compute Tier
    # ------------------------------
    deadline_client_image = MachineImage.generic_linux(
        config.config.deadline_client_linux_ami_map)
    compute_props = compute_tier.ComputeTierProps(
        vpc=network.vpc,
        availability_zones=config.config.availability_zones_local,
        render_queue=service.render_queue,
        worker_machine_image=deadline_client_image,
        key_pair_name=config.config.key_pair_name,
    )
    _compute = compute_tier.ComputeTier(app,
                                        'ComputeTier',
                                        props=compute_props,
                                        env=env)

    app.synth()
Example #8
    def __init__(
        self,
        scope: Construct,
        id: str,
        cluster: ICluster,
        ecs_security_group: SecurityGroup,
        ecs_source_security_group: SecurityGroup,
        vpc: IVpc,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

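        # Module-level singleton handle; a second NlbStack instance is rejected at the end of __init__.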
        global g_nlb

        Tags.of(self).add("Stack", "Common-Nlb")

        # TODO -- You need to do some manual actions:
        # TODO --  1) enable auto-assign IPv6 address on public subnets
        # TODO --  2) add to the Outbound rules of "Live-Common-Nlb/ASG/InstanceSecurityGroup" the destination "::/0"

        self.private_zone = HostedZone.from_lookup(
            self,
            "PrivateZone",
            domain_name="openttd.internal",
            private_zone=True,
        )

        user_data = UserData.for_linux(shebang="#!/bin/bash -ex")

        asset = Asset(self, "NLB", path="user_data/nlb/")
        user_data.add_commands(
            "echo 'Extracting user-data files'",
            "mkdir /nlb",
            "cd /nlb",
        )
        user_data.add_s3_download_command(
            bucket=asset.bucket,
            bucket_key=asset.s3_object_key,
            local_file="/nlb/files.zip",
        )
        user_data.add_commands("unzip files.zip", )

        user_data.add_commands(
            "echo 'Setting up configuration'",
            f"echo '{self.region}' > /etc/.region",
            f"echo '{cluster.cluster_name}' > /etc/.cluster",
        )

        user_data.add_commands(
            "echo 'Installing nginx'",
            "amazon-linux-extras install epel",
            "yum install nginx -y",
            "cp /nlb/nginx.conf /etc/nginx/nginx.conf",
            "mkdir /etc/nginx/nlb.d",
        )

        user_data.add_commands(
            "echo 'Installing Python3'",
            "yum install python3 -y",
            "python3 -m venv /venv",
            "/venv/bin/pip install -r /nlb/requirements.txt",
        )

        user_data.add_commands(
            "echo 'Generating nginx configuration'",
            "cd /etc/nginx/nlb.d",
            "/venv/bin/python /nlb/nginx.py",
            "systemctl start nginx",
        )

        user_data.add_commands(
            "echo 'Setting up SOCKS proxy'",
            "useradd pproxy",
            "cp /nlb/pproxy.service /etc/systemd/system/",
            "systemctl daemon-reload",
            "systemctl enable pproxy.service",
            "systemctl start pproxy.service",
        )

        asg = AutoScalingGroup(
            self,
            "ASG",
            vpc=vpc,
            instance_type=InstanceType("t3a.nano"),
            machine_image=MachineImage.latest_amazon_linux(
                generation=AmazonLinuxGeneration.AMAZON_LINUX_2),
            min_capacity=2,
            vpc_subnets=SubnetSelection(subnet_type=SubnetType.PUBLIC,
                                        one_per_az=True),
            user_data=user_data,
            health_check=HealthCheck.elb(grace=Duration.seconds(0)),
        )
        asg.add_security_group(ecs_security_group)

        asg.role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore"))
        asset.grant_read(asg.role)
        policy = ManagedPolicy(self, "Policy")
        policy_statement = PolicyStatement(
            actions=[
                "ec2:DescribeInstances",
                "ecs:DescribeContainerInstances",
                "ecs:DescribeTasks",
                "ecs:ListContainerInstances",
                "ecs:ListServices",
                "ecs:ListTagsForResource",
                "ecs:ListTasks",
            ],
            resources=["*"],
        )
        policy.add_statements(policy_statement)
        asg.role.add_managed_policy(policy)

        # We could also make an additional security-group and add that to
        # the ASG, but it keeps adding up. This makes it a tiny bit
        # easier to get an overview what traffic is allowed from the
        # console on AWS.
        assert isinstance(asg.node.children[0], SecurityGroup)
        self.security_group = asg.node.children[0]

        listener_https.add_targets(
            subdomain_name=self.admin_subdomain_name,
            port=80,
            target=asg,
            priority=2,
        )

        # Create a Security Group so the lambdas can access the EC2.
        # This is needed to check if the EC2 instance is fully booted.
        lambda_security_group = SecurityGroup(
            self,
            "LambdaSG",
            vpc=vpc,
        )
        self.security_group.add_ingress_rule(
            peer=lambda_security_group,
            connection=Port.tcp(80),
            description="Lambda to target",
        )

        self.security_group.add_ingress_rule(
            peer=ecs_source_security_group,
            connection=Port.udp(8080),
            description="ECS to target",
        )

        self.create_ecs_lambda(
            cluster=cluster,
            auto_scaling_group=asg,
        )

        self.create_asg_lambda(
            lifecycle_transition=LifecycleTransition.INSTANCE_LAUNCHING,
            timeout=Duration.seconds(180),
            vpc=vpc,
            security_group=lambda_security_group,
            auto_scaling_group=asg,
        )
        self.create_asg_lambda(
            lifecycle_transition=LifecycleTransition.INSTANCE_TERMINATING,
            timeout=Duration.seconds(30),
            vpc=vpc,
            security_group=lambda_security_group,
            auto_scaling_group=asg,
        )

        # Initialize the NLB record on localhost, as we need to be able to
        # reference it for other entries to work correctly.
        ARecord(
            self,
            "ARecord",
            target=RecordTarget.from_ip_addresses("127.0.0.1"),
            zone=dns.get_hosted_zone(),
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )
        AaaaRecord(
            self,
            "AAAARecord",
            target=RecordTarget.from_ip_addresses("::1"),
            zone=dns.get_hosted_zone(),
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )
        # To make things a bit easier, also alias to staging.
        self.create_alias(self, "nlb.staging")

        # Create a record for the internal DNS
        ARecord(
            self,
            "APrivateRecord",
            target=RecordTarget.from_ip_addresses("127.0.0.1"),
            zone=self.private_zone,
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )

        if g_nlb is not None:
            raise Exception("Only a single NlbStack instance can exist")
        g_nlb = self
Example #9
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.neo4j_user_secret = Secret(self, 'secretsmanager-secret-neo4j-user',
            secret_name=NEO4J_USER_SECRET_NAME
        )

        neo4j_server_instance_role = Role(self, 'iam-role-neo4j-server-instance',
            assumed_by=ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'), # Use SSM Session Manager rather than straight ssh
                ManagedPolicy.from_aws_managed_policy_name('CloudWatchAgentServerPolicy')
            ],
            inline_policies={
                "work-with-tags": PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=[
                                'ec2:CreateTags',
                                'ec2:Describe*',
                                'elasticloadbalancing:Describe*',
                                'cloudwatch:ListMetrics',
                                'cloudwatch:GetMetricStatistics',
                                'cloudwatch:Describe*',
                                'autoscaling:Describe*',
                            ],
                            resources=["*"]
                        )
                    ]
                ),
                "access-neo4j-user-secret": PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=['secretsmanager:GetSecretValue'],
                            resources=[self.neo4j_user_secret.secret_arn]
                        )
                    ]
                )
            }
        )

        instance_security_group = SecurityGroup(self, "ec2-sg-neo4j-server-instance",
            description="Altimeter Neo4j Server Instance",
            vpc=vpc
        )

        instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7687), 'Bolt from ANYWHERE') # TESTING
        # instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7473), 'Bolt from ANYWHERE') # TESTING

        # Prepare userdata script
        with open("./resources/neo4j-server-instance-userdata.sh", "r") as f:
            userdata_content = f.read()
        userdata_content = userdata_content.replace("[[NEO4J_USER_SECRET_NAME]]",NEO4J_USER_SECRET_NAME)
        user_data = UserData.for_linux()
        user_data.add_commands(userdata_content)

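        # InstanceClass.BURSTABLE2 with InstanceSize.MEDIUM resolves to a t2.medium instance.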
        instance_type = InstanceType.of(InstanceClass.BURSTABLE2, InstanceSize.MEDIUM)

        self.instance = Instance(self, 'ec2-instance-neo4j-server-instance',
            instance_name="instance-altimeter--neo4j-community-standalone-server",
            machine_image=MachineImage.generic_linux(
                ami_map={
                    "eu-west-1": "ami-00c8631d384ad7c53"
                }
            ),
            instance_type=instance_type,
            role=neo4j_server_instance_role,
            vpc=vpc,
            # vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Public').subnets),
            security_group=instance_security_group,
            user_data=user_data,
            block_devices=[
                BlockDevice(
                    device_name="/dev/sda1",
                    volume=BlockDeviceVolume.ebs(
                        volume_size=10,
                        volume_type=EbsDeviceVolumeType.GP2,
                        encrypted=True, # Just encrypt
                        delete_on_termination=True
                    )                    
                ),
                BlockDevice(
                    device_name="/dev/sdb",
                    volume=BlockDeviceVolume.ebs(
                        volume_size=20, # TODO: Check size requirements
                        volume_type=EbsDeviceVolumeType.GP2,
                        encrypted=True,
                        delete_on_termination=True # ASSUMPTION: NO 'primary' data, only altimeter results.
                    )                    
                )
            ]
        )

        cdk.Tags.of(self.instance).add("neo4j_mode", "SINGLE")
        cdk.Tags.of(self.instance).add("dbms_mode", "SINGLE")