Example 1
    def __init__(self, scope: Construct, id: str, envs: EnvSettings):
        super().__init__(scope, id)

        self.ecr = BaseECR(self, "ECR", envs)

        self.key = BaseKMS(self, "KMS", envs)

        self.vpc = Vpc(self, "Vpc", nat_gateways=1)

        self.cluster = Cluster(self,
                               "WorkersCluster",
                               cluster_name="schema-ecs-cluster",
                               vpc=self.vpc)

        self.db = DatabaseInstance(
            self,
            "DataBase",
            database_name=envs.data_base_name,
            engine=DatabaseInstanceEngine.POSTGRES,
            storage_encrypted=True,
            allocated_storage=50,
            instance_type=InstanceType.of(InstanceClass.BURSTABLE2,
                                          InstanceSize.SMALL),
            vpc=self.vpc,
        )

        if self.db.secret:
            CfnOutput(
                self,
                id="DbSecretOutput",
                export_name=self.get_database_secret_arn_output_export_name(
                    envs),
                value=self.db.secret.secret_arn,
            )
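Example 1's output relies on self.get_database_secret_arn_output_export_name(envs), a helper that is not shown. A minimal sketch of what it could look like, assuming EnvSettings exposes a name prefix (the field name here is hypothetical):

    @staticmethod
    def get_database_secret_arn_output_export_name(envs: EnvSettings) -> str:
        # Hypothetical: build a unique CloudFormation export name from the env prefix.
        return f"{envs.project_env_name}-database-secret-arn"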
Example 2
    def __get_instance_without_default_key(self, block_device_list):
        return Instance(scope=self.__scope,
                        id=self.__instance_id,
                        instance_type=InstanceType(self.__instance_type),
                        machine_image=MachineImage.lookup(
                            name=self.__image_id, owners=[self.__image_owner]),
                        vpc=self.__vpc,
                        block_devices=block_device_list,
                        vpc_subnets=self.__selected_subnet)
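The caller supplies block_device_list; one plausible way to build it with the CDK EBS helpers used elsewhere in this collection (sizes are illustrative):

    block_device_list = [
        BlockDevice(
            device_name="/dev/sda1",
            volume=BlockDeviceVolume.ebs(volume_size=20, encrypted=True),
        ),
    ]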
Example 3
    def add_asg_fleet(self, scope: BaseApp, cluster: Cluster,
                      fleet: dict) -> List[AutoScalingGroup]:
        created_fleets: List[AutoScalingGroup] = []

        node_labels = fleet.get('nodeLabels', {})
        node_labels["fleetName"] = fleet.get('name')
        node_labels_as_str = ','.join(map('='.join, node_labels.items()))

        # Source of tweaks: https://kubedex.com/90-days-of-aws-eks-in-production/
        kubelet_extra_args = ' '.join([
            # Add node labels
            f'--node-labels {node_labels_as_str}'
            if node_labels_as_str else '',

            # Capture resource reservation for kubernetes system daemons like the kubelet, container runtime,
            # node problem detector, etc.
            '--kube-reserved cpu=250m,memory=1Gi,ephemeral-storage=1Gi',

            # Capture resources for vital system functions, such as sshd, udev.
            '--system-reserved cpu=250m,memory=0.2Gi,ephemeral-storage=1Gi',

            # Start evicting pods from this node once these thresholds are crossed.
            '--eviction-hard memory.available<0.2Gi,nodefs.available<10%',
        ])

        cluster_sg = SecurityGroup.from_security_group_id(
            self,
            'eks-cluster-sg',
            security_group_id=cluster.cluster_security_group_id)

        asg_tags = {
            "k8s.io/cluster-autoscaler/enabled": "true",
            f"k8s.io/cluster-autoscaler/{cluster.cluster_name}": "owned",
        }

        # For correctly autoscaling the cluster we need our autoscaling groups to not span across AZs
        # to avoid the AZ Rebalance, hence we create an ASG per subnet
        for counter, subnet in enumerate(cluster.vpc.private_subnets):
            asg: AutoScalingGroup = cluster.add_capacity(
                id=scope.prefixed_str(f'{fleet.get("name")}-{counter}'),
                instance_type=InstanceType(fleet.get('instanceType')),
                min_capacity=fleet.get('autoscaling', {}).get('minInstances'),
                max_capacity=fleet.get('autoscaling', {}).get('maxInstances'),
                bootstrap_options=BootstrapOptions(
                    kubelet_extra_args=kubelet_extra_args, ),
                spot_price=str(fleet.get('spotPrice'))
                if fleet.get('spotPrice') else None,
                vpc_subnets=SubnetSelection(subnets=[subnet]),
            )
            created_fleets.append(asg)
            self._add_userdata_production_tweaks(asg)

            for key, value in asg_tags.items():
                Tag.add(asg, key, value)

        return created_fleets
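fleet is a plain dict; from the keys read above, a configuration entry looks roughly like this (values are illustrative):

    fleet = {
        'name': 'workers',
        'instanceType': 'm5.large',
        'nodeLabels': {'workload': 'general'},
        'autoscaling': {'minInstances': 1, 'maxInstances': 5},
        'spotPrice': 0.10,  # optional; omit to run on-demand instances
    }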
Example 4
    def __init__(self, scope: Construct, id: str, vpc: IVpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        Tags.of(self).add("Stack", "Common-Ecs")

        self._cluster = Cluster(
            self,
            "Cluster",
            vpc=vpc,
        )

        asg = AutoScalingGroup(
            self,
            "ClusterASG",
            vpc=vpc,
            instance_type=InstanceType("t3a.small"),
            machine_image=EcsOptimizedImage.amazon_linux2(),
            min_capacity=4,
        )
        self._cluster.add_auto_scaling_group(asg)

        # Create a SecurityGroup that the NLB can use to allow traffic from
        # NLB to us. This avoids a cyclic dependency.
        self.security_group = SecurityGroup(
            self,
            "SecurityGroup",
            vpc=vpc,
            allow_all_outbound=False,
        )

        # Only use "source_security_group" to check if flows come from ECS.
        # Do not use it to allow traffic in ECS; use "security_group" for
        # that.
        assert isinstance(asg.node.children[0], SecurityGroup)
        self.source_security_group = asg.node.children[0]

        # We could also make an additional security-group and add that to
        # the ASG, but it keeps adding up. This makes it a tiny bit
        # easier to get an overview what traffic is allowed from the
        # console on AWS.
        asg.node.children[0].add_ingress_rule(
            peer=self.security_group,
            connection=Port.tcp_range(32768, 65535),
            description="NLB-self to target",
        )
        asg.node.children[0].add_ingress_rule(
            peer=self.security_group,
            connection=Port.udp_range(32768, 65535),
            description="NLB-self to target (UDP)",
        )
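security_group and source_security_group are consumed by a downstream NLB stack (compare Example 14, whose constructor takes ecs_security_group and ecs_source_security_group). A wiring sketch, with hypothetical class names:

    ecs = EcsStack(app, "Common-Ecs", vpc=vpc)  # the stack above
    NlbStack(
        app,
        "Common-Nlb",
        cluster=ecs._cluster,  # assumes the ECS stack exposes its cluster
        ecs_security_group=ecs.security_group,
        ecs_source_security_group=ecs.source_security_group,
        vpc=vpc,
    )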
Example 5
    def add_managed_fleet(self, cluster: Cluster, fleet: dict):
        # To correctly scale the cluster we need our node groups to not span across AZs
        # to avoid the automatic AZ re-balance, hence we create a node group per subnet

        for counter, subnet in enumerate(cluster.vpc.private_subnets):
            fleet_id = f'{fleet.get("name")}-{counter}'
            cluster.add_nodegroup(
                id=fleet_id,
                instance_type=InstanceType(fleet.get('instanceType')),
                min_size=fleet.get('autoscaling', {}).get('minInstances'),
                max_size=fleet.get('autoscaling', {}).get('maxInstances'),
                labels=dict(**fleet.get('nodeLabels', {}),
                            fleetName=fleet.get('name')),
                nodegroup_name=f'{fleet.get("name")}-{subnet.availability_zone}',
                subnets=SubnetSelection(subnets=[subnet]),
            )
Example 6
    def __init__(self, scope: Construct, stack_id: str, *,
                 props: ComputeTierProps, **kwargs):
        """
        Initializes a new instance of ComputeTier
        :param scope: The Scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties of this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # We can put the health monitor and worker fleet in all of the local zones we're using
        subnets = SubnetSelection(availability_zones=props.availability_zones,
                                  subnet_type=SubnetType.PRIVATE,
                                  one_per_az=True)

        # We can put the health monitor in all of the local zones we're using for the worker fleet
        self.health_monitor = HealthMonitor(self,
                                            'HealthMonitor',
                                            vpc=props.vpc,
                                            vpc_subnets=subnets,
                                            deletion_protection=False)

        self.worker_fleet = WorkerInstanceFleet(
            self,
            'WorkerFleet',
            vpc=props.vpc,
            vpc_subnets=subnets,
            render_queue=props.render_queue,
            # Not all instance types will be available in local zones. For a list of the instance types
            # available in each local zone, you can refer to:
            # https://aws.amazon.com/about-aws/global-infrastructure/localzones/features/#AWS_Services
            # BURSTABLE3 is a T3; the third generation of burstable instances
            instance_type=InstanceType.of(InstanceClass.BURSTABLE3,
                                          InstanceSize.LARGE),
            worker_machine_image=props.worker_machine_image,
            health_monitor=self.health_monitor,
            key_name=props.key_pair_name,
            user_data_provider=UserDataProvider(self, 'UserDataProvider'))
        SessionManagerHelper.grant_permissions_to(self.worker_fleet)
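ComputeTierProps is defined elsewhere in the RFDK sample; from the attributes read above it is approximately this dataclass (a sketch, not the exact definition):

    from dataclasses import dataclass
    from typing import List, Optional

    @dataclass
    class ComputeTierProps:
        vpc: IVpc
        availability_zones: List[str]
        render_queue: RenderQueue
        worker_machine_image: IMachineImage
        key_pair_name: Optional[str]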
Example 7
    def create_vpc(self, scope: BaseApp, eks_enabled: bool = True) -> Vpc:
        vpc = Vpc(
            self,
            scope.prefixed_str(
                scope.environment_config.get('vpc', {}).get('name')),
            cidr=scope.environment_config.get('vpc', {}).get('cidr'),
            max_azs=scope.environment_config.get('vpc', {}).get('maxAZs'),
            enable_dns_hostnames=True,
            enable_dns_support=True,
            subnet_configuration=self._get_subnet_configuration(scope),
        )

        if eks_enabled:
            for subnet in vpc.public_subnets:
                Tag.add(subnet, "kubernetes.io/role/elb", "1")
                Tag.add(
                    subnet,
                    f"kubernetes.io/cluster/{scope.prefixed_str(scope.environment_config.get('eks', {}).get('clusterName'))}",
                    "shared")
            for subnet in vpc.private_subnets:
                Tag.add(subnet, "kubernetes.io/role/internal-elb", "1")
                Tag.add(
                    subnet,
                    f"kubernetes.io/cluster/{scope.prefixed_str(scope.environment_config.get('eks', {}).get('clusterName'))}",
                    "shared")

        if scope.environment_config.get('vpc', {}).get('bastionHost',
                                                       {}).get('enabled'):
            BastionHostLinux(self,
                             scope.prefixed_str('BastionHost'),
                             vpc=vpc,
                             instance_type=InstanceType(
                                 scope.environment_config.get('vpc', {}).get(
                                     'bastionHost', {}).get('instanceType')),
                             instance_name=scope.prefixed_str('BastionHost'))
        return vpc
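scope.environment_config is read as a nested dict; the lookups above imply a shape like this (values are illustrative):

    environment_config = {
        'vpc': {
            'name': 'main-vpc',
            'cidr': '10.0.0.0/16',
            'maxAZs': 3,
            'bastionHost': {'enabled': True, 'instanceType': 't3.micro'},
        },
        'eks': {'clusterName': 'main-cluster'},
    }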
Example 8
    def __init__(self, scope: Construct, id: str, vpc: Vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.rds = DatabaseInstance(
            self,
            "rds",
            master_username=USER,
            master_user_password=SecretValue(PASSWORD),
            database_name=DATABASE,
            vpc=vpc.vpc,
            engine=DatabaseInstanceEngine.POSTGRES,
            port=PORT,
            instance_type=InstanceType.of(InstanceClass.BURSTABLE3,
                                          InstanceSize.MICRO),
            security_groups=[vpc.sg_rds],
            deletion_protection=False,
        )

        CfnOutput(
            self,
            "rds_address",
            value=f"postgres://{USER}:{PASSWORD}@{self.rds.db_instance_endpoint_address}:{PORT}/{DATABASE}",
        )
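USER, PASSWORD, DATABASE and PORT are module-level constants in the source; for the snippet to run they only need to exist, e.g.:

    USER = "airflow"
    PASSWORD = "change-me"  # illustrative; a real deployment should source this from Secrets Manager
    DATABASE = "airflow"
    PORT = 5432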
Example 9
    def __init__(
        self, scope: Construct, id: str, vpc: Vpc, rds: Rds, batch, **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)

        cluster = _Cluster(self, "cluster", vpc=vpc.vpc)

        cluster.add_capacity(
            "ag",
            instance_type=InstanceType.of(InstanceClass.STANDARD5, InstanceSize.LARGE),
        )

        shared_airflow_env = {
            "AIRFLOW__CORE__SQL_ALCHEMY_CONN": rds.uri,
            "AIRFLOW__CORE__LOAD_EXAMPLES": "True",
            "AIRFLOW__CORE__EXECUTOR": "LocalExecutor",
        }

        init_db = InitTask(self, "init_db", shared_airflow_env=shared_airflow_env)

        scheduler_service = SchedulerService(
            self,
            "scheduler_service",
            cluster=cluster,
            shared_airflow_env=shared_airflow_env,
        )

        webserver_service = WebserverService(
            self,
            "webserver_service",
            cluster=cluster,
            shared_airflow_env=shared_airflow_env,
            vpc=vpc.vpc,
        )

        CfnOutput(self, "cluster-name", value=cluster.cluster_name)
Example 10
def main():
    # ------------------------------
    # Validate Config Values
    # ------------------------------

    if not config.ubl_certificate_secret_arn and config.ubl_licenses:
        raise ValueError(
            'UBL certificates secret ARN is required when using UBL but was not specified.'
        )

    if not config.ubl_licenses:
        print('No UBL licenses specified. UBL Licensing will not be set up.')

    if not config.key_pair_name:
        print(
            'EC2 key pair name not specified. You will not have SSH access to the render farm.'
        )

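    # The sample ships its AMI map with a placeholder entry keyed by the literal
    # string 'region'; if that key is still present, the map was never filled in.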
    if 'region' in config.deadline_client_linux_ami_map:
        raise ValueError(
            'Deadline Client Linux AMI map is required but was not specified.')

    # ------------------------------
    # Application
    # ------------------------------
    app = App()

    if 'CDK_DEPLOY_ACCOUNT' not in os.environ and 'CDK_DEFAULT_ACCOUNT' not in os.environ:
        raise ValueError(
            'You must define either CDK_DEPLOY_ACCOUNT or CDK_DEFAULT_ACCOUNT in the environment.'
        )
    if 'CDK_DEPLOY_REGION' not in os.environ and 'CDK_DEFAULT_REGION' not in os.environ:
        raise ValueError(
            'You must define either CDK_DEPLOY_REGION or CDK_DEFAULT_REGION in the environment.'
        )
    env = Environment(
        account=os.environ.get('CDK_DEPLOY_ACCOUNT',
                               os.environ.get('CDK_DEFAULT_ACCOUNT')),
        region=os.environ.get('CDK_DEPLOY_REGION',
                              os.environ.get('CDK_DEFAULT_REGION')))

    # ------------------------------
    # Network Tier
    # ------------------------------
    network = network_tier.NetworkTier(app, 'NetworkTier', env=env)

    # ------------------------------
    # Security Tier
    # ------------------------------
    security = security_tier.SecurityTier(app, 'SecurityTier', env=env)

    # ------------------------------
    # Storage Tier
    # ------------------------------
    if config.deploy_mongo_db:
        storage_props = storage_tier.StorageTierMongoDBProps(
            vpc=network.vpc,
            database_instance_type=InstanceType.of(InstanceClass.MEMORY5,
                                                   InstanceSize.LARGE),
            root_ca=security.root_ca,
            dns_zone=network.dns_zone,
            accept_sspl_license=config.accept_sspl_license,
            key_pair_name=config.key_pair_name)
        storage = storage_tier.StorageTierMongoDB(app,
                                                  'StorageTier',
                                                  props=storage_props,
                                                  env=env)
    else:
        storage_props = storage_tier.StorageTierDocDBProps(
            vpc=network.vpc,
            database_instance_type=InstanceType.of(InstanceClass.MEMORY5,
                                                   InstanceSize.LARGE),
        )
        storage = storage_tier.StorageTierDocDB(app,
                                                'StorageTier',
                                                props=storage_props,
                                                env=env)

    # ------------------------------
    # Service Tier
    # ------------------------------
    service_props = service_tier.ServiceTierProps(
        database=storage.database,
        file_system=storage.file_system,
        vpc=network.vpc,
        docker_recipes_stage_path=os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.pardir, 'stage'),
        ubl_certs_secret_arn=config.ubl_certificate_secret_arn,
        ubl_licenses=config.ubl_licenses,
        root_ca=security.root_ca,
        dns_zone=network.dns_zone)
    service = service_tier.ServiceTier(app,
                                       'ServiceTier',
                                       props=service_props,
                                       env=env)

    # ------------------------------
    # Compute Tier
    # ------------------------------
    deadline_client_image = MachineImage.generic_linux(
        config.deadline_client_linux_ami_map)
    compute_props = compute_tier.ComputeTierProps(
        vpc=network.vpc,
        render_queue=service.render_queue,
        worker_machine_image=deadline_client_image,
        key_pair_name=config.key_pair_name,
        usage_based_licensing=service.ubl_licensing,
        licenses=config.ubl_licenses)
    _compute = compute_tier.ComputeTier(app,
                                        'ComputeTier',
                                        props=compute_props,
                                        env=env)

    app.synth()
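The module is wired up as a normal CDK app entry point:

    if __name__ == '__main__':
        main()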
Example 11
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        attribute_tagged_group = Group(self, "Flexible Tagged")
        access_project = core.CfnTag(key="access-project", value="elysian")
        access_team = core.CfnTag(key="access-team", value="webdev")
        access_cost_center = core.CfnTag(key="cost-center", value="2600")

        flexible_boundary_policy = CfnManagedPolicy(
            self,
            "FlexiblePermissionBoundary",
            policy_document=json.loads(flexible_policy_permission_boundary),
        )

        CfnUser(
            self,
            "Developer",
            tags=[access_project, access_team, access_cost_center],
            groups=[attribute_tagged_group.group_name],
            permissions_boundary=flexible_boundary_policy.ref,
        )

        # Add AWS managed policy for EC2 Read Only access for the console.
        attribute_tagged_group.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonEC2ReadOnlyAccess"
            )
        )

        # Import a json policy and create CloudFormation Managed Policy
        CfnManagedPolicy(
            self,
            "FlexibleAttributePolicy",
            policy_document=json.loads(full_attribute_based_policy),
            groups=[attribute_tagged_group.group_name],
        )

        vpc = Vpc.from_lookup(self, "AttributeTaggedVPC", is_default=True)
        instance_type = InstanceType("t2.micro")
        ami = MachineImage.latest_amazon_linux()

        blocked_instance = Instance(
            self,
            "Blocked Instance",
            machine_image=ami,
            instance_type=instance_type,
            vpc=vpc,
        )
        # Re-use the AMI from the blocked instance
        image_id = blocked_instance.instance.image_id

        # Can only add tags to CfnInstance as of cdk v1.31
        valid_instance = CfnInstance(
            self,
            "Valid Instance",
            image_id=image_id,
            instance_type="t2.micro",
            tags=[access_project, access_team, access_cost_center],
        )
        # Empty group as it's not needed to complete our tests.
        test_security_group = SecurityGroup(
            self, "EmptySecurityGroup", vpc=vpc)

        core.CfnOutput(
            self,
            "BlockedInstance",
            value=blocked_instance.instance_id,
            export_name="elysian-blocked-instance",
        )

        core.CfnOutput(
            self,
            "ValidInstance",
            value=valid_instance.ref,
            export_name="elysian-valid-instance",
        )
        core.CfnOutput(
            self,
            "TestSecurityGroup",
            value=test_security_group.security_group_id,
            export_name="test-elysian-sg",
        )
        core.CfnOutput(
            self, "DefaultAMI", value=image_id, export_name="default-elysian-ami"
        )
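flexible_policy_permission_boundary and full_attribute_based_policy are JSON strings loaded at module level. An illustrative attribute-based statement (not the original policy) that matches a principal tag against a resource tag:

    full_attribute_based_policy = """{
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Action": "ec2:StartInstances",
            "Resource": "*",
            "Condition": {
                "StringEquals": {
                    "aws:ResourceTag/access-project": "${aws:PrincipalTag/access-project}"
                }
            }
        }]
    }"""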
Example 12
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        username_tagged = Group(self, "Username Tagged")

        developer = User(self, "Developer")
        developer.add_to_group(username_tagged)

        # Add AWS managed policy for EC2 Read Only access for the console.
        username_tagged.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                managed_policy_name="AmazonEC2ReadOnlyAccess"
            )
        )

        # Import a json policy and create CloudFormation Managed Policy
        CfnManagedPolicy(
            self,
            "UserTaggedPolicy",
            policy_document=json.loads(username_based_policy),
            groups=[username_tagged.group_name],
        )

        vpc = Vpc.from_lookup(self, "UsernameTaggedVPC", is_default=True)
        instance_type = InstanceType("t2.micro")
        ami = MachineImage.latest_amazon_linux()

        blocked_instance = Instance(
            self,
            "Blocked Instance",
            machine_image=ami,
            instance_type=instance_type,
            vpc=vpc,
        )
        # Re-use the AMI from the blocked instance
        image_id = blocked_instance.instance.image_id

        # Can only add tags to CfnInstance as of 1.31
        dev_username_tag = core.CfnTag(
            key="username", value=developer.user_name)
        valid_instance = CfnInstance(
            self,
            "Valid Instance",
            image_id=image_id,
            instance_type="t2.micro",
            tags=[dev_username_tag],
        )
        # Empty group as it's not needed to complete our tests.
        test_security_group = SecurityGroup(
            self, "EmptySecurityGroup", vpc=vpc)

        core.CfnOutput(
            self,
            "BlockedInstance",
            value=blocked_instance.instance_id,
            export_name="username-blocked-instance",
        )

        core.CfnOutput(
            self,
            "ValidInstance",
            value=valid_instance.ref,
            export_name="username-valid-instance",
        )
        core.CfnOutput(
            self,
            "TestSecurityGroup",
            value=test_security_group.security_group_id,
            export_name="test-username-sg",
        )
        core.CfnOutput(
            self, "DefaultAMI", value=image_id, export_name="default-username-ami"
        )
Example 13
    def __init__(self, scope: Construct, stack_id: str, *, props: SEPStackProps, **kwargs):
        """
        Initialize a new instance of SEPStack
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties for this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # The VPC that all components of the render farm will be created in.
        vpc = Vpc(
            self,
            'Vpc',
            max_azs=2,
        )

        recipes = ThinkboxDockerRecipes(
            self,
            'Image',
            stage=Stage.from_directory(props.docker_recipes_stage_path),
        )

        repository = Repository(
            self,
            'Repository',
            vpc=vpc,
            version=recipes.version,
            repository_installation_timeout=Duration.minutes(20),
            # TODO - Evaluate deletion protection for your own needs. These properties are set to RemovalPolicy.DESTROY
            # to cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that these resources are not accidentally deleted, you should set these properties to RemovalPolicy.RETAIN
            # or just remove the removal_policy parameter.
            removal_policy=RepositoryRemovalPolicies(
                database=RemovalPolicy.DESTROY,
                filesystem=RemovalPolicy.DESTROY,
            ),
        )

        host = 'renderqueue'
        zone_name = 'deadline-test.internal'

        # Internal DNS zone for the VPC.
        dns_zone = PrivateHostedZone(
            self,
            'DnsZone',
            vpc=vpc,
            zone_name=zone_name,
        )

        ca_cert = X509CertificatePem(
            self,
            'RootCA',
            subject=DistinguishedName(
                cn='SampleRootCA',
            ),
        )

        server_cert = X509CertificatePem(
            self,
            'RQCert',
            subject=DistinguishedName(
                cn=f'{host}.{dns_zone.zone_name}',
                o='RFDK-Sample',
                ou='RenderQueueExternal',
            ),
            signing_certificate=ca_cert,
        )

        render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=vpc,
            version=recipes.version,
            images=recipes.render_queue_images,
            repository=repository,
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False,
            hostname=RenderQueueHostNameProps(
                hostname=host,
                zone=dns_zone,
            ),
            traffic_encryption=RenderQueueTrafficEncryptionProps(
                external_tls=RenderQueueExternalTLSProps(
                    rfdk_certificate=server_cert,
                ),
                internal_protocol=ApplicationProtocol.HTTPS,
            ),
        )

        if props.create_resource_tracker_role:
            # Creates the Resource Tracker Access role. This role is required to exist in your account so the resource tracker will work properly
            Role(
                self,
                'ResourceTrackerRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                managed_policies=[ManagedPolicy.from_aws_managed_policy_name('AWSThinkboxDeadlineResourceTrackerAccessPolicy')],
                role_name='DeadlineResourceTrackerAccessRole',
            )

        fleet = SpotEventPluginFleet(
            self,
            'SpotEventPluginFleet',
            vpc=vpc,
            render_queue=render_queue,
            deadline_groups=['group_name'],
            instance_types=[InstanceType.of(InstanceClass.BURSTABLE3, InstanceSize.LARGE)],
            worker_machine_image=props.worker_machine_image,
            max_capacity=1,
        )

        # Optional: Add additional tags to both spot fleet request and spot instances.
        Tags.of(fleet).add('name', 'SEPtest')

        ConfigureSpotEventPlugin(
            self,
            'ConfigureSpotEventPlugin',
            vpc=vpc,
            render_queue=render_queue,
            spot_fleets=[fleet],
            configuration=SpotEventPluginSettings(
                enable_resource_tracker=True,
            ),
        )
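SEPStackProps is defined elsewhere in the sample; the fields used above suggest roughly:

    from dataclasses import dataclass

    @dataclass
    class SEPStackProps:
        docker_recipes_stage_path: str
        worker_machine_image: IMachineImage
        create_resource_tracker_role: bool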
Example 14
    def __init__(
        self,
        scope: Construct,
        id: str,
        cluster: ICluster,
        ecs_security_group: SecurityGroup,
        ecs_source_security_group: SecurityGroup,
        vpc: IVpc,
        **kwargs,
    ) -> None:
        super().__init__(scope, id, **kwargs)

        global g_nlb

        Tags.of(self).add("Stack", "Common-Nlb")

        # TODO -- You need to do some manual actions:
        # TODO --  1) enable auto-assign IPv6 address on public subnets
        # TODO --  2) add to the Outbound rules of "Live-Common-Nlb/ASG/InstanceSecurityGroup" the destination "::/0"

        self.private_zone = HostedZone.from_lookup(
            self,
            "PrivateZone",
            domain_name="openttd.internal",
            private_zone=True,
        )

        user_data = UserData.for_linux(shebang="#!/bin/bash -ex")

        asset = Asset(self, "NLB", path="user_data/nlb/")
        user_data.add_commands(
            "echo 'Extracting user-data files'",
            "mkdir /nlb",
            "cd /nlb",
        )
        user_data.add_s3_download_command(
            bucket=asset.bucket,
            bucket_key=asset.s3_object_key,
            local_file="/nlb/files.zip",
        )
        user_data.add_commands("unzip files.zip", )

        user_data.add_commands(
            "echo 'Setting up configuration'",
            f"echo '{self.region}' > /etc/.region",
            f"echo '{cluster.cluster_name}' > /etc/.cluster",
        )

        user_data.add_commands(
            "echo 'Installing nginx'",
            "amazon-linux-extras install epel",
            "yum install nginx -y",
            "cp /nlb/nginx.conf /etc/nginx/nginx.conf",
            "mkdir /etc/nginx/nlb.d",
        )

        user_data.add_commands(
            "echo 'Installing Python3'",
            "yum install python3 -y",
            "python3 -m venv /venv",
            "/venv/bin/pip install -r /nlb/requirements.txt",
        )

        user_data.add_commands(
            "echo 'Generating nginx configuration'",
            "cd /etc/nginx/nlb.d",
            "/venv/bin/python /nlb/nginx.py",
            "systemctl start nginx",
        )

        user_data.add_commands(
            "echo 'Setting up SOCKS proxy'",
            "useradd pproxy",
            "cp /nlb/pproxy.service /etc/systemd/system/",
            "systemctl daemon-reload",
            "systemctl enable pproxy.service",
            "systemctl start pproxy.service",
        )

        asg = AutoScalingGroup(
            self,
            "ASG",
            vpc=vpc,
            instance_type=InstanceType("t3a.nano"),
            machine_image=MachineImage.latest_amazon_linux(
                generation=AmazonLinuxGeneration.AMAZON_LINUX_2),
            min_capacity=2,
            vpc_subnets=SubnetSelection(subnet_type=SubnetType.PUBLIC,
                                        one_per_az=True),
            user_data=user_data,
            health_check=HealthCheck.elb(grace=Duration.seconds(0)),
        )
        asg.add_security_group(ecs_security_group)

        asg.role.add_managed_policy(
            ManagedPolicy.from_aws_managed_policy_name(
                "AmazonSSMManagedInstanceCore"))
        asset.grant_read(asg.role)
        policy = ManagedPolicy(self, "Policy")
        policy_statement = PolicyStatement(
            actions=[
                "ec2:DescribeInstances",
                "ecs:DescribeContainerInstances",
                "ecs:DescribeTasks",
                "ecs:ListContainerInstances",
                "ecs:ListServices",
                "ecs:ListTagsForResource",
                "ecs:ListTasks",
            ],
            resources=["*"],
        )
        policy.add_statements(policy_statement)
        asg.role.add_managed_policy(policy)

        # We could also make an additional security-group and add that to
        # the ASG, but it keeps adding up. This makes it a tiny bit
        # easier to get an overview what traffic is allowed from the
        # console on AWS.
        assert isinstance(asg.node.children[0], SecurityGroup)
        self.security_group = asg.node.children[0]

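        # Note: "listener_https" below and "dns" further down are module-level
        # objects from the surrounding project; they are not defined in this excerpt.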
        listener_https.add_targets(
            subdomain_name=self.admin_subdomain_name,
            port=80,
            target=asg,
            priority=2,
        )

        # Create a Security Group so the lambdas can access the EC2.
        # This is needed to check if the EC2 instance is fully booted.
        lambda_security_group = SecurityGroup(
            self,
            "LambdaSG",
            vpc=vpc,
        )
        self.security_group.add_ingress_rule(
            peer=lambda_security_group,
            connection=Port.tcp(80),
            description="Lambda to target",
        )

        self.security_group.add_ingress_rule(
            peer=ecs_source_security_group,
            connection=Port.udp(8080),
            description="ECS to target",
        )

        self.create_ecs_lambda(
            cluster=cluster,
            auto_scaling_group=asg,
        )

        self.create_asg_lambda(
            lifecycle_transition=LifecycleTransition.INSTANCE_LAUNCHING,
            timeout=Duration.seconds(180),
            vpc=vpc,
            security_group=lambda_security_group,
            auto_scaling_group=asg,
        )
        self.create_asg_lambda(
            lifecycle_transition=LifecycleTransition.INSTANCE_TERMINATING,
            timeout=Duration.seconds(30),
            vpc=vpc,
            security_group=lambda_security_group,
            auto_scaling_group=asg,
        )

        # Initialize the NLB record on localhost, as we need to be able to
        # reference it for other entries to work correctly.
        ARecord(
            self,
            "ARecord",
            target=RecordTarget.from_ip_addresses("127.0.0.1"),
            zone=dns.get_hosted_zone(),
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )
        AaaaRecord(
            self,
            "AAAARecord",
            target=RecordTarget.from_ip_addresses("::1"),
            zone=dns.get_hosted_zone(),
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )
        # To make things a bit easier, also alias to staging.
        self.create_alias(self, "nlb.staging")

        # Create a record for the internal DNS
        ARecord(
            self,
            "APrivateRecord",
            target=RecordTarget.from_ip_addresses("127.0.0.1"),
            zone=self.private_zone,
            record_name=self.subdomain_name,
            ttl=Duration.seconds(60),
        )

        if g_nlb is not None:
            raise Exception("Only a single NlbStack instance can exist")
        g_nlb = self
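The class also assumes a module-level singleton guard defined above it:

    g_nlb = None  # set to the single NlbStack instance once constructed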
Example 15
    def __init__(self, scope: core.Construct, construct_id: str,
                 **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        # Create two VPCs - one to host our private website, the other to act as a client
        website_vpc = Vpc(
            self,
            "WEBSITEVPC",
            cidr="10.0.0.0/16",
        )
        client_vpc = Vpc(
            self,
            "ClientVPC",
            cidr="10.1.0.0/16",
        )

        # Create a bastion host in the client VPC which will act as our client workstation
        bastion = BastionHostLinux(
            self,
            "WEBClient",
            vpc=client_vpc,
            instance_name='my-bastion',
            instance_type=InstanceType('t3.micro'),
            machine_image=AmazonLinuxImage(),
            subnet_selection=SubnetSelection(subnet_type=SubnetType.PRIVATE),
            security_group=SecurityGroup(
                scope=self,
                id='bastion-sg',
                security_group_name='bastion-sg',
                description='Security group for the bastion; no inbound ports are open '
                'because we access the bastion via AWS SSM',
                vpc=client_vpc,
                allow_all_outbound=True))

        # Set up a VPC peering connection between client and API VPCs, and adjust
        # the routing table to allow connections back and forth
        VpcPeeringHelper(self, 'Peering', website_vpc, client_vpc)

        # Create VPC endpoints for API gateway
        vpc_endpoint = InterfaceVpcEndpoint(
            self,
            'APIGWVpcEndpoint',
            vpc=website_vpc,
            service=InterfaceVpcEndpointAwsService.APIGATEWAY,
            private_dns_enabled=True,
        )
        vpc_endpoint.connections.allow_from(bastion, Port.tcp(443))
        endpoint_id = vpc_endpoint.vpc_endpoint_id

        api_policy = iam.PolicyDocument(statements=[
            iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                                actions=['execute-api:Invoke'],
                                resources=['execute-api:/*'],
                                effect=iam.Effect.DENY,
                                conditions={
                                    "StringNotEquals": {
                                        "aws:SourceVpce": endpoint_id
                                    }
                                }),
            iam.PolicyStatement(principals=[iam.AnyPrincipal()],
                                actions=['execute-api:Invoke'],
                                resources=['execute-api:/*'],
                                effect=iam.Effect.ALLOW)
        ])

        # Create an s3 bucket to hold the content
        content_bucket = s3.Bucket(self,
                                   "ContentBucket",
                                   removal_policy=core.RemovalPolicy.DESTROY)

        # Upload our static content to the bucket
        s3dep.BucketDeployment(self,
                               "DeployWithInvalidation",
                               sources=[s3dep.Source.asset('website')],
                               destination_bucket=content_bucket)

        # Create a private API GW in the API VPC
        api = apigw.RestApi(self,
                            'PrivateS3Api',
                            endpoint_configuration=apigw.EndpointConfiguration(
                                types=[apigw.EndpointType.PRIVATE],
                                vpc_endpoints=[vpc_endpoint]),
                            policy=api_policy)

        # Create a role to allow API GW to access our S3 bucket contents
        role = iam.Role(
            self,
            "Role",
            assumed_by=iam.ServicePrincipal("apigateway.amazonaws.com"))
        role.add_to_policy(
            iam.PolicyStatement(effect=iam.Effect.ALLOW,
                                resources=[
                                    content_bucket.bucket_arn,
                                    content_bucket.bucket_arn + '/*'
                                ],
                                actions=["s3:Get*"]))

        # Create a proxy resource that captures all non-root resource requests
        resource = api.root.add_resource("{proxy+}")
        # Create an integration with S3
        resource_integration = apigw.Integration(
            type=apigw.IntegrationType.AWS,
            integration_http_method='GET',
            options=apigw.IntegrationOptions(
                request_parameters=
                {  # map the proxy parameter so we can pass the request path
                    "integration.request.path.proxy":
                    "method.request.path.proxy"
                },
                integration_responses=[
                    apigw.IntegrationResponse(
                        status_code='200',
                        response_parameters=
                        {  # map the content type of the S3 object back to the HTTP response
                            "method.response.header.Content-Type":
                            "integration.response.header.Content-Type"
                        })
                ],
                credentials_role=role),
            # reference the bucket content we want to retrieve
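            # NOTE: the API Gateway region is hardcoded to eu-west-1 here and in
            # the root integration below; adjust it for your deployment region.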
            uri='arn:aws:apigateway:eu-west-1:s3:path/%s/{proxy}' %
            (content_bucket.bucket_name))
        # handle the GET request and map it to our new integration
        resource.add_method(
            "GET",
            resource_integration,
            method_responses=[
                apigw.MethodResponse(status_code='200',
                                     response_parameters={
                                         "method.response.header.Content-Type":
                                         False
                                     })
            ],
            request_parameters={"method.request.path.proxy": True})
        # Handle requests to the root of our site
        # Create another integration with S3 - this time with no proxy parameter
        resource_integration = apigw.Integration(
            type=apigw.IntegrationType.AWS,
            integration_http_method='GET',
            options=apigw.IntegrationOptions(
                integration_responses=[
                    apigw.IntegrationResponse(
                        status_code='200',
                        response_parameters=
                        {  # map the content type of the S3 object back to the HTTP response
                            "method.response.header.Content-Type":
                            "integration.response.header.Content-Type"
                        })
                ],
                credentials_role=role),
            # reference the bucket content we want to retrieve
            uri='arn:aws:apigateway:eu-west-1:s3:path/%s/index.html' %
            (content_bucket.bucket_name))
        # handle the GET request and map it to our new integration
        api.root.add_method("GET",
                            resource_integration,
                            method_responses=[
                                apigw.MethodResponse(
                                    status_code='200',
                                    response_parameters={
                                        "method.response.header.Content-Type":
                                        False
                                    })
                            ])
Example 16
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.neo4j_user_secret = Secret(self, 'secretsmanager-secret-neo4j-user',
            secret_name=NEO4J_USER_SECRET_NAME
        )

        neo4j_server_instance_role = Role(self, 'iam-role-neo4j-server-instance',
            assumed_by=ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'), # Use SSM Session Manager rather than straight ssh
                ManagedPolicy.from_aws_managed_policy_name('CloudWatchAgentServerPolicy')
            ],
            inline_policies={
                "work-with-tags": PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=[
                                'ec2:CreateTags',
                                'ec2:Describe*',
                                'elasticloadbalancing:Describe*',
                                'cloudwatch:ListMetrics',
                                'cloudwatch:GetMetricStatistics',
                                'cloudwatch:Describe*',
                                'autoscaling:Describe*',
                            ],
                            resources=["*"]
                        )
                    ]
                ),
                "access-neo4j-user-secret": PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=['secretsmanager:GetSecretValue'],
                            resources=[self.neo4j_user_secret.secret_arn]
                        )
                    ]
                )
            }
        )

        instance_security_group = SecurityGroup(self, "ec2-sg-neo4j-server-instance",
            description="Altimeter Neo4j Server Instance",
            vpc=vpc
        )

        instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7687), 'Bolt from ANYWHERE') # TESTING
        # instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7473), 'Bolt from ANYWHERE') # TESTING

        # Prepare userdata script
        with open("./resources/neo4j-server-instance-userdata.sh", "r") as f:
            userdata_content = f.read()
        userdata_content = userdata_content.replace("[[NEO4J_USER_SECRET_NAME]]",NEO4J_USER_SECRET_NAME)
        user_data = UserData.for_linux()
        user_data.add_commands(userdata_content)

        instance_type = InstanceType.of(InstanceClass.BURSTABLE2, InstanceSize.MEDIUM)

        self.instance = Instance(self, 'ec2-instance-neo4j-server-instance',
            instance_name="instance-altimeter--neo4j-community-standalone-server",
            machine_image=MachineImage.generic_linux(
                ami_map={
                    "eu-west-1": "ami-00c8631d384ad7c53"
                }
            ),
            instance_type=instance_type,
            role=neo4j_server_instance_role,
            vpc=vpc,
            # vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Public').subnets),
            security_group=instance_security_group,
            user_data=user_data,
            block_devices=[
                BlockDevice(
                    device_name="/dev/sda1",
                    volume=BlockDeviceVolume.ebs(
                        volume_size=10,
                        volume_type=EbsDeviceVolumeType.GP2,
                        encrypted=True, # Just encrypt
                        delete_on_termination=True
                    )                    
                ),
                BlockDevice(
                    device_name="/dev/sdb",
                    volume=BlockDeviceVolume.ebs(
                        volume_size=20, # TODO: Check size requirements
                        volume_type=EbsDeviceVolumeType.GP2,
                        encrypted=True,
                        delete_on_termination=True # ASSUMPTION: NO 'primary' data, only altimeter results.
                    )                    
                )
            ]
        )

        cdk.Tags.of(self.instance).add("neo4j_mode", "SINGLE")
        cdk.Tags.of(self.instance).add("dbms_mode", "SINGLE")