Example #1
    def __init__(self, scope: Construct, id: str, envs: EnvSettings):
        super().__init__(scope, id)

        self.ecr = BaseECR(self, "ECR", envs)

        self.key = BaseKMS(self, "KMS", envs)

        self.vpc = Vpc(self, "Vpc", nat_gateways=1)

        self.cluster = Cluster(self,
                               "WorkersCluster",
                               cluster_name="schema-ecs-cluster",
                               vpc=self.vpc)

        self.db = DatabaseInstance(
            self,
            "DataBase",
            database_name=envs.data_base_name,
            engine=DatabaseInstanceEngine.POSTGRES,
            storage_encrypted=True,
            allocated_storage=50,
            instance_type=InstanceType.of(InstanceClass.BURSTABLE2,
                                          InstanceSize.SMALL),
            vpc=self.vpc,
        )

        if self.db.secret:
            CfnOutput(
                self,
                id="DbSecretOutput",
                export_name=self.get_database_secret_arn_output_export_name(
                    envs),
                value=self.db.secret.secret_arn,
            )
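The export-name helper called above is not part of this snippet. A minimal sketch of what it could look like is shown below; the method name matches the call above, but the project_name attribute on EnvSettings is an assumption:

    @staticmethod
    def get_database_secret_arn_output_export_name(envs: EnvSettings) -> str:
        # Hypothetical helper: derive a unique CloudFormation export name from the
        # environment settings; the project_name attribute is an assumption.
        return f"{envs.project_name}-databaseSecretArn"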
Example #2
    def __init__(self, scope: Construct, stack_id: str, *,
                 props: ComputeTierProps, **kwargs):
        """
        Initializes a new instance of ComputeTier
        :param scope: The Scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties of this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # We can put the health monitor and worker fleet in all of the local zones we're using
        subnets = SubnetSelection(availability_zones=props.availability_zones,
                                  subnet_type=SubnetType.PRIVATE,
                                  one_per_az=True)

        # The health monitor lives in the same local zones as the worker fleet it watches
        self.health_monitor = HealthMonitor(self,
                                            'HealthMonitor',
                                            vpc=props.vpc,
                                            vpc_subnets=subnets,
                                            deletion_protection=False)

        self.worker_fleet = WorkerInstanceFleet(
            self,
            'WorkerFleet',
            vpc=props.vpc,
            vpc_subnets=subnets,
            render_queue=props.render_queue,
            # Not all instance types will be available in local zones. For a list of the instance types
            # available in each local zone, you can refer to:
            # https://aws.amazon.com/about-aws/global-infrastructure/localzones/features/#AWS_Services
            # BURSTABLE3 is a T3; the third generation of burstable instances
            instance_type=InstanceType.of(InstanceClass.BURSTABLE3,
                                          InstanceSize.LARGE),
            worker_machine_image=props.worker_machine_image,
            health_monitor=self.health_monitor,
            key_name=props.key_pair_name,
            user_data_provider=UserDataProvider(self, 'UserDataProvider'))
        SessionManagerHelper.grant_permissions_to(self.worker_fleet)
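ComputeTierProps is defined elsewhere in the sample. The following sketch only lists the fields the constructor above actually reads; the field names are inferred from the attribute accesses, while the dataclass layout and exact types are assumptions:

from dataclasses import dataclass
from typing import List, Optional

from aws_cdk.aws_ec2 import IMachineImage, IVpc
from aws_rfdk.deadline import RenderQueue


@dataclass
class ComputeTierProps:
    # Fields read by ComputeTier.__init__ above; types are best guesses.
    vpc: IVpc
    availability_zones: List[str]
    render_queue: RenderQueue
    worker_machine_image: IMachineImage
    key_pair_name: Optional[str] = None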
Example #3
    def __init__(self, scope: Construct, id: str, vpc: Vpc, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.rds = DatabaseInstance(
            self,
            "rds",
            master_username=USER,
            master_user_password=SecretValue(PASSWORD),
            database_name=DATABASE,
            vpc=vpc.vpc,
            engine=DatabaseInstanceEngine.POSTGRES,
            port=PORT,
            instance_type=InstanceType.of(InstanceClass.BURSTABLE3,
                                          InstanceSize.MICRO),
            security_groups=[vpc.sg_rds],
            deletion_protection=False,
        )

        CfnOutput(
            self,
            "rds_address",
            value=f"postgres://{USER}:{PASSWORD}@{self.rds.db_instance_endpoint_address}:{PORT}/{DATABASE}",
        )
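The USER, PASSWORD, DATABASE and PORT constants come from elsewhere in the project. Purely for illustration, plausible placeholder values might look like the following; note that exposing a plaintext password in a stack output is only acceptable for throwaway development stacks:

# Hypothetical module-level constants referenced above; the real values live
# elsewhere in the project and should not hard-code production credentials.
USER = "postgres"
PASSWORD = "change-me"
DATABASE = "mydb"
PORT = 5432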
Example #4
    def __init__(
        self, scope: Construct, id: str, vpc: Vpc, rds: Rds, batch, **kwargs
    ) -> None:
        super().__init__(scope, id, **kwargs)

        cluster = _Cluster(self, "cluster", vpc=vpc.vpc)

        cluster.add_capacity(
            "ag",
            instance_type=InstanceType.of(InstanceClass.STANDARD5, InstanceSize.LARGE),
        )

        shared_airflow_env = {
            "AIRFLOW__CORE__SQL_ALCHEMY_CONN": rds.uri,
            "AIRFLOW__CORE__LOAD_EXAMPLES": "True",
            "AIRFLOW__CORE__EXECUTOR": "LocalExecutor",
        }

        init_db = InitTask(self, "init_db", shared_airflow_env=shared_airflow_env)

        scheduler_service = SchedulerService(
            self,
            "scheduler_service",
            cluster=cluster,
            shared_airflow_env=shared_airflow_env,
        )

        webserver_service = WebserverService(
            self,
            "webserver_service",
            cluster=cluster,
            shared_airflow_env=shared_airflow_env,
            vpc=vpc.vpc,
        )

        CfnOutput(self, "cluster-name", value=cluster.cluster_name)
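InstanceType.of(...), used throughout these examples, simply assembles the familiar EC2 instance-type name from a class/size pair, so the capacity added above is the same as asking for m5.large by name:

from aws_cdk.aws_ec2 import InstanceClass, InstanceSize, InstanceType

# STANDARD5 is the m5 family, so both forms name the same instance type.
by_enum = InstanceType.of(InstanceClass.STANDARD5, InstanceSize.LARGE)
by_name = InstanceType("m5.large")
assert by_enum.to_string() == by_name.to_string() == "m5.large"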
Example #5
def main():
    # ------------------------------
    # Validate Config Values
    # ------------------------------

    if not config.ubl_certificate_secret_arn and config.ubl_licenses:
        raise ValueError(
            'UBL certificates secret ARN is required when using UBL but was not specified.'
        )

    if not config.ubl_licenses:
        print('No UBL licenses specified. UBL Licensing will not be set up.')

    if not config.key_pair_name:
        print(
            'EC2 key pair name not specified. You will not have SSH access to the render farm.'
        )

    # The AMI map presumably still holds its placeholder 'region' key when no real
    # region-to-AMI mapping has been supplied, so treat that as "not specified".
    if 'region' in config.deadline_client_linux_ami_map:
        raise ValueError(
            'Deadline Client Linux AMI map is required but was not specified.')

    # ------------------------------
    # Application
    # ------------------------------
    app = App()

    if 'CDK_DEPLOY_ACCOUNT' not in os.environ and 'CDK_DEFAULT_ACCOUNT' not in os.environ:
        raise ValueError(
            'You must define either CDK_DEPLOY_ACCOUNT or CDK_DEFAULT_ACCOUNT in the environment.'
        )
    if 'CDK_DEPLOY_REGION' not in os.environ and 'CDK_DEFAULT_REGION' not in os.environ:
        raise ValueError(
            'You must define either CDK_DEPLOY_REGION or CDK_DEFAULT_REGION in the environment.'
        )
    env = Environment(
        account=os.environ.get('CDK_DEPLOY_ACCOUNT',
                               os.environ.get('CDK_DEFAULT_ACCOUNT')),
        region=os.environ.get('CDK_DEPLOY_REGION',
                              os.environ.get('CDK_DEFAULT_REGION')))

    # ------------------------------
    # Network Tier
    # ------------------------------
    network = network_tier.NetworkTier(app, 'NetworkTier', env=env)

    # ------------------------------
    # Security Tier
    # ------------------------------
    security = security_tier.SecurityTier(app, 'SecurityTier', env=env)

    # ------------------------------
    # Storage Tier
    # ------------------------------
    if config.deploy_mongo_db:
        storage_props = storage_tier.StorageTierMongoDBProps(
            vpc=network.vpc,
            database_instance_type=InstanceType.of(InstanceClass.MEMORY5,
                                                   InstanceSize.LARGE),
            root_ca=security.root_ca,
            dns_zone=network.dns_zone,
            accept_sspl_license=config.accept_sspl_license,
            key_pair_name=config.key_pair_name)
        storage = storage_tier.StorageTierMongoDB(app,
                                                  'StorageTier',
                                                  props=storage_props,
                                                  env=env)
    else:
        storage_props = storage_tier.StorageTierDocDBProps(
            vpc=network.vpc,
            database_instance_type=InstanceType.of(InstanceClass.MEMORY5,
                                                   InstanceSize.LARGE),
        )
        storage = storage_tier.StorageTierDocDB(app,
                                                'StorageTier',
                                                props=storage_props,
                                                env=env)

    # ------------------------------
    # Service Tier
    # ------------------------------
    service_props = service_tier.ServiceTierProps(
        database=storage.database,
        file_system=storage.file_system,
        vpc=network.vpc,
        docker_recipes_stage_path=os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.pardir, 'stage'),
        ubl_certs_secret_arn=config.ubl_certificate_secret_arn,
        ubl_licenses=config.ubl_licenses,
        root_ca=security.root_ca,
        dns_zone=network.dns_zone)
    service = service_tier.ServiceTier(app,
                                       'ServiceTier',
                                       props=service_props,
                                       env=env)

    # ------------------------------
    # Compute Tier
    # ------------------------------
    deadline_client_image = MachineImage.generic_linux(
        config.deadline_client_linux_ami_map)
    compute_props = compute_tier.ComputeTierProps(
        vpc=network.vpc,
        render_queue=service.render_queue,
        worker_machine_image=deadline_client_image,
        key_pair_name=config.key_pair_name,
        usage_based_licensing=service.ubl_licensing,
        licenses=config.ubl_licenses)
    _compute = compute_tier.ComputeTier(app,
                                        'ComputeTier',
                                        props=compute_props,
                                        env=env)

    app.synth()
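The config object consulted in main() is defined in a separate module. A rough sketch of the fields it needs to expose is shown below; the names come from the accesses above, while the defaults and types are assumptions:

from dataclasses import dataclass, field
from typing import List, Mapping, Optional


@dataclass
class AppConfig:
    # Fields read by main() above; defaults and types are assumptions.
    ubl_certificate_secret_arn: str = ''
    ubl_licenses: List = field(default_factory=list)  # typically UsageBasedLicense objects
    key_pair_name: Optional[str] = None
    deploy_mongo_db: bool = False
    accept_sspl_license: str = 'false'
    # A still-present placeholder 'region' key is what the AMI-map check above rejects.
    deadline_client_linux_ami_map: Mapping[str, str] = field(
        default_factory=lambda: {'region': 'ami-id'})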
Example #6
    def __init__(self, scope: Construct, stack_id: str, *, props: SEPStackProps, **kwargs):
        """
        Initialize a new instance of SEPStack
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties for this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # The VPC that all components of the render farm will be created in.
        vpc = Vpc(
            self,
            'Vpc',
            max_azs=2,
        )

        recipes = ThinkboxDockerRecipes(
            self,
            'Image',
            stage=Stage.from_directory(props.docker_recipes_stage_path),
        )

        repository = Repository(
            self,
            'Repository',
            vpc=vpc,
            version=recipes.version,
            repository_installation_timeout=Duration.minutes(20),
            # TODO - Evaluate deletion protection for your own needs. These properties are set to RemovalPolicy.DESTROY
            # to cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that these resources are not accidentally deleted, you should set these properties to RemovalPolicy.RETAIN
            # or just remove the removal_policy parameter.
            removal_policy=RepositoryRemovalPolicies(
                database=RemovalPolicy.DESTROY,
                filesystem=RemovalPolicy.DESTROY,
            ),
        )

        host = 'renderqueue'
        zone_name = 'deadline-test.internal'

        # Internal DNS zone for the VPC.
        dns_zone = PrivateHostedZone(
            self,
            'DnsZone',
            vpc=vpc,
            zone_name=zone_name,
        )

        ca_cert = X509CertificatePem(
            self,
            'RootCA',
            subject=DistinguishedName(
                cn='SampleRootCA',
            ),
        )

        server_cert = X509CertificatePem(
            self,
            'RQCert',
            subject=DistinguishedName(
                cn=f'{host}.{dns_zone.zone_name}',
                o='RFDK-Sample',
                ou='RenderQueueExternal',
            ),
            signing_certificate=ca_cert,
        )

        render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=vpc,
            version=recipes.version,
            images=recipes.render_queue_images,
            repository=repository,
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False,
            hostname=RenderQueueHostNameProps(
                hostname=host,
                zone=dns_zone,
            ),
            traffic_encryption=RenderQueueTrafficEncryptionProps(
                external_tls=RenderQueueExternalTLSProps(
                    rfdk_certificate=server_cert,
                ),
                internal_protocol=ApplicationProtocol.HTTPS,
            ),
        )

        if props.create_resource_tracker_role:
            # Creates the Resource Tracker Access role. This role must exist in your
            # account for the Resource Tracker to work properly.
            Role(
                self,
                'ResourceTrackerRole',
                assumed_by=ServicePrincipal('lambda.amazonaws.com'),
                managed_policies=[ManagedPolicy.from_aws_managed_policy_name('AWSThinkboxDeadlineResourceTrackerAccessPolicy')],
                role_name='DeadlineResourceTrackerAccessRole',
            )

        fleet = SpotEventPluginFleet(
            self,
            'SpotEventPluginFleet',
            vpc=vpc,
            render_queue=render_queue,
            deadline_groups=['group_name'],
            instance_types=[InstanceType.of(InstanceClass.BURSTABLE3, InstanceSize.LARGE)],
            worker_machine_image=props.worker_machine_image,
            max_capacity=1,
        )

        # Optional: Add additional tags to both spot fleet request and spot instances.
        Tags.of(fleet).add('name', 'SEPtest')

        ConfigureSpotEventPlugin(
            self,
            'ConfigureSpotEventPlugin',
            vpc=vpc,
            render_queue=render_queue,
            spot_fleets=[fleet],
            configuration=SpotEventPluginSettings(
                enable_resource_tracker=True,
            ),
        )
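SEPStackProps is likewise defined outside this snippet. A minimal sketch of the properties the constructor reads, with names taken from the attribute accesses above and everything else assumed:

from dataclasses import dataclass

from aws_cdk.aws_ec2 import IMachineImage


@dataclass
class SEPStackProps:
    # Properties read by SEPStack.__init__ above; types are best guesses.
    docker_recipes_stage_path: str
    worker_machine_image: IMachineImage
    create_resource_tracker_role: bool = True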
Example #7
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.neo4j_user_secret = Secret(self, 'secretsmanager-secret-neo4j-user',
            secret_name=NEO4J_USER_SECRET_NAME
        )

        neo4j_server_instance_role = Role(self, 'iam-role-neo4j-server-instance',
            assumed_by=ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'), # Use SSM Session Manager rather than straight ssh
                ManagedPolicy.from_aws_managed_policy_name('CloudWatchAgentServerPolicy')
            ],
            inline_policies={
                "work-with-tags": PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=[
                                'ec2:CreateTags',
                                'ec2:Describe*',
                                'elasticloadbalancing:Describe*',
                                'cloudwatch:ListMetrics',
                                'cloudwatch:GetMetricStatistics',
                                'cloudwatch:Describe*',
                                'autoscaling:Describe*',
                            ],
                            resources=["*"]
                        )
                    ]
                ),
                "access-neo4j-user-secret": PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=['secretsmanager:GetSecretValue'],
                            resources=[self.neo4j_user_secret.secret_arn]
                        )
                    ]
                )
            }
        )

        instance_security_group = SecurityGroup(self, "ec2-sg-neo4j-server-instance",
            description="Altimeter Neo4j Server Instance",
            vpc=vpc
        )

        instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7687), 'Bolt from ANYWHERE') # TESTING
        # instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7473), 'Neo4j browser (HTTPS) from ANYWHERE') # TESTING

        # Prepare userdata script
        with open("./resources/neo4j-server-instance-userdata.sh", "r") as f:
            userdata_content = f.read()
        userdata_content = userdata_content.replace("[[NEO4J_USER_SECRET_NAME]]",NEO4J_USER_SECRET_NAME)
        user_data = UserData.for_linux()
        user_data.add_commands(userdata_content)

        instance_type = InstanceType.of(InstanceClass.BURSTABLE2, InstanceSize.MEDIUM)

        self.instance = Instance(self, 'ec2-instance-neo4j-server-instance',
            instance_name="instance-altimeter--neo4j-community-standalone-server",
            machine_image=MachineImage.generic_linux(
                ami_map={
                    "eu-west-1": "ami-00c8631d384ad7c53"
                }
            ),
            instance_type=instance_type,
            role=neo4j_server_instance_role,
            vpc=vpc,
            # vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Public').subnets),
            security_group=instance_security_group,
            user_data=user_data,
            block_devices=[
                BlockDevice(
                    device_name="/dev/sda1",
                    volume=BlockDeviceVolume.ebs(
                        volume_size=10,
                        volume_type=EbsDeviceVolumeType.GP2,
                        encrypted=True, # Just encrypt
                        delete_on_termination=True
                    )                    
                ),
                BlockDevice(
                    device_name="/dev/sdb",
                    volume=BlockDeviceVolume.ebs(
                        volume_size=20, # TODO: Check size requirements
                        volume_type=EbsDeviceVolumeType.GP2,
                        encrypted=True,
                        delete_on_termination=True # ASSUMPTION: NO 'primary' data, only altimeter results.
                    )                    
                )
            ]
        )

        cdk.Tags.of(self.instance).add("neo4j_mode", "SINGLE")
        cdk.Tags.of(self.instance).add("dbms_mode", "SINGLE")
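As a side note, the inline access-neo4j-user-secret policy above can also be expressed with the secret's own grant helper, which keeps the permission next to the resource it protects. A sketch that reuses the role and secret created above:

        # Equivalent to the inline PolicyDocument above: allows secretsmanager:GetSecretValue
        # on this one secret (and kms:Decrypt if the secret uses a customer-managed key).
        self.neo4j_user_secret.grant_read(neo4j_server_instance_role)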