Example #1
    def add_deny_for_everyone_except(
        self,
        master_secret: secretsmanager.Secret,
        producer_functions: List[lambda_.Function],
    ) -> None:
        """
        Sets up the master secret resource policy so that everything *except* the given functions
        is denied access to GetSecretValue.

        Args:
            master_secret: the master secret construct
            producer_functions: a list of functions we are going to set as the only allowed accessors
        """
        # lock down the master secret so that *only* the JWT producer can read values
        # (it is only once this DENY policy is set that other roles in the same account
        #  are blocked from reading the secret value - which is why we then need to explicitly
        #  allow the roles we *do* want to have access)
        role_arns: List[str] = []

        for f in producer_functions:
            if not f.role:
                raise Exception(
                    f"Rotation function {f.function_name} does not have an execution role"
                )

            role_arns.append(f.role.role_arn)

        master_secret.add_to_resource_policy(
            iam.PolicyStatement(
                effect=iam.Effect.DENY,
                actions=["secretsmanager:GetSecretValue"],
                resources=["*"],
                principals=[iam.AccountRootPrincipal()],
                # https://stackoverflow.com/questions/63915906/aws-secrets-manager-resource-policy-to-deny-all-roles-except-one-role
                conditions={
                    "ForAllValues:StringNotEquals": {
                        "aws:PrincipalArn": role_arns
                    }
                },
            ))
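
A minimal usage sketch for the method above. This is illustrative only: it assumes the method lives on a Stack/Construct subclass, and the secret and function names here are invented, not from the original codebase.

    # Hypothetical wiring (inside the same construct class):
    master = secretsmanager.Secret(self, "MasterSecret")
    producer = lambda_.Function(
        self, "Producer",
        runtime=lambda_.Runtime.PYTHON_3_8,
        handler="index.handler",
        code=lambda_.Code.from_inline("def handler(event, context): pass"),
    )
    self.add_deny_for_everyone_except(master, [producer])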
Example #2
 def configure_secret_rotation(self, rotator_lambda, secret_config,
                               duration):
     """
     Adds the rotator_lambda to the secret referenced using secret_id in the secrets_config.json.
     Args:
         rotator_lambda (Function): The lambda function used for rotating a secret
         secret_config (Dictionary): The configuration for the secret specified in secrets_config.json
         duration (Duration): The rotation time interval
     """
     secret_arn = get_secret_arn(secret_config)
     secret_key = get_secret_key(secret_config)
     secret = Secret.from_secret_complete_arn(
         self, secret_key, secret_complete_arn=secret_arn)
     secret.add_rotation_schedule(id=f'{secret_key}-rotator',
                                  automatically_after=duration,
                                  rotation_lambda=rotator_lambda)
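
The helpers get_secret_arn and get_secret_key are not shown in this example. A plausible sketch, assuming each entry in secrets_config.json carries "arn" and "key" fields (the field names are assumptions, not from the original):

    def get_secret_arn(secret_config: dict) -> str:
        # Assumed entry shape: {"arn": "...", "key": "..."}
        return secret_config["arn"]

    def get_secret_key(secret_config: dict) -> str:
        return secret_config["key"]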
Example #3
    def __init__(self, scope: App, id: str, envs: EnvSettings,
                 components: ComponentsStack):
        super().__init__(scope, id)

        self.backend_domain_name = StringParameter.from_string_parameter_name(
            self,
            "DomainNameParameter",
            string_parameter_name="/schema-cms-app/DOMAIN_NAME").string_value

        self.backend_url = f"https://{self.backend_domain_name}/api/v1/"

        self.job_processing_queues = components.data_processing_queues

        self.app_bucket = Bucket.from_bucket_arn(
            self,
            id="App",
            bucket_arn=Fn.import_value(
                ApiStack.get_app_bucket_arn_output_export_name(envs)))
        self.resize_lambda_image_bucket = Bucket.from_bucket_arn(
            self,
            id="Images",
            bucket_arn=Fn.import_value(
                ImageResizeStack.
                get_image_resize_bucket_arn_output_export_name(envs)),
        )

        self.lambda_auth_token = Secret.from_secret_arn(
            self,
            id="lambda-auth-token",
            secret_arn=Fn.import_value(
                ApiStack.get_lambda_auth_token_arn_output_export_name(envs)),
        )

        self.functions = [
            self._create_lambda_fn(envs, memory_size, queue) for memory_size,
            queue in zip(envs.lambdas_sizes, self.job_processing_queues)
        ]
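
The _create_lambda_fn factory is defined elsewhere in this stack. A rough sketch of what it might look like, given how it is called above (the asset path, handler name, and environment keys are assumptions):

    def _create_lambda_fn(self, envs: EnvSettings, memory_size: int, queue):
        from aws_cdk.aws_lambda import Code, Function, Runtime
        from aws_cdk.aws_lambda_event_sources import SqsEventSource

        # One worker function per (memory size, queue) pair.
        fn = Function(
            self,
            f"Worker{memory_size}",
            runtime=Runtime.PYTHON_3_8,
            code=Code.from_asset("backend/functions/worker"),  # assumed path
            handler="handler.main",  # assumed handler
            memory_size=memory_size,
            environment={"BACKEND_URL": self.backend_url},
        )
        fn.add_event_source(SqsEventSource(queue))
        return fn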
Example #4
    def __init__(self, scope: Construct, stack_id: str, *,
                 props: ServiceTierProps, **kwargs):
        """
        Initialize a new instance of ServiceTier
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties for this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # Bastion instance for convenience (e.g. SSH into RenderQueue and WorkerFleet instances).
        # Not a critical component of the farm, so this can be safely removed. An alternative way
        # to access your hosts is also provided by the Session Manager, which is also configured
        # later in this example.
        self.bastion = BastionHostLinux(
            self,
            'Bastion',
            vpc=props.vpc,
            subnet_selection=SubnetSelection(
                subnet_group_name=subnets.PUBLIC.name),
            block_devices=[
                BlockDevice(device_name='/dev/xvda',
                            volume=BlockDeviceVolume.ebs(50, encrypted=True))
            ])

        # Mount the root of the EFS file-system onto the bastion host for convenience.
        # This can safely be removed.
        MountableEfs(self, filesystem=props.mountable_file_system.file_system
                     ).mount_to_linux_instance(self.bastion.instance,
                                               location='/mnt/efs')

        self.version = VersionQuery(self,
                                    'Version',
                                    version=props.deadline_version)

        secrets_management_settings = SecretsManagementProps(
            enabled=props.enable_secrets_management)
        if props.enable_secrets_management and props.secrets_management_secret_arn is not None:
            secrets_management_settings[
                "credentials"] = Secret.from_secret_arn(
                    self, 'SMAdminUser', props.secrets_management_secret_arn)

        repository = Repository(
            self,
            'Repository',
            vpc=props.vpc,
            vpc_subnets=SubnetSelection(
                subnet_group_name=subnets.INFRASTRUCTURE.name),
            database=props.database,
            file_system=props.mountable_file_system,
            repository_installation_timeout=Duration.minutes(20),
            repository_installation_prefix='/',
            version=self.version,
            secrets_management_settings=secrets_management_settings)

        images = ThinkboxDockerImages(
            self,
            'Images',
            version=self.version,
            user_aws_thinkbox_eula_acceptance=props.accept_aws_thinkbox_eula)

        server_cert = X509CertificatePem(
            self,
            'RQCert',
            subject=DistinguishedName(
                cn=f'renderqueue.{props.dns_zone.zone_name}',
                o='RFDK-Sample',
                ou='RenderQueueExternal'),
            signing_certificate=props.root_ca)

        self.render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=props.vpc,
            vpc_subnets=SubnetSelection(
                subnet_group_name=subnets.INFRASTRUCTURE.name),
            # It is considered good practice to put the Render Queue's load balancer in dedicated subnets because:
            #
            # 1. Deadline Secrets Management identity registration settings will be scoped down to least-privilege
            #
            #    (see https://github.com/aws/aws-rfdk/blob/release/packages/aws-rfdk/lib/deadline/README.md#render-queue-subnet-placement)
            #
            # 2. The load balancer can scale to use IP addresses in the subnet without conflicts from other AWS
            #    resources
            #
            #    (see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/application-load-balancers.html#subnets-load-balancer)
            vpc_subnets_alb=SubnetSelection(
                subnet_group_name=subnets.RENDER_QUEUE_ALB.name),
            images=images,
            repository=repository,
            hostname=RenderQueueHostNameProps(hostname='renderqueue',
                                              zone=props.dns_zone),
            traffic_encryption=RenderQueueTrafficEncryptionProps(
                external_tls=RenderQueueExternalTLSProps(
                    rfdk_certificate=server_cert),
                internal_protocol=ApplicationProtocol.HTTPS),
            version=self.version,
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False,
            # Enable a local transparent filesystem cache of the Repository filesystem to reduce
            # data traffic from the Repository's filesystem.
            # For an EFS and NFS filesystem, this requires the 'fsc' mount option.
            enable_local_file_caching=True,
        )
        self.render_queue.connections.allow_default_port_from(self.bastion)

        # This is an optional feature that will set up your EC2 instances to be enabled for use with
        # the Session Manager. RFDK deploys EC2 instances that aren't available through a public subnet,
        # so connecting to them by SSH isn't easy. This is an option to quickly access hosts without
        # using a bastion instance.
        # It's important to note that the permissions need to be granted to the render queue's ASG,
        # rather than the render queue itself.
        SessionManagerHelper.grant_permissions_to(self.render_queue.asg)

        if props.ubl_licenses:
            if not props.ubl_certs_secret_arn:
                raise ValueError(
                    'UBL certificates secret ARN is required when using UBL but was not specified.'
                )
            ubl_cert_secret = Secret.from_secret_arn(
                self, 'ublcertssecret', props.ubl_certs_secret_arn)
            self.ubl_licensing = UsageBasedLicensing(
                self,
                'UsageBasedLicensing',
                vpc=props.vpc,
                vpc_subnets=SubnetSelection(
                    subnet_group_name=subnets.USAGE_BASED_LICENSING.name),
                images=images,
                licenses=props.ubl_licenses,
                render_queue=self.render_queue,
                certificate_secret=ubl_cert_secret,
            )

            # Another optional usage of the SessionManagerHelper that demonstrates how to configure the UBL
            # construct's ASG for access. Note that this construct also requires you to apply the permissions
            # to its ASG property.
            SessionManagerHelper.grant_permissions_to(self.ubl_licensing.asg)
        else:
            self.ubl_licensing = None
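
ServiceTierProps is supplied by the app's entry point. A best-effort sketch of the fields this tier actually reads, written as a plain dataclass (the exact types are assumptions inferred from the usage above):

    from dataclasses import dataclass
    from typing import List, Optional

    @dataclass
    class ServiceTierProps:
        vpc: 'IVpc'                                  # aws_cdk.aws_ec2
        database: 'DatabaseConnection'               # aws_rfdk.deadline
        mountable_file_system: 'MountableEfs'        # aws_rfdk
        deadline_version: str
        accept_aws_thinkbox_eula: 'AwsThinkboxEulaAcceptance'  # aws_rfdk.deadline
        root_ca: 'X509CertificatePem'                # aws_rfdk
        dns_zone: 'IPrivateHostedZone'               # aws_cdk.aws_route53
        ubl_certs_secret_arn: Optional[str]
        ubl_licenses: List['UsageBasedLicense']      # aws_rfdk.deadline
        enable_secrets_management: bool
        secrets_management_secret_arn: Optional[str]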
Example #5
    def __init__(
        self,
        scope: cdk.Construct,
        construct_id: str,
        vpc: ec2.IVpc,
        name: str,
        release_label: str,
        rds_secret: secrets.Secret,
        rds_connections: ec2.Connections,
        log_bucket_name: str = None,
        ssh_key_name: str = None,
        **kwargs,
    ) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.tag_vpc(vpc)

        job_role = self.get_job_role()
        service_role = self.get_service_role()
        instance_profile = self.create_instance_profile(job_role)
        log_bucket = get_or_create_bucket(self, "emr_logs", log_bucket_name)

        # Assign necessary permissions
        # EMR needs to be able to PutObject to the log bucket
        log_bucket.grant_put(job_role)

        # EMR needs to be able to PassRole to the instance profile role
        # https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-iam-role-for-ec2.html#emr-ec2-role-least-privilege
        # https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-iam-role.html
        service_role.add_to_policy(
            iam.PolicyStatement(
                actions=["iam:PassRole"],
                resources=[job_role.role_arn],
                conditions={
                    "StringEquals": {"iam:PassedToService": "ec2.amazonaws.com"}
                },
            )
        )

        # Database configuration variables
        rds_hostname = rds_secret.secret_value_from_json("host").to_string()
        rds_port = rds_secret.secret_value_from_json("port").to_string()
        rds_dbname = rds_secret.secret_value_from_json("dbname").to_string()

        # Desired subnet for the EMR cluster
        emr_subnet = vpc.public_subnets[0]

        self.cluster = emr.CfnCluster(
            self,
            construct_id,
            instances=emr.CfnCluster.JobFlowInstancesConfigProperty(
                master_instance_group=emr.CfnCluster.InstanceGroupConfigProperty(
                    instance_count=1, instance_type="m5.xlarge"
                ),
                core_instance_group=emr.CfnCluster.InstanceGroupConfigProperty(
                    instance_count=2, instance_type="m5.xlarge"
                ),
                ec2_subnet_id=emr_subnet.subnet_id,
            ),
            name=name,
            release_label=release_label,
            log_uri=f"s3://{log_bucket.bucket_name}/elasticmapreduce/",
            job_flow_role=job_role.role_name,
            service_role=service_role.role_name,
            applications=[
                emr.CfnCluster.ApplicationProperty(name=n)
                for n in [
                    "Spark",
                    "Hive",
                    "Zeppelin",
                    "Livy",
                    "JupyterEnterpriseGateway",
                ]
            ],
            visible_to_all_users=True, # Required for EMR Notebooks
            configurations=[
                emr.CfnCluster.ConfigurationProperty(
                    classification="hive-site",
                    configuration_properties={
                        "javax.jdo.option.ConnectionURL": f"jdbc:mysql://{rds_hostname}:{rds_port}/{rds_dbname}?createDatabaseIfNotExist=true",
                        "javax.jdo.option.ConnectionDriverName": "org.mariadb.jdbc.Driver",
                        "javax.jdo.option.ConnectionUserName": rds_secret.secret_value_from_json(
                            "username"
                        ).to_string(),
                        "javax.jdo.option.ConnectionPassword": rds_secret.secret_value_from_json(
                            "password"
                        ).to_string(),
                    },
                ),
            ],
            tags=[
                cdk.CfnTag(
                    key="for-use-with-amazon-emr-managed-policies", value="true"
                ),
            ],
        )

        # Wait for the instance profile to be created
        self.cluster.add_depends_on(instance_profile)

        # Allow EMR to connect to the RDS database
        self.add_rds_ingres(emr_subnet.ipv4_cidr_block, rds_connections)
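
The add_rds_ingres helper (note the spelling) is defined elsewhere on this construct. A plausible sketch of its body, given the call above and the MySQL-style JDBC URL in the hive-site configuration (the port and description are assumptions):

    def add_rds_ingres(self, cidr: str, rds_connections: ec2.Connections) -> None:
        # Let hosts in the EMR subnet reach the RDS metastore.
        rds_connections.allow_from(ec2.Peer.ipv4(cidr), ec2.Port.tcp(3306), "EMR to RDS")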
Example #6
    def create_jwt_secret(
        self,
        master_secret: secretsmanager.Secret,
        ica_base_url: str,
        key_name: str,
        project_ids: Union[str, List[str]],
    ) -> Tuple[secretsmanager.Secret, lambda_.Function]:
        """
        Create a JWT holding secret - that will use the master secret for JWT making - and which will have
        broad permissions to be read by all roles.

        Args:
            master_secret: the master secret to read for the API key for JWT making
            ica_base_url: the base url of ICA to be passed on to the rotators
            key_name: a unique string that we use to name this JWT secret
            project_ids: *either* a single string or a list of string - the choice of type *will* affect
                         the resulting secret output i.e a string input will end up different to a list with one string!

        Returns:
            the JWT secret
        """
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, "runtime/jwt_producer")

        env = {
            "MASTER_ARN": master_secret.secret_arn,
            "ICA_BASE_URL": ica_base_url,
        }

        # flip the instructions to our single lambda - it handles either a single JWT
        # or a dictionary of JWTs
        if isinstance(project_ids, list):
            env["PROJECT_IDS"] = " ".join(project_ids)
        else:
            env["PROJECT_ID"] = project_ids

        jwt_producer = lambda_.Function(
            self,
            "JwtProduce" + key_name,
            runtime=lambda_.Runtime.PYTHON_3_8,
            code=lambda_.AssetCode(filename),
            handler="lambda_entrypoint.main",
            timeout=Duration.minutes(1),
            environment=env,
        )

        # let the JWT producer's lambda role attempt to read the master secret
        # (this is only one part of the permission decision though - the secret's resource policy must allow it too)
        master_secret.grant_read(jwt_producer)

        # secret itself - no default value as it will eventually get replaced by the JWT
        jwt_secret = secretsmanager.Secret(
            self,
            "Jwt" + key_name,
            secret_name=key_name,
            description="JWT(s) providing access to ICA projects",
        )

        # the rotation function that creates JWTs
        jwt_secret.add_rotation_schedule(
            "JwtSecretRotation",
            automatically_after=Duration.days(ROTATION_DAYS),
            rotation_lambda=jwt_producer,
        )

        return jwt_secret, jwt_producer
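
Examples 1 and 6 come from the same construct and compose naturally: create the JWT secret and its producer first, then lock the master secret down to just the producers. A sketch (the ICA URL, key name, and project ids are illustrative placeholders):

    jwt_secret, producer = self.create_jwt_secret(
        master_secret,
        ica_base_url="https://example.ica.invalid",  # illustrative
        key_name="IcaJwt",                           # illustrative
        project_ids=["project-a", "project-b"],      # illustrative
    )
    self.add_deny_for_everyone_except(master_secret, [producer])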
Example #7
    def __init__(self, scope: cdk.Construct, construct_id: str, config, vpc: IVpc, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        self.neo4j_user_secret = Secret(self,'secretsmanager-secret-neo4j-user',
            secret_name=NEO4J_USER_SECRET_NAME
        )
       
        neo4j_server_instance_role = Role(self,'iam-role-neo4j-server-instance',
            assumed_by=ServicePrincipal('ec2.amazonaws.com'),
            managed_policies=[
                ManagedPolicy.from_aws_managed_policy_name('AmazonSSMManagedInstanceCore'), # Use SSM Session Manager rather than straight ssh
                ManagedPolicy.from_aws_managed_policy_name('CloudWatchAgentServerPolicy')
            ],
            inline_policies={
                "work-with-tags": PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=[
                                'ec2:CreateTags',
                                'ec2:Describe*',
                                'elasticloadbalancing:Describe*',
                                'cloudwatch:ListMetrics',
                                'cloudwatch:GetMetricStatistics',
                                'cloudwatch:Describe*',
                                'autoscaling:Describe*',
                            ],
                            resources=["*"]
                        )
                    ]
                ),
                "access-neo4j-user-secret": PolicyDocument(
                    statements=[
                        PolicyStatement(
                            actions=['secretsmanager:GetSecretValue'],
                            resources=[self.neo4j_user_secret.secret_arn]
                        )
                    ]
                )
            }
        )

        instance_security_group = SecurityGroup(self, "ec2-sg-neo4j-server-instance",
            description="Altimeter Neo4j Server Instance",
            vpc=vpc
        )

        instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7687), 'Bolt from ANYWHERE') # TESTING
        # instance_security_group.add_ingress_rule(Peer.ipv4("0.0.0.0/0"), Port.tcp(7473), 'HTTPS from ANYWHERE') # TESTING

        # Prepare userdata script
        with open("./resources/neo4j-server-instance-userdata.sh", "r") as f:
            userdata_content = f.read()
        userdata_content = userdata_content.replace("[[NEO4J_USER_SECRET_NAME]]",NEO4J_USER_SECRET_NAME)
        user_data = UserData.for_linux()
        user_data.add_commands(userdata_content)

        instance_type = InstanceType.of(InstanceClass.BURSTABLE2, InstanceSize.MEDIUM)

        self.instance = Instance(self, 'ec2-instance-neo4j-server-instance',
            instance_name="instance-altimeter--neo4j-community-standalone-server",
            machine_image=MachineImage.generic_linux(
                ami_map={
                    "eu-west-1": "ami-00c8631d384ad7c53"
                }
            ),
            instance_type=instance_type,
            role=neo4j_server_instance_role,
            vpc=vpc,
            # vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Private').subnets),
            vpc_subnets=SubnetSelection(subnets=vpc.select_subnets(subnet_group_name='Public').subnets),
            security_group=instance_security_group,
            user_data=user_data,
            block_devices=[
                BlockDevice(
                    device_name="/dev/sda1",
                    volume=BlockDeviceVolume.ebs(
                        volume_size=10,
                        volume_type=EbsDeviceVolumeType.GP2,
                        encrypted=True, # Just encrypt
                        delete_on_termination=True
                    )                    
                ),
                BlockDevice(
                    device_name="/dev/sdb",
                    volume=BlockDeviceVolume.ebs(
                        volume_size=20, # TODO: Check size requirements
                        volume_type=EbsDeviceVolumeType.GP2,
                        encrypted=True,
                        delete_on_termination=True # ASSUMPTION: NO 'primary' data, only altimeter results.
                    )                    
                )
            ]
        )

        cdk.Tags.of(self.instance).add("neo4j_mode", "SINGLE")
        cdk.Tags.of(self.instance).add("dbms_mode", "SINGLE")
Example #8
    def __init__(self, scope: Construct, stack_id: str, *,
                 props: ServiceTierProps, **kwargs):
        """
        Initialize a new instance of ServiceTier
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties for this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # Bastion instance for convenience (e.g. SSH into RenderQueue and WorkerFleet instances).
        # Not a critical component of the farm, so this can be safely removed. An alternative way
        # to access your hosts is also provided by the Session Manager, which is also configured
        # later in this example.
        self.bastion = BastionHostLinux(
            self,
            'Bastion',
            vpc=props.vpc,
            subnet_selection=SubnetSelection(subnet_type=SubnetType.PUBLIC),
            block_devices=[
                BlockDevice(device_name='/dev/xvda',
                            volume=BlockDeviceVolume.ebs(50, encrypted=True))
            ])

        # Granting the bastion access to the file system mount for convenience.
        # This can also safely be removed.
        props.file_system.mount_to_linux_instance(self.bastion.instance,
                                                  location='/mnt/efs')

        recipes = ThinkboxDockerRecipes(self,
                                        'Image',
                                        stage=Stage.from_directory(
                                            props.docker_recipes_stage_path))

        repository = Repository(
            self,
            'Repository',
            vpc=props.vpc,
            database=props.database,
            file_system=props.file_system,
            repository_installation_timeout=Duration.minutes(20),
            version=recipes.version,
        )

        server_cert = X509CertificatePem(
            self,
            'RQCert',
            subject=DistinguishedName(
                cn=f'renderqueue.{props.dns_zone.zone_name}',
                o='RFDK-Sample',
                ou='RenderQueueExternal'),
            signing_certificate=props.root_ca)

        self.render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=props.vpc,
            images=recipes.render_queue_images,
            repository=repository,
            hostname=RenderQueueHostNameProps(hostname='renderqueue',
                                              zone=props.dns_zone),
            traffic_encryption=RenderQueueTrafficEncryptionProps(
                external_tls=RenderQueueExternalTLSProps(
                    rfdk_certificate=server_cert),
                internal_protocol=ApplicationProtocol.HTTPS),
            version=recipes.version,
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False)
        self.render_queue.connections.allow_default_port_from(self.bastion)

        # This is an optional feature that will set up your EC2 instances to be enabled for use with
        # the Session Manager. RFDK deploys EC2 instances that aren't available through a public subnet,
        # so connecting to them by SSH isn't easy. This is an option to quickly access hosts without
        # using a bastion instance.
        # It's important to note that the permissions need to be granted to the render queue's ASG,
        # rather than the render queue itself.
        SessionManagerHelper.grant_permissions_to(self.render_queue.asg)

        if props.ubl_licenses:
            if not props.ubl_certs_secret_arn:
                raise ValueError(
                    'UBL certificates secret ARN is required when using UBL but was not specified.'
                )
            ubl_cert_secret = Secret.from_secret_arn(
                self, 'ublcertssecret', props.ubl_certs_secret_arn)
            self.ubl_licensing = UsageBasedLicensing(
                self,
                'usagebasedlicensing',
                vpc=props.vpc,
                images=recipes.ubl_images,
                licenses=props.ubl_licenses,
                render_queue=self.render_queue,
                certificate_secret=ubl_cert_secret,
            )

            # Another optional usage of the SessionManagerHelper that demonstrates how to configure the UBL
            # construct's ASG for access. Note that this construct also requires you to apply the permissions
            # to its ASG property.
            SessionManagerHelper.grant_permissions_to(self.ubl_licensing.asg)
        else:
            self.ubl_licensing = None
Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        db_name = 'petclinic'
        db_cluster = 'petclinic-serverless-graphql'

        petclinic_graphql_api = CfnGraphQLApi(
            self, 'PetClinicApi',
            name='PetClinicApi',
            authentication_type='API_KEY'
        )

        petclinic_graphql_key = CfnApiKey(
            self, 'ItemsApiKey',
            api_id=petclinic_graphql_api.attr_api_id
        )

        with open('./definition/petclinic.graphql', 'rt') as f:
            schema_def = f.read()

        petclinic_schema = CfnGraphQLSchema(
            self, 'PetclinicSchema',
            api_id=petclinic_graphql_api.attr_api_id,
            definition=schema_def
        )

        serverless_rds_secret = Secret(
            self, 'PetclinicRDSSecret',
            generate_secret_string=SecretStringGenerator(
                generate_string_key='password',
                secret_string_template='{"username":"******"}',
                exclude_characters= '"@/',
                password_length=16
            )
        )

        serverless_rds_cluster = CfnDBCluster(
            self, 'PetclinicRDSServerless',
            engine='aurora',
            database_name=db_name,
            db_cluster_identifier=db_cluster,
            engine_mode='serverless',
            master_username=serverless_rds_secret.secret_value_from_json('username').to_string(),
            master_user_password=serverless_rds_secret.secret_value_from_json('password').to_string(),
            scaling_configuration=CfnDBCluster.ScalingConfigurationProperty(
              min_capacity=1,
              max_capacity=2,
              auto_pause=False
            )
        )

        serverless_rds_cluster.apply_removal_policy(core.RemovalPolicy.DESTROY)

        serverless_rds_arn='arn:aws:rds:' + self.region + ':' + self.account + ':cluster:' + db_cluster

        website_bucket = Bucket(self, 'PetclinicWebsite',
            website_index_document='index.html',
            public_read_access=True,
            removal_policy=core.RemovalPolicy.DESTROY
        )

        deployment = BucketDeployment(self, 'PetclinicDeployWebsite',
          sources=[Source.asset('../frontend/public')],
          destination_bucket=website_bucket,
          retain_on_delete=False
          #destination_key_prefix='web/static'
        )

        iam_policy = [
            PolicyStatement(
                actions=["secretsmanager:GetSecretValue"],
                effect=Effect.ALLOW,
                resources=[serverless_rds_secret.secret_arn]
            ),
            PolicyStatement(
                actions=["rds-data:ExecuteStatement",
                         "rds-data:DeleteItems",
                         "rds-data:ExecuteSql",
                         "rds-data:GetItems",
                         "rds-data:InsertItems",
                         "rds-data:UpdateItems"],
                effect=Effect.ALLOW,
                resources=[serverless_rds_arn, serverless_rds_arn + ':*']
            ),
            PolicyStatement(
                actions=["rds:*"],
                effect=Effect.ALLOW,
                resources=[serverless_rds_arn, serverless_rds_arn + ':*']
            ),
            PolicyStatement(
                actions=["s3:PutObject", "s3:PutObjectAcl", "s3:PutObjectVersionAcl", "s3:GetObject"],
                effect=Effect.ALLOW,
                resources=[website_bucket.bucket_arn + "/*"]
            ),
            PolicyStatement(
                actions=["s3:ListBucket"],
                effect=Effect.ALLOW,
                resources=[website_bucket.bucket_arn]
            )
        ]

        init_resource = CustomResource(self, "PetlinicInitCustomResource",
            provider=CustomResourceProvider.lambda_(
                SingletonFunction(
                    self, "CustomResourceSingleton",
                    uuid="f7d4f730-4ee1-11e8-9c2d-fa7ae01bbebc",
                    code=Code.from_asset('./custom-resource-code'),
                    handler="index.handler",
                    timeout=core.Duration.seconds(600),
                    runtime=Runtime.PYTHON_3_7,
                    initial_policy=iam_policy
                )
            ),
            properties={
                "DBClusterIdentifier": db_cluster,
                "DBClusterArn": serverless_rds_arn,
                "DBSecretArn": serverless_rds_secret.secret_arn,
                "DBName": db_name,
                "Bucket": website_bucket.bucket_name,
                "GraphqlApi": petclinic_graphql_api.attr_graph_ql_url,
                "GraphqlKey": petclinic_graphql_key.attr_api_key
            }
        )

        petclinic_rds_role = Role(
            self, 'PetclinicRDSRole',
            assumed_by=ServicePrincipal('appsync.amazonaws.com')
        )

        petclinic_rds_role.add_to_policy(iam_policy[0])
        petclinic_rds_role.add_to_policy(iam_policy[1])

        data_source = CfnDataSource(
            self, 'PetclinicRDSDatesource',
            api_id=petclinic_graphql_api.attr_api_id,
            type='RELATIONAL_DATABASE',
            name='PetclinicRDSDatesource',
            relational_database_config=CfnDataSource.RelationalDatabaseConfigProperty(
                relational_database_source_type='RDS_HTTP_ENDPOINT',
                rds_http_endpoint_config=CfnDataSource.RdsHttpEndpointConfigProperty(
                    aws_region=self.region,
                    aws_secret_store_arn=serverless_rds_secret.secret_arn,
                    database_name='petclinic',
                    db_cluster_identifier=serverless_rds_arn
                )
            ),
            service_role_arn=petclinic_rds_role.role_arn 
        )
        data_source.add_depends_on(petclinic_schema)
        data_source.add_depends_on(serverless_rds_cluster)

        query_req_path = './definition/template/query/request/'
        query_res_path = './definition/template/query/response/'

        for req_file in os.listdir(query_req_path):
            query_name = req_file.split('.')[0]
            with open(query_req_path + req_file, 'rt') as f:
                query_req = f.read()
            with open(query_res_path + query_name + '.vm', 'rt') as f:
                query_res = f.read()
            pettypes_resolver = CfnResolver(
                self, query_name,
                api_id=petclinic_graphql_api.attr_api_id,
                type_name='Query',
                field_name=query_name,
                data_source_name=data_source.name,
                request_mapping_template=query_req,
                response_mapping_template=query_res
            )
            pettypes_resolver.add_depends_on(data_source)

        func_dict = {}

        func_req_path = './definition/template/function/request/'
        func_res_path = './definition/template/function/response/'

        for req_file in os.listdir(func_req_path):
            func_name = req_file.split('.')[0]
            with open(func_req_path + req_file) as f:
                func_req = f.read()
            with open(func_res_path + func_name + '.vm') as f:
                func_res = f.read()
            func_dict[func_name] = CfnFunctionConfiguration(
                self, func_name,
                api_id=petclinic_graphql_api.attr_api_id,
                data_source_name=data_source.name,
                name=func_name,
                function_version='2018-05-29',
                request_mapping_template=func_req,
                response_mapping_template=func_res
            )
            func_dict[func_name].add_depends_on(data_source)

        query_owner = CfnResolver(
            self, 'QueryOnwer',
            api_id=petclinic_graphql_api.attr_api_id,
            kind='PIPELINE',
            type_name='Query',
            field_name='owner',
            request_mapping_template="{}",
            response_mapping_template="$util.toJson($ctx.result)",
            pipeline_config=CfnResolver.PipelineConfigProperty(
                functions=[func_dict['Query_Owner_getOwnerById'].attr_function_id,
                 func_dict['Query_Owner_getPetsByOwner'].attr_function_id,
                 func_dict['Query_Owner_getVistsByPet'].attr_function_id]
            )
        )

        query_owner.add_depends_on(func_dict['Query_Owner_getOwnerById'])
        query_owner.add_depends_on(func_dict['Query_Owner_getPetsByOwner'])
        query_owner.add_depends_on(func_dict['Query_Owner_getVistsByPet'])

        query_all_owners = CfnResolver(
            self, 'QueryAllOnwers',
            api_id=petclinic_graphql_api.attr_api_id,
            kind='PIPELINE',
            type_name='Query',
            field_name='owners',
            request_mapping_template="{}",
            response_mapping_template="$util.toJson($ctx.result)",
            pipeline_config=CfnResolver.PipelineConfigProperty(
                functions=[func_dict['Query_Owners_getAllOwners'].attr_function_id,
                 func_dict['Query_Owners_getPetsByOwner'].attr_function_id]
            )
        )

        query_all_owners.add_depends_on(func_dict['Query_Owners_getAllOwners'])
        query_all_owners.add_depends_on(func_dict['Query_Owners_getPetsByOwner'])

        query_pet = CfnResolver(
            self, 'QueryPet',
            api_id=petclinic_graphql_api.attr_api_id,
            kind='PIPELINE',
            type_name='Query',
            field_name='pet',
            request_mapping_template="{}",
            response_mapping_template="$util.toJson($ctx.result)",
            pipeline_config=CfnResolver.PipelineConfigProperty(
                functions=[func_dict['Query_Pet_getPetById'].attr_function_id,
                 func_dict['Query_Pet_getVisitByPet'].attr_function_id]
            )
        )

        query_pet.add_depends_on(func_dict['Query_Pet_getPetById'])
        query_pet.add_depends_on(func_dict['Query_Pet_getVisitByPet'])   

        query_vets = CfnResolver(
            self, 'QueryVets',
            api_id=petclinic_graphql_api.attr_api_id,
            kind='PIPELINE',
            type_name='Query',
            field_name='vets',
            request_mapping_template="{}",
            response_mapping_template="$util.toJson($ctx.result)",
            pipeline_config=CfnResolver.PipelineConfigProperty(
                functions=[func_dict['Query_Vets_getVets'].attr_function_id,
                 func_dict['Query_Vets_getSpecByVets'].attr_function_id]
            )
        )

        query_vets.add_depends_on(func_dict['Query_Vets_getVets'])
        query_vets.add_depends_on(func_dict['Query_Vets_getSpecByVets'])

        mutation_req_path = './definition/template/mutation/request/'
        mutation_res_path = './definition/template/mutation/response/'

        for req_file in os.listdir(mutation_req_path):
            mutation_name = req_file.split('.')[0]
            with open(mutation_req_path + req_file) as f:
                func_req = f.read()
            with open(mutation_res_path + mutation_name + '.vm') as f:
                func_res = f.read()
            mutation = CfnResolver(
                self, mutation_name,
                api_id=petclinic_graphql_api.attr_api_id,
                type_name='Mutation',
                field_name=mutation_name,
                data_source_name=data_source.name,
                request_mapping_template=func_req,
                response_mapping_template=func_res
            )
            mutation.add_depends_on(data_source)

        core.CfnOutput(self,"GraphqlPetclinicWebsiteUrl",export_name="GraphqlPetclinicWebsiteUrl",value=website_bucket.bucket_website_url)
Example #10
 def __configure_secrets(self, function: lambda_.Function,
                         secret: sm.Secret) -> None:
     secret.grant_write(function.role)
     function.add_environment('TDA_SECRET_ID', secret.secret_full_arn)
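
Note that grant_write only lets the function *put* new secret values. If the function also needs to read the current value before refreshing it, a read grant is required too - a hedged variant (the extra grant is an assumption about the function's behaviour, not part of the original):

    def __configure_secrets(self, function: lambda_.Function,
                            secret: sm.Secret) -> None:
        secret.grant_write(function.role)
        secret.grant_read(function.role)  # only if the function reads the secret
        function.add_environment('TDA_SECRET_ID', secret.secret_full_arn)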
Example #11
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc: _ec2.Vpc,
                 redshift_secret_arn: str,
                 lambda_sg: _ec2.SecurityGroup,
                 clean_glue_db: _glue.Database,
                 redshift_role_arn: str,
                 redshift_cluster_endpoint: _redshift.Endpoint,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.__vpc = vpc
        self.__redshift_secret_arn = redshift_secret_arn
        self.__lambda_sg = lambda_sg
        self.__clean_glue_db = clean_glue_db
        self.__redshift_role_arn = redshift_role_arn

        stack = Stack.of(self)

        # Generate secrets for the Redshift users (each secret below builds its own
        # SecretStringGenerator with the username/host/port baked into the template)

        self.__etl_user_secret = Secret(
            scope=self, id='ETLUserSecret',
            description="ETL user Redshift",
            generate_secret_string=SecretStringGenerator(
                exclude_characters="'",
                exclude_punctuation=True,
                generate_string_key="password",
                secret_string_template=json.dumps(
                    {
                        'username': _config.Redshift.ETL_USER,
                        'dbname': _config.Redshift.DATABASE,
                        'host': redshift_cluster_endpoint.hostname,
                        'port': core.Token.as_string(redshift_cluster_endpoint.port)
                    }
                )
            )
        )
        self.__dataengineer_user_secret = Secret(
            scope=self, id='DataEngineerUserSecret',
            description="DataEngineer user Redshift",
            generate_secret_string=SecretStringGenerator(
                exclude_characters="'",
                exclude_punctuation=True,
                generate_string_key="password",
                secret_string_template=json.dumps(
                    {
                        'username': _config.Redshift.DATA_ENGINEER_USER,
                        'dbname': _config.Redshift.DATABASE,
                        'host': redshift_cluster_endpoint.hostname,
                        'port': core.Token.as_string(redshift_cluster_endpoint.port)
                    }
                )
            ))

        self.__quicksight_user_secret = Secret(
            scope=self, id='DatavizUserSecret',
            description="Quicksight user Redshift",
            generate_secret_string=SecretStringGenerator(
                exclude_characters="'",
                exclude_punctuation=True,
                generate_string_key="password",
                secret_string_template=json.dumps(
                    {
                        'username': _config.Redshift.DATAVIZ_USER,
                        'dbname': _config.Redshift.DATABASE,
                        'host': redshift_cluster_endpoint.hostname,
                        'port': core.Token.as_string(redshift_cluster_endpoint.port)
                    }
                )
            ))

        self.__subnets_selection = _ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PRIVATE)

        # Use the following command line to generate the python dependencies layer content
        # pip3 install -t lambda-layer/python/lib/python3.8/site-packages -r lambda/requirements.txt
        # Build the lambda layer assets
        subprocess.call(
            ['pip', 'install', '-t', 'dwh/dwh_cdk/bootstrap_lambda_layer/python/lib/python3.8/site-packages', '-r',
             'dwh/dwh_cdk/bootstrap_lambda/requirements.txt', '--platform', 'manylinux1_x86_64', '--only-binary=:all:',
             '--upgrade'])

        requirements_layer = _lambda.LayerVersion(scope=self,
                                                  id='PythonRequirementsTemplate',
                                                  code=_lambda.Code.from_asset('dwh/dwh_cdk/bootstrap_lambda_layer'),
                                                  compatible_runtimes=[_lambda.Runtime.PYTHON_3_8])

        # This lambda function will run SQL commands to setup Redshift users and tables
        bootstrap_function_name = 'RedshiftBootstrap'
        register_template_lambda = _lambda.Function(scope=self,
                                                    id='RegisterTemplate',
                                                    runtime=_lambda.Runtime.PYTHON_3_8,
                                                    code=_lambda.Code.from_asset(
                                                        'dwh/dwh_cdk/bootstrap_lambda'),
                                                    handler='redshift_setup.handler',
                                                    environment={
                                                        'SQL_SCRIPT_LOCATION': _config.BINARIES_LOCATION + self.SQL_SCRIPT_DIR,
                                                        'SECRET_ARN': self.__redshift_secret_arn,
                                                        'SQL_SCRIPT_FILES': _config.RedshiftDeploy.SQL_SCRIPT_FILES,
                                                        'ETL_SECRET': self.__etl_user_secret.secret_arn,
                                                        'DATAENG_SECRET': self.__dataengineer_user_secret.secret_arn,
                                                        'DATAVIZ_SECRET': self.__quicksight_user_secret.secret_arn,
                                                        'GLUE_DATABASE': self.__clean_glue_db.database_name,
                                                        'REDSHIFT_IAM_ROLE': self.__redshift_role_arn
                                                    },
                                                    layers=[requirements_layer],
                                                    timeout=core.Duration.minutes(3),
                                                    vpc=self.__vpc,
                                                    vpc_subnets=self.__subnets_selection,
                                                    security_group=self.__lambda_sg,
                                                    function_name=bootstrap_function_name,
                                                    memory_size=256
                                                    )

        lambda_role = register_template_lambda.role

        lambda_role.add_to_policy(PolicyStatement(
            actions=['secretsmanager:GetResourcePolicy', 'secretsmanager:GetSecretValue',
                     'secretsmanager:DescribeSecret', 'secretsmanager:ListSecretVersionIds'],
            resources=[stack.format_arn(service='secretsmanager', resource='*')]))

        lambda_role.add_to_policy(PolicyStatement(actions=['logs:CreateLogGroup'],
                                                  resources=[stack.format_arn(service='logs', resource='*')]))
        lambda_role.add_to_policy(PolicyStatement(actions=['logs:CreateLogStream', 'logs:PutLogEvents'],
                                                  resources=[stack.format_arn(service='logs', resource='log_group',
                                                                              resource_name='/aws/lambda/' + bootstrap_function_name + ':*')]))

        artifacts_bucket_arn = 'arn:aws:s3:::' + _config.ARA_BUCKET.replace("s3://", "")
        lambda_role.add_to_policy(PolicyStatement(actions=['s3:GetObject', 's3:GetObjectVersion'],
                                                  resources=[artifacts_bucket_arn,
                                                             artifacts_bucket_arn + '/binaries/*']))

        bootstrap_lambda_provider = Provider(scope=self,
                                             id='BootstrapLambdaProvider',
                                             on_event_handler=register_template_lambda)
        CustomResource(scope=self,
                       id='ExecuteRegisterTemplate',
                       service_token=bootstrap_lambda_provider.service_token)

        self.__secrets_manager_vpc_endpoint_sg = _ec2.SecurityGroup(
            self, id="secrets_manager_vpc_endpoint-sg",
            vpc=self.__vpc,
            security_group_name="secrets-manager-vpc_endpoint-sg")

        self.__secrets_manager_vpc_endpoint_sg.add_ingress_rule(self.__lambda_sg,
                                                                _ec2.Port.all_tcp()
                                                                )

        self.__security_groups_list = [self.__secrets_manager_vpc_endpoint_sg]

        self.__endpoint_service_name = 'com.amazonaws.%s.secretsmanager' % stack.region
        # Create VPC endpoint for SecretsManager
        secrets_manager_vpc_endpoint = _ec2.InterfaceVpcEndpoint(
            stack, "Secretsmanager VPC Endpoint",
            vpc=self.__vpc,
            service=_ec2.InterfaceVpcEndpointService(
                self.__endpoint_service_name, 443),
            subnets=self.__subnets_selection,
            security_groups=self.__security_groups_list)
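
Inside the RedshiftBootstrap lambda, the secret ARNs exported through the environment above would be resolved via the Secrets Manager VPC endpoint. A sketch of the handler-side lookup (the helper name is an assumption; the handler itself is not shown in this example):

    import json
    import os
    import boto3

    def get_user_credentials(env_key: str) -> dict:
        # env_key is one of ETL_SECRET / DATAENG_SECRET / DATAVIZ_SECRET.
        client = boto3.client("secretsmanager")
        response = client.get_secret_value(SecretId=os.environ[env_key])
        return json.loads(response["SecretString"])  # username, password, dbname, host, port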
Example #12
    def __init__(
        self,
        scope: App,
        id: str,
        envs: EnvSettings,
        components: ComponentsStack,
        base_resources: BaseResources,
    ):
        super().__init__(scope, id)

        self.db_secret_arn = Fn.import_value(
            BaseResources.get_database_secret_arn_output_export_name(envs))

        self.job_processing_queues = components.data_processing_queues
        self.vpc = base_resources.vpc
        self.db = base_resources.db

        self.app_bucket = Bucket(self, "App", versioned=True)

        if self.app_bucket.bucket_arn:
            CfnOutput(
                self,
                id="AppBucketOutput",
                export_name=self.get_app_bucket_arn_output_export_name(envs),
                value=self.app_bucket.bucket_arn,
            )

        self.pages_bucket = Bucket(self, "Pages", public_read_access=True)

        self.domain_name = StringParameter.from_string_parameter_name(
            self,
            "DomainNameParameter",
            string_parameter_name="/schema-cms-app/DOMAIN_NAME").string_value

        self.certificate_arn = StringParameter.from_string_parameter_name(
            self,
            "CertificateArnParameter",
            string_parameter_name="/schema-cms-app/CERTIFICATE_ARN"
        ).string_value

        django_secret = Secret(self,
                               "DjangoSecretKey",
                               secret_name="SCHEMA_CMS_DJANGO_SECRET_KEY")
        lambda_auth_token_secret = Secret(
            self,
            "LambdaAuthToken",
            secret_name="SCHEMA_CMS_LAMBDA_AUTH_TOKEN")

        if lambda_auth_token_secret.secret_arn:
            CfnOutput(
                self,
                id="lambdaAuthTokenArnOutput",
                export_name=self.get_lambda_auth_token_arn_output_export_name(
                    envs),
                value=lambda_auth_token_secret.secret_arn,
            )

        self.django_secret_key = EcsSecret.from_secrets_manager(django_secret)
        self.lambda_auth_token = EcsSecret.from_secrets_manager(
            lambda_auth_token_secret)

        tag_from_context = self.node.try_get_context("app_image_tag")
        tag = tag_from_context if tag_from_context != "undefined" else None

        api_image = ContainerImage.from_ecr_repository(
            repository=Repository.from_repository_name(
                self,
                id="BackendRepository",
                repository_name=BaseECR.get_backend_repository_name(envs)),
            tag=tag,
        )
        nginx_image = ContainerImage.from_ecr_repository(
            repository=Repository.from_repository_name(
                self,
                id="NginxRepository",
                repository_name=BaseECR.get_nginx_repository_name(envs)),
            tag=tag,
        )

        self.api = ApplicationLoadBalancedFargateService(
            self,
            "ApiService",
            service_name=f"{envs.project_name}-api-service",
            cluster=Cluster.from_cluster_attributes(
                self,
                id="WorkersCluster",
                cluster_name="schema-ecs-cluster",
                vpc=self.vpc,
                security_groups=[],
            ),
            task_image_options=ApplicationLoadBalancedTaskImageOptions(
                image=nginx_image,
                container_name="nginx",
                container_port=80,
                enable_logging=True,
            ),
            desired_count=1,
            cpu=512,
            memory_limit_mib=1024,
            certificate=Certificate.from_certificate_arn(
                self, "Cert", certificate_arn=self.certificate_arn),
            domain_name=self.domain_name,
            domain_zone=PrivateHostedZone(
                self,
                "zone",
                vpc=self.vpc,
                zone_name=self.domain_name,
            ),
        )

        self.api.task_definition.add_container(
            "backend",
            image=api_image,
            command=[
                "sh", "-c",
                "/bin/chamber exec $CHAMBER_SERVICE_NAME -- ./scripts/run.sh"
            ],
            logging=AwsLogDriver(stream_prefix="backend-container"),
            environment={
                "POSTGRES_DB": envs.data_base_name,
                "AWS_STORAGE_BUCKET_NAME": self.app_bucket.bucket_name,
                "AWS_STORAGE_PAGES_BUCKET_NAME": self.pages_bucket.bucket_name,
                "SQS_WORKER_QUEUE_URL":
                self.job_processing_queues[0].queue_url,
                "SQS_WORKER_EXT_QUEUE_URL":
                self.job_processing_queues[1].queue_url,
                "SQS_WORKER_MAX_QUEUE_URL":
                self.job_processing_queues[2].queue_url,
                "CHAMBER_SERVICE_NAME": "schema-cms-app",
                "CHAMBER_KMS_KEY_ALIAS": envs.project_name,
            },
            secrets={
                "DB_CONNECTION":
                EcsSecret.from_secrets_manager(
                    Secret.from_secret_arn(self,
                                           id="DbSecret",
                                           secret_arn=self.db_secret_arn)),
                "DJANGO_SECRET_KEY":
                self.django_secret_key,
                "LAMBDA_AUTH_TOKEN":
                self.lambda_auth_token,
            },
            cpu=512,
            memory_limit_mib=1024,
        )

        self.django_secret_key.grant_read(
            self.api.service.task_definition.task_role)

        self.app_bucket.grant_read_write(
            self.api.service.task_definition.task_role)
        self.pages_bucket.grant_read_write(
            self.api.service.task_definition.task_role)

        for queue in self.job_processing_queues:
            queue.grant_send_messages(
                self.api.service.task_definition.task_role)

        self.api.service.connections.allow_to(self.db.connections,
                                              Port.tcp(5432))
        self.api.task_definition.add_to_task_role_policy(
            PolicyStatement(
                actions=["ses:SendRawEmail", "ses:SendBulkTemplatedEmail"],
                resources=["*"],
            ))

        self.api.task_definition.add_to_task_role_policy(
            PolicyStatement(
                actions=[
                    "kms:Get*", "kms:Describe*", "kms:List*", "kms:Decrypt"
                ],
                resources=[
                    Fn.import_value(
                        BaseKMS.get_kms_arn_output_export_name(envs))
                ],
            ))

        self.api.task_definition.add_to_task_role_policy(
            PolicyStatement(actions=["ssm:DescribeParameters"],
                            resources=["*"]))

        self.api.task_definition.add_to_task_role_policy(
            PolicyStatement(
                actions=["ssm:GetParameters*"],
                resources=[
                    f"arn:aws:ssm:{self.region}:{self.account}:parameter/schema-cms-app/*"
                ],
            ))
Example #13
    def __init__(self, scope: Construct, stack_id: str, *,
                 props: ServiceTierProps, **kwargs):
        """
        Initialize a new instance of ServiceTier
        :param scope: The scope of this construct.
        :param stack_id: The ID of this construct.
        :param props: The properties for this construct.
        :param kwargs: Any kwargs that need to be passed on to the parent class.
        """
        super().__init__(scope, stack_id, **kwargs)

        # A bastion host to connect to the render farm with.
        # The bastion host is for convenience (e.g. SSH into RenderQueue and WorkerFleet instances).
        # This is not a critical component of the farm, so can safely be removed.
        self.bastion = BastionHostLinux(
            self,
            'Bastion',
            vpc=props.vpc,
            subnet_selection=SubnetSelection(subnet_type=SubnetType.PUBLIC),
            block_devices=[
                BlockDevice(device_name='/dev/xvda',
                            volume=BlockDeviceVolume.ebs(50, encrypted=True))
            ])

        # Granting the bastion access to the file system mount for convenience.
        # This can also safely be removed.
        props.file_system.mount_to_linux_instance(self.bastion.instance,
                                                  location='/mnt/efs')

        recipes = ThinkboxDockerRecipes(self,
                                        'Image',
                                        stage=Stage.from_directory(
                                            props.docker_recipes_stage_path))

        repository = Repository(
            self,
            'Repository',
            vpc=props.vpc,
            version=recipes.version,
            database=props.database,
            file_system=props.file_system,
            repository_installation_timeout=Duration.minutes(20))

        server_cert = X509CertificatePem(
            self,
            'RQCert',
            subject=DistinguishedName(
                cn=f'renderqueue.{props.dns_zone.zone_name}',
                o='RFDK-Sample',
                ou='RenderQueueExternal'),
            signing_certificate=props.root_ca)

        self.render_queue = RenderQueue(
            self,
            'RenderQueue',
            vpc=props.vpc,
            version=recipes.version,
            images=recipes.render_queue_images,
            repository=repository,
            hostname=RenderQueueHostNameProps(hostname='renderqueue',
                                              zone=props.dns_zone),
            traffic_encryption=RenderQueueTrafficEncryptionProps(
                external_tls=RenderQueueExternalTLSProps(
                    rfdk_certificate=server_cert),
                internal_protocol=ApplicationProtocol.HTTPS),
            # TODO - Evaluate deletion protection for your own needs. This is set to false to
            # cleanly remove everything when this stack is destroyed. If you would like to ensure
            # that this resource is not accidentally deleted, you should set this to true.
            deletion_protection=False)
        self.render_queue.connections.allow_default_port_from(self.bastion)

        if props.ubl_licenses:
            if not props.ubl_certs_secret_arn:
                raise ValueError(
                    'UBL certificates secret ARN is required when using UBL but was not specified.'
                )
            ubl_cert_secret = Secret.from_secret_arn(
                self, 'ublcertssecret', props.ubl_certs_secret_arn)
            self.ubl_licensing = UsageBasedLicensing(
                self,
                'usagebasedlicensing',
                vpc=props.vpc,
                images=recipes.ubl_images,
                licenses=props.ubl_licenses,
                render_queue=self.render_queue,
                certificate_secret=ubl_cert_secret,
            )