def test_invalid_config(
    region,
    instance,
    os,
    pcluster_config_reader,
    architecture,
    s3_bucket_factory,
    build_image_custom_resource,
    images_factory,
):
    # Test validation error
    arm64_ami = retrieve_latest_ami(region, os, architecture="arm64")
    image_id = f"integ-test-build-image-{generate_random_string()}"

    # Get custom S3 bucket
    bucket_name = s3_bucket_factory()
    image_config = pcluster_config_reader(config_file="image.config.yaml",
                                          parent_image=arm64_ami,
                                          bucket_name=bucket_name)
    image = images_factory(image_id,
                           image_config,
                           region,
                           raise_on_error=False,
                           log_error=False)

    assert_that(image.configuration_errors).is_length(1)
    assert_that(image.configuration_errors[0]).contains("level")
    assert_that(image.configuration_errors[0]).contains("type")
    assert_that(image.configuration_errors[0]).contains("message")
    assert_that(image.configuration_errors[0]["type"]).is_equal_to(
        "InstanceTypeBaseAMICompatibleValidator")

    # Test suppression of a validator

    # Get base AMI -- remarkable AMIs are not yet available for ARM, ubuntu2004, and centos7
    if os not in ["ubuntu2004", "centos7"]:
        base_ami = retrieve_latest_ami(region,
                                       os,
                                       ami_type="remarkable",
                                       architecture=architecture)
    else:
        base_ami = retrieve_latest_ami(region, os, architecture=architecture)

    image_config = pcluster_config_reader(
        config_file="warnings.image.config.yaml",
        parent_image=base_ami,
        bucket_name=bucket_name)
    suppressed = images_factory(
        image_id,
        image_config,
        region,
        raise_on_error=False,
        log_error=False,
        dryrun=True,
        suppress_validators="type:UrlValidator",
    )
    assert_that(suppressed.message).contains("Request would have succeeded")
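The same dry-run-with-suppression behavior can be sketched as a direct pcluster v3 CLI call; a minimal, non-authoritative sketch (flag names are assumptions based on the ParallelCluster 3 CLI, not taken from this test):

# Hedged sketch of the equivalent pcluster v3 CLI invocation.
import subprocess

def build_image_dryrun(image_id, config_path, region):
    """Dry-run a build-image request while suppressing UrlValidator findings."""
    return subprocess.run(
        [
            "pcluster", "build-image",
            "--image-id", image_id,
            "--image-configuration", config_path,
            "--region", region,
            "--suppress-validators", "type:UrlValidator",
            "--dryrun", "true",
        ],
        capture_output=True,
        text=True,
        check=False,
    )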
Example #2
def test_custom_image(region, api_client, build_image, os, request,
                      pcluster_config_reader):
    base_ami = retrieve_latest_ami(region, os)

    config_file = pcluster_config_reader(config_file="image.config.yaml",
                                         parent_image=base_ami)
    with open(config_file, encoding="utf-8") as config_file_handle:
        config = config_file_handle.read()

    image_id = generate_stack_name(
        "integ-tests-build-image",
        request.config.getoption("stackname_suffix"))
    client = image_operations_api.ImageOperationsApi(api_client)

    _test_build_image(client, build_image, image_id, config)

    _test_describe_image(region, client, image_id, "BUILD_IN_PROGRESS")
    _test_list_images(region, client, image_id, "PENDING")

    # CFN stack is deleted as soon as image is available
    _cloudformation_wait(region, image_id, "stack_delete_complete")

    _test_describe_image(region, client, image_id, "BUILD_COMPLETE")
    _test_list_images(region, client, image_id, "AVAILABLE")

    _delete_image(region, client, image_id)
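The `_cloudformation_wait` call above can be implemented with a standard boto3 waiter; one plausible sketch, assuming the build-image stack is named after the image ID as in this test:

# Plausible _cloudformation_wait-style helper built on a boto3 waiter.
import boto3

def _cloudformation_wait(region, stack_name, waiter_name):
    client = boto3.client("cloudformation", region_name=region)
    # e.g. waiter_name="stack_delete_complete" blocks until deletion finishes
    client.get_waiter(waiter_name).wait(StackName=stack_name)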
Example #3
def test_create_wrong_os(region, os, pcluster_config_reader, clusters_factory,
                         architecture):
    """Test error message when os provide is different from the os of custom AMI"""
    # ubuntu1804 is specified in the config file but an AMI of centos7 is provided
    wrong_os = "centos7"
    logging.info("Asserting os fixture is different from wrong_os variable")
    assert_that(os != wrong_os).is_true()
    custom_ami = retrieve_latest_ami(region,
                                     wrong_os,
                                     ami_type="pcluster",
                                     architecture=architecture)
    cluster_config = pcluster_config_reader(custom_ami=custom_ami)
    cluster = clusters_factory(cluster_config, raise_on_error=False)

    _assert_head_node_is_running(region, cluster)
    username = get_username_for_os(wrong_os)
    remote_command_executor = RemoteCommandExecutor(cluster, username=username)

    logging.info("Verifying error in logs")
    assert_errors_in_logs(
        remote_command_executor,
        ["/var/log/cfn-init.log"],
        [
            "RuntimeError",
            fr"custom AMI.+{wrong_os}.+base.+os.+config file.+{os}"
        ],
    )
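A hedged sketch of the kind of log matching `assert_errors_in_logs` performs: read each log file on the head node and require every expected pattern to appear. The `run_remote_command` helper name is an assumption based on the RemoteCommandExecutor used above.

import re

def _assert_patterns_in_log(remote_command_executor, log_file, patterns):
    # Fetch the log contents from the head node and regex-match each pattern.
    content = remote_command_executor.run_remote_command(f"sudo cat {log_file}").stdout
    for pattern in patterns:
        assert re.search(pattern, content), f"{pattern!r} not found in {log_file}"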
Example #4
def test_update_compute_ami(region, os, pcluster_config_reader, ami_copy,
                            clusters_factory, test_datadir, request):
    # Create cluster with initial configuration
    ec2 = boto3.client("ec2", region)
    pcluster_ami_id = retrieve_latest_ami(region,
                                          os,
                                          ami_type="pcluster",
                                          request=request)
    init_config_file = pcluster_config_reader(
        global_custom_ami=pcluster_ami_id)
    cluster = clusters_factory(init_config_file)
    instances = cluster.get_cluster_instance_ids(node_type="Compute")
    logging.info(instances)
    _check_instance_ami_id(ec2, instances, pcluster_ami_id)

    pcluster_copy_ami_id = ami_copy(
        pcluster_ami_id,
        "-".join(["test", "update", "computenode",
                  generate_random_string()]))

    updated_config_file = pcluster_config_reader(
        config_file="pcluster.config.update.yaml",
        global_custom_ami=pcluster_ami_id,
        custom_ami=pcluster_copy_ami_id)
    # stop compute fleet before updating queue image
    cluster.stop()
    cluster.update(str(updated_config_file), force_update="true")
    instances = cluster.get_cluster_instance_ids(node_type="Compute")
    logging.info(instances)
    _check_instance_ami_id(ec2, instances, pcluster_copy_ami_id)
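A plausible implementation of the `_check_instance_ami_id` helper used above, sketched with the standard boto3 EC2 API (the real helper may differ):

def _check_instance_ami_id(ec2, instance_ids, expected_ami_id):
    # Verify every compute instance is running on the expected AMI.
    reservations = ec2.describe_instances(InstanceIds=instance_ids)["Reservations"]
    for reservation in reservations:
        for instance in reservation["Instances"]:
            assert instance["ImageId"] == expected_ami_id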
Example #5
def test_slurm_cli_commands(
    request, scheduler, region, os, pcluster_config_reader, clusters_factory, s3_bucket_factory
):
    """Test pcluster cli commands are working."""
    # Use long scale down idle time so we know nodes are terminated by pcluster stop
    cluster_config = pcluster_config_reader(scaledown_idletime=60)
    # Using a custom AMI not tagged by pcluster will generate a warning
    custom_ami = retrieve_latest_ami(region, os, ami_type="official", architecture="x86_64")
    config_file = "pcluster.config.with.warnings.yaml"
    cluster_config_with_warning = pcluster_config_reader(config_file=config_file, custom_ami=custom_ami)

    # Test below is not compatible with `--cluster` flag. Therefore, skip it if the flag is provided.
    if not request.config.getoption("cluster"):
        _test_create_with_warnings(cluster_config_with_warning, clusters_factory)

    cluster = _test_create_cluster(clusters_factory, cluster_config, request)
    _test_describe_cluster(cluster)
    _test_list_cluster(cluster.name, "CREATE_COMPLETE")

    _test_update_with_warnings(cluster_config_with_warning, cluster)
    check_status(cluster, "CREATE_COMPLETE", "running", "RUNNING")

    filters = [{}, {"node_type": "HeadNode"}, {"node_type": "Compute"}, {"queue_name": "ondemand1"}]
    for filter_ in filters:
        _test_describe_instances(cluster, **filter_)
    _test_pcluster_export_cluster_logs(s3_bucket_factory, cluster)
    check_pcluster_list_cluster_log_streams(cluster, os)
    _test_pcluster_get_cluster_log_events(cluster)
    _test_pcluster_get_cluster_stack_events(cluster)
    _test_pcluster_compute_fleet(cluster, expected_num_nodes=2)

    remote_command_executor = RemoteCommandExecutor(cluster)
    assert_no_errors_in_logs(remote_command_executor, scheduler)
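A hedged sketch of a `_test_list_cluster`-style check via the pcluster v3 CLI; the JSON shape ({"clusters": [...]}) and field names are assumptions about the CLI output, not taken from this test:

import json

def _test_list_cluster(cluster_name, expected_status):
    result = run_command(["pcluster", "list-clusters"])
    clusters = json.loads(result.stdout)["clusters"]
    # Find our cluster in the listing and check its reported status.
    status = next(c["clusterStatus"] for c in clusters if c["clusterName"] == cluster_name)
    assert_that(status).is_equal_to(expected_status)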
Example #6
def _write_file_into_efs(region, vpc_stack, efs_stack, request, key_name,
                         cfn_stacks_factory):
    """Write file stack contains a mount target and a instance to write a empty file with random name into the efs."""
    write_file_template = Template()
    write_file_template.set_version("2010-09-09")
    write_file_template.set_description(
        "Stack to write a file to the existing EFS")
    default_security_group_id = get_default_vpc_security_group(
        vpc_stack.cfn_outputs["VpcId"], region)
    write_file_template.add_resource(
        MountTarget(
            "MountTargetResource",
            FileSystemId=efs_stack.cfn_resources["FileSystemResource"],
            SubnetId=vpc_stack.cfn_outputs["PublicSubnetId"],
            SecurityGroups=[default_security_group_id],
        ))
    random_file_name = random_alphanumeric()
    # In Fn::Sub, ${AWS::Region} is substituted by CloudFormation, while the
    # ${!var} escape renders as a literal ${var} for the shell at runtime.
    user_data = (
        """
        #cloud-config
        package_update: true
        package_upgrade: true
        runcmd:
        - yum install -y nfs-utils
        - file_system_id_1=""" + efs_stack.cfn_resources["FileSystemResource"] + """
        - efs_mount_point_1=/mnt/efs/fs1
        - mkdir -p "${!efs_mount_point_1}"
        - mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport,_netdev """
        + """"${!file_system_id_1}.efs.${AWS::Region}.${AWS::URLSuffix}:/" "${!efs_mount_point_1}"
        - touch ${!efs_mount_point_1}/""" + random_file_name + """
        - umount ${!efs_mount_point_1}
        - /opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackName} --resource InstanceToWriteEFS --region ${AWS::Region}
        """)
    write_file_template.add_resource(
        Instance(
            "InstanceToWriteEFS",
            CreationPolicy={"ResourceSignal": {
                "Timeout": "PT10M"
            }},
            ImageId=retrieve_latest_ami(region, "alinux2"),
            InstanceType="c5.xlarge",
            SubnetId=vpc_stack.cfn_outputs["PublicSubnetId"],
            UserData=Base64(Sub(user_data)),
            KeyName=key_name,
            DependsOn=["MountTargetResource"],
        ))
    write_file_stack = CfnStack(
        name=generate_stack_name("integ-tests-efs-write-file",
                                 request.config.getoption("stackname_suffix")),
        region=region,
        template=write_file_template.to_json(),
    )
    cfn_stacks_factory.create_stack(write_file_stack)

    # The stack only needs to live long enough for the instance to write the file
    cfn_stacks_factory.delete_stack(write_file_stack.name, region)

    return random_file_name
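The `${!var}` escapes in the user data above are worth a worked illustration; a minimal sketch using troposphere's Sub (the snippet is illustrative only):

from troposphere import Sub

# CloudFormation substitutes ${AWS::Region}; the ${!efs_mount_point_1} escape
# reaches the shell as a literal ${efs_mount_point_1}.
snippet = Sub("mkdir -p ${!efs_mount_point_1}  # stack region: ${AWS::Region}")
# Rendered on a us-east-1 stack, the instance sees:
#   mkdir -p ${efs_mount_point_1}  # stack region: us-east-1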
def test_build_image(
    region,
    instance,
    os,
    pcluster_config_reader,
    architecture,
    s3_bucket_factory,
    build_image_custom_resource,
    images_factory,
    request,
):
    """Test build image for given region and os"""
    image_id = generate_stack_name(
        "integ-tests-build-image",
        request.config.getoption("stackname_suffix"))

    # Get custom instance role
    instance_role = build_image_custom_resource(image_id=image_id)

    # Get custom S3 bucket
    bucket_name = s3_bucket_factory()

    # Get base AMI
    # remarkable AMIs are not yet available for ARM, ubuntu2004, and centos7
    if os not in ["ubuntu2004", "centos7"]:
        base_ami = retrieve_latest_ami(region,
                                       os,
                                       ami_type="remarkable",
                                       architecture=architecture)
    else:
        base_ami = retrieve_latest_ami(region, os, architecture=architecture)

    image_config = pcluster_config_reader(config_file="image.config.yaml",
                                          parent_image=base_ami,
                                          instance_role=instance_role,
                                          bucket_name=bucket_name)

    image = images_factory(image_id, image_config, region)
    _test_build_tag(image)
    _test_image_stack_events(image)
    _test_build_image_success(image)
    _test_image_tag_and_volume(image)
    _test_list_image_log_streams(image)
    _test_get_image_log_events(image)
    _test_list_images(image)
    _test_export_logs(s3_bucket_factory, image, region)
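For context, a plausible shape of the rendered image.config.yaml consumed above; field names assume the ParallelCluster 3 build-image schema and the values are placeholders:

import yaml

# Hypothetical rendered config; ParentImage/InstanceRole values are examples.
rendered_config = yaml.safe_load("""
Build:
  InstanceType: c5.xlarge
  ParentImage: ami-0123456789abcdef0   # base_ami
  Iam:
    InstanceRole: arn:aws:iam::123456789012:role/example  # instance_role
""")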
Example #8
def test_createami(region, os, instance, request, pcluster_config_reader,
                   vpc_stack, architecture):
    """Test createami for given region and os"""
    cluster_config = pcluster_config_reader()

    # Get base AMI
    # remarkable AMIs are not available for ARM yet
    base_ami = retrieve_latest_ami(region,
                                   os,
                                   ami_type="remarkable",
                                   architecture=architecture)

    # Networking
    vpc_id = vpc_stack.cfn_outputs["VpcId"]
    networking_args = [
        "--vpc-id", vpc_id, "--subnet-id",
        vpc_stack.cfn_outputs["PublicSubnetId"]
    ]

    # Custom Cookbook
    custom_cookbook = request.config.getoption(
        "createami_custom_chef_cookbook")
    custom_cookbook_args = [] if not custom_cookbook else [
        "-cc", custom_cookbook
    ]

    # Custom Node
    # inject PARALLELCLUSTER_NODE_URL into packer environment
    custom_node = request.config.getoption("createami_custom_node_package")
    env = None
    if custom_node:
        env = environ.copy()
        env["PARALLELCLUSTER_NODE_URL"] = custom_node

    # Instance type
    pcluster_version_result = run_command(["pcluster", "version"])
    pcluster_version = version.parse(pcluster_version_result.stdout.strip())
    instance_args = [] if pcluster_version < version.parse("2.4.1") else ["-i", instance]

    pcluster_createami_result = run_command(
        [
            "pcluster", "createami", "-ai", base_ami, "-os", os, "-r", region,
            "-c",
            cluster_config.as_posix()
        ] + custom_cookbook_args + instance_args + networking_args,
        env=env,
        timeout=7200,
    )

    stdout_lower = pcluster_createami_result.stdout.lower()
    assert_that(stdout_lower).contains(
        "downloading https://{0}-aws-parallelcluster.s3".format(region))
    assert_that(stdout_lower).does_not_contain("chef.io/chef/install.sh")
    assert_that(stdout_lower).does_not_contain("packages.chef.io")
    assert_that(stdout_lower).contains("thank you for installing cinc client")
    assert_that(stdout_lower).contains("starting cinc client")
    assert_that(stdout_lower).does_not_contain("no custom ami created")
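The instance-type gating above in isolation: per this test, CLIs older than 2.4.1 must not receive the `-i` flag. A self-contained sketch:

from packaging import version

def instance_args_for(cli_version, instance_type):
    # The -i flag is only passed to pcluster >= 2.4.1.
    if version.parse(cli_version) < version.parse("2.4.1"):
        return []
    return ["-i", instance_type]

assert instance_args_for("2.4.0", "c5.xlarge") == []
assert instance_args_for("2.11.0", "c5.xlarge") == ["-i", "c5.xlarge"]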
Example #9
def bastion_instance(vpc_stack, cfn_stacks_factory, request, region, key_name):
    """Class to create bastion instance used to execute commands on cluster in private subnet."""
    bastion_stack_name = utils.generate_stack_name(
        "integ-tests-networking-bastion",
        request.config.getoption("stackname_suffix"))

    bastion_template = Template()
    bastion_template.set_version()
    bastion_template.set_description("Create Networking bastion stack")

    bastion_sg = ec2.SecurityGroup(
        "NetworkingTestBastionSG",
        GroupDescription="SecurityGroup for Bastion",
        SecurityGroupIngress=[
            ec2.SecurityGroupRule(
                IpProtocol="tcp",
                FromPort="22",
                ToPort="22",
                CidrIp="0.0.0.0/0",
            ),
        ],
        VpcId=vpc_stack.cfn_outputs["VpcId"],
    )

    instance = ec2.Instance(
        "NetworkingBastionInstance",
        InstanceType="c5.xlarge",
        ImageId=retrieve_latest_ami(region, "alinux2"),
        KeyName=key_name,
        SecurityGroupIds=[Ref(bastion_sg)],
        SubnetId=vpc_stack.cfn_outputs["PublicSubnetId"],
    )
    bastion_template.add_resource(bastion_sg)
    bastion_template.add_resource(instance)
    bastion_template.add_output(
        Output("BastionIP",
               Value=GetAtt(instance, "PublicIp"),
               Description="The Bastion Public IP"))
    bastion_stack = CfnStack(
        name=bastion_stack_name,
        region=region,
        template=bastion_template.to_json(),
    )
    cfn_stacks_factory.create_stack(bastion_stack)
    bastion_ip = bastion_stack.cfn_outputs.get("BastionIP")
    logging.info(f"Bastion_ip: {bastion_ip}")

    yield f"ec2-user@{bastion_ip}"

    if not request.config.getoption("no_delete"):
        cfn_stacks_factory.delete_stack(bastion_stack_name, region)
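A hedged usage sketch for the yielded "ec2-user@<ip>" string: it can serve as an SSH jump host for nodes in the private subnet. The key path and target host here are illustrative placeholders:

import subprocess

def run_via_bastion(bastion, private_host, command, key_path="~/.ssh/id_rsa"):
    # -J routes the connection through the bastion (ProxyJump).
    return subprocess.run(
        ["ssh", "-i", key_path, "-J", bastion, private_host, command],
        capture_output=True,
        text=True,
        check=True,
    )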
Example #10
def test_createami_post_install(region, os, instance, test_datadir, request,
                                pcluster_config_reader, vpc_stack,
                                architecture):
    """Test post install script and base AMI is ParallelCluster AMI"""
    cluster_config = pcluster_config_reader()

    # Get ParallelCluster AMI as base AMI
    base_ami = retrieve_latest_ami(region,
                                   os,
                                   ami_type="pcluster",
                                   architecture=architecture)

    # Networking
    vpc_id = vpc_stack.cfn_outputs["VpcId"]
    networking_args = [
        "--vpc-id", vpc_id, "--subnet-id",
        vpc_stack.cfn_outputs["PublicSubnetId"]
    ]

    # Custom Cookbook
    custom_cookbook = request.config.getoption(
        "createami_custom_chef_cookbook")
    custom_cookbook_args = [] if not custom_cookbook else [
        "-cc", custom_cookbook
    ]

    # Instance type
    instance_args = ["-i", instance]

    # Post install script
    post_install_script_file = "post_install_ubuntu.sh" if os == "ubuntu1804" else "post_install.sh"
    post_install_script = "file://{0}".format(test_datadir / post_install_script_file)
    post_install_args = ["--post-install", post_install_script]

    pcluster_createami_result = run_command(
        [
            "pcluster", "createami", "-ai", base_ami, "-os", os, "-r", region,
            "-c",
            cluster_config.as_posix()
        ] + custom_cookbook_args + instance_args + networking_args +
        post_install_args,
        timeout=7200,
    )

    stdout_lower = pcluster_createami_result.stdout.lower()
    assert_that(stdout_lower).does_not_contain("no post install script")
    assert_that(stdout_lower).does_not_contain("no custom ami created")
Example #11
def _build_image(images_factory, instance_profile, lambda_cleanup_role, os,
                 pcluster_config_reader, region):
    # Generate image ID
    image_id = generate_stack_name("integ-tests-build-image", "")
    # Get base AMI
    base_ami = retrieve_latest_ami(region,
                                   os,
                                   ami_type="pcluster",
                                   architecture="x86_64")
    image_config = pcluster_config_reader(
        config_file="image.config.yaml",
        parent_image=base_ami,
        instance_profile=instance_profile,
        lambda_cleanup_role=lambda_cleanup_role,
    )
    image = images_factory(image_id, image_config, region)
    return image
Example #12
def test_create_imds_secured(
    imds_secured, users_allow_list, region, os, pcluster_config_reader, clusters_factory, architecture
):
    """
    Test IMDS access with different configurations.
    In particular, it also verifies that IMDS access is preserved on instance reboot.
    """
    custom_ami = retrieve_latest_ami(region, os, ami_type="pcluster", architecture=architecture)
    cluster_config = pcluster_config_reader(custom_ami=custom_ami, imds_secured=imds_secured)
    cluster = clusters_factory(cluster_config, raise_on_error=False)

    assert_head_node_is_running(region, cluster)
    assert_aws_identity_access_is_correct(cluster, users_allow_list)

    reboot_head_node(cluster)

    assert_head_node_is_running(region, cluster)
    assert_aws_identity_access_is_correct(cluster, users_allow_list)
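What "IMDS secured" means in practice, sketched with the standard IMDSv2 flow: token-based requests succeed while plain IMDSv1 GETs are rejected. This would run on the instance itself; the endpoints are the standard instance-metadata ones, and the check itself is an assumption about what the fixture verifies:

import requests

IMDS = "http://169.254.169.254"

def imds_get_with_token(path):
    # IMDSv2: obtain a session token, then present it on the metadata request.
    token = requests.put(
        f"{IMDS}/latest/api/token",
        headers={"X-aws-ec2-metadata-token-ttl-seconds": "21600"},
        timeout=2,
    ).text
    return requests.get(
        f"{IMDS}/latest/{path}",
        headers={"X-aws-ec2-metadata-token": token},
        timeout=2,
    )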
Example #13
def test_createami_wrong_os(region, instance, os, request,
                            pcluster_config_reader, vpc_stack, architecture):
    """Test error message when os provide is different from the os of custom AMI"""
    cluster_config = pcluster_config_reader()

    # ubuntu1804 is specified in the config file but an AMI of alinux2 is provided
    wrong_os = "alinux2"
    logging.info("Asserting os fixture is different from wrong_os variable")
    assert_that(os != wrong_os).is_true()
    base_ami = retrieve_latest_ami(region,
                                   wrong_os,
                                   ami_type="pcluster",
                                   architecture=architecture)

    command = _compose_command(region, instance, os, request, vpc_stack,
                               base_ami, cluster_config)
    _createami_and_assert_error(
        command, fr"custom AMI.+{wrong_os}.+base.+os.+config file.+{os}")
def test_runtime_bake(scheduler, os, region, pcluster_config_reader, clusters_factory, test_datadir, architecture):
    """Test cluster creation with runtime bake."""
    # remarkable AMIs are not available for ARM yet
    ami_type = "remarkable" if architecture == "x86_64" else "official"
    cluster_config = pcluster_config_reader(
        custom_ami=retrieve_latest_ami(region, os, ami_type=ami_type, architecture=architecture)
    )
    cluster = clusters_factory(cluster_config)
    remote_command_executor = RemoteCommandExecutor(cluster)

    # Verify no chef.io endpoint is called in the cloud-init-output log to download the chef installer or chef packages
    # on head node
    remote_command_executor.run_remote_script(str(test_datadir / "verify_chef_download.sh"))
    # on compute
    scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor)
    result = scheduler_commands.submit_script(str(test_datadir / "verify_chef_download.sh"))
    job_id = scheduler_commands.assert_job_submitted(result.stdout)
    scheduler_commands.wait_job_completed(job_id)
    scheduler_commands.assert_job_succeeded(job_id)
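A plausible sketch of what verify_chef_download.sh checks, expressed in Python: no chef.io download endpoints appear in the cloud-init output log. The script contents are an assumption; only the intent and the endpoint names come from the tests above:

def verify_no_chef_downloads(remote_command_executor):
    # Pull the cloud-init output log and assert no chef.io downloads occurred.
    log = remote_command_executor.run_remote_command(
        "sudo cat /var/log/cloud-init-output.log"
    ).stdout.lower()
    for endpoint in ("chef.io/chef/install.sh", "packages.chef.io"):
        assert_that(log).does_not_contain(endpoint)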
    def _bastion_factory():
        """Create bastion stack."""
        bastion_template = Template()
        bastion_template.set_version()
        bastion_template.set_description("Create Networking bastion stack")

        bastion_sg = ec2.SecurityGroup(
            "NetworkingTestBastionSG",
            GroupDescription="SecurityGroup for Bastion",
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol="tcp",
                    FromPort="22",
                    ToPort="22",
                    CidrIp="0.0.0.0/0",
                ),
            ],
            VpcId=vpc_stack.cfn_outputs["VpcId"],
        )

        bastion_instance = ec2.Instance(
            "NetworkingBastionInstance",
            InstanceType="c5.xlarge",
            ImageId=retrieve_latest_ami(region, "alinux2"),
            KeyName=key_name,
            SecurityGroupIds=[Ref(bastion_sg)],
            SubnetId=vpc_stack.cfn_outputs["PublicSubnetId"],
        )
        bastion_template.add_resource(bastion_sg)
        bastion_template.add_resource(bastion_instance)
        bastion_template.add_output(
            Output("BastionIP",
                   Value=GetAtt(bastion_instance, "PublicIp"),
                   Description="The Bastion Public IP"))
        bastion_stack = CfnStack(
            name=bastion_stack_name,
            region=region,
            template=bastion_template.to_json(),
        )
        cfn_stacks_factory.create_stack(bastion_stack)

        return bastion_stack.cfn_outputs.get("BastionIP")
Example #16
def test_update_compute_ami(region, os, pcluster_config_reader,
                            clusters_factory, test_datadir):
    # Create cluster with initial configuration
    ec2 = boto3.client("ec2", region)
    pcluster_ami_id = retrieve_latest_ami(region, os, ami_type="pcluster")
    init_config_file = pcluster_config_reader(
        global_custom_ami=pcluster_ami_id)
    cluster = clusters_factory(init_config_file)
    instances = cluster.get_cluster_instance_ids(node_type="Compute")
    logging.info(instances)
    _check_instance_ami_id(ec2, instances, pcluster_ami_id)

    # Update cluster with dlami as custom ami for compute queue
    # Fixme it doesn't work on release branch, fix it during release process
    filters = [
        {
            "Name": "name",
            "Values": ["dlami-aws-parallelcluster-" + get_installed_parallelcluster_version() + "-amzn2-hvm-x86_64*"],
        }
    ]
    pcluster_dlami_id = ec2.describe_images(Filters=filters, Owners=["self"]).get("Images")[0]["ImageId"]
    updated_config_file = pcluster_config_reader(
        config_file="pcluster.config.update.yaml",
        global_custom_ami=pcluster_ami_id,
        custom_ami=pcluster_dlami_id)

    # stop compute fleet before updating queue image
    cluster.stop()
    cluster.update(str(updated_config_file), force_update="true")
    instances = cluster.get_cluster_instance_ids(node_type="Compute")
    logging.info(instances)
    _check_instance_ami_id(ec2, instances, pcluster_dlami_id)

def test_build_image_custom_components(region, os, instance, test_datadir,
                                       pcluster_config_reader, architecture,
                                       s3_bucket_factory, images_factory,
                                       request):
    """Test custom components and base AMI is ParallelCluster AMI"""
    # Custom script
    custom_script_file = (
        "custom_script_ubuntu.sh" if os in ["ubuntu1804", "ubuntu2004"] else "custom_script.sh"
    )

    # Create S3 bucket for pre install scripts, to remove epel package if it is installed
    bucket_name = s3_bucket_factory()
    bucket = boto3.resource("s3", region_name=region).Bucket(bucket_name)
    bucket.upload_file(str(test_datadir / custom_script_file),
                       "scripts/custom_script.sh")

    # Get ParallelCluster AMI as base AMI
    base_ami = retrieve_latest_ami(region,
                                   os,
                                   ami_type="pcluster",
                                   architecture=architecture)

    image_id = generate_stack_name(
        "integ-tests-build-image-custom-components",
        request.config.getoption("stackname_suffix"))
    image_config = pcluster_config_reader(
        config_file="image.config.yaml",
        parent_image=base_ami,
        instance_type=instance,
        bucket_name=bucket_name,
        region=region,
    )

    image = images_factory(image_id, image_config, region)

    _test_build_image_success(image)
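For context, a plausible shape of the rendered config with a custom script component; field names assume the ParallelCluster 3 build-image schema, and the bucket placeholder stands in for the bucket created above:

import yaml

# Hypothetical rendered config; ParentImage and bucket name are placeholders.
rendered_config = yaml.safe_load("""
Build:
  InstanceType: c5.xlarge
  ParentImage: ami-0123456789abcdef0   # base_ami
  Components:
    - Type: script
      Value: s3://example-bucket/scripts/custom_script.sh
""")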
def _create_directory_stack(
    cfn_stacks_factory,
    request,
    directory_type,
    test_resources_dir,
    ad_admin_password,
    ad_user_password,
    bucket_name,
    region,
    vpc_stack,
):
    directory_stack_name = generate_stack_name(
        f"integ-tests-MultiUserInfraStack{directory_type}",
        request.config.getoption("stackname_suffix"))

    if directory_type not in ("MicrosoftAD", "SimpleAD"):
        raise Exception(f"Unknown directory type: {directory_type}")

    upload_custom_resources(test_resources_dir, bucket_name)
    directory_stack_template_path = os_lib.path.join(test_resources_dir,
                                                     "ad_stack.yaml")
    account_id = (
        boto3.client("sts", region_name=region, endpoint_url=get_sts_endpoint(region))
        .get_caller_identity()
        .get("Account")
    )
    config_args = {
        "region": region,
        "account": account_id,
        "admin_node_ami_id": retrieve_latest_ami(region, "alinux2"),
        "admin_node_instance_type": "c5.large",
        "admin_node_key_name": request.config.getoption("key_name"),
        "ad_admin_password": ad_admin_password,
        "ad_user_password": ad_user_password,
        "ad_domain_name": f"{directory_type.lower()}.multiuser.pcluster",
        "default_ec2_domain": "ec2.internal"
        if region == "us-east-1" else f"{region}.compute.internal",
        "ad_admin_user":
        "******" if directory_type == "SimpleAD" else "Admin",
        "num_users_to_create": 100,
        "bucket_name": bucket_name,
        "directory_type": directory_type,
    }
    logging.info("Creating stack %s", directory_stack_name)
    with open(
            render_jinja_template(directory_stack_template_path,
                                  **config_args)) as directory_stack_template:
        params = [
            {"ParameterKey": "Vpc", "ParameterValue": vpc_stack.cfn_outputs["VpcId"]},
            {"ParameterKey": "PrivateSubnetOne", "ParameterValue": vpc_stack.cfn_outputs["PrivateSubnetId"]},
            {
                "ParameterKey": "PrivateSubnetTwo",
                "ParameterValue": vpc_stack.cfn_outputs["PrivateAdditionalCidrSubnetId"],
            },
            {"ParameterKey": "PublicSubnetOne", "ParameterValue": vpc_stack.cfn_outputs["PublicSubnetId"]},
        ]
        directory_stack = CfnStack(
            name=directory_stack_name,
            region=region,
            template=directory_stack_template.read(),
            parameters=params,
            capabilities=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"],
        )
    cfn_stacks_factory.create_stack(directory_stack)
    logging.info("Creation of stack %s complete", directory_stack_name)
    return directory_stack