def test_build_image_wrong_pcluster_version(
    region,
    os,
    instance,
    pcluster_config_reader,
    architecture,
    pcluster_ami_without_standard_naming,
    images_factory,
    request,
):
    """Test error message when AMI provided was baked by a pcluster whose version is different from current version"""
    installed_version = get_installed_parallelcluster_version()
    wrong_version = "2.8.1"
    logging.info("Asserting wrong_version is different from current_version")
    assert_that(installed_version != wrong_version).is_true()

    # Retrieve an AMI without 'aws-parallelcluster-<version>' in its name.
    # Therefore, we can bypass the version check in CLI and test version check of .bootstrapped file in Cookbook.
    mismatched_ami = pcluster_ami_without_standard_naming(wrong_version)

    rendered_config = pcluster_config_reader(
        config_file="image.config.yaml",
        parent_image=mismatched_ami,
        instance_type=instance,
    )
    image_id = generate_stack_name(
        "integ-tests-build-image-wrong-version",
        request.config.getoption("stackname_suffix"),
    )
    image = images_factory(image_id, rendered_config, region)

    _test_build_image_failed(image)

    # Pull the build log stream and verify the version-mismatch error was reported.
    events = image.get_log_events("3.0.0/1")["events"]
    log_data = " ".join(event["message"] for event in events)
    assert_that(log_data).matches(
        fr"AMI was created.+{wrong_version}.+is.+used.+{installed_version}")
def store_secret_in_secret_manager(request, region, cfn_stacks_factory):
    """Yield a callable that stores a secret string in its own CFN stack; tear the stack down afterwards."""
    secret_stack_name = generate_stack_name(
        "integ-tests-secret", request.config.getoption("stackname_suffix"))

    def _store_secret(secret):
        # Minimal template holding a single Secret resource.
        secret_template = Template()
        secret_template.set_version("2010-09-09")
        secret_template.set_description("stack to store a secret string")
        secret_template.add_resource(Secret("Secret", SecretString=secret))
        secret_stack = CfnStack(
            name=secret_stack_name,
            region=region,
            template=secret_template.to_json(),
        )
        cfn_stacks_factory.create_stack(secret_stack)
        return secret_stack.cfn_resources["Secret"]

    yield _store_secret

    # Teardown: keep the stack around when --no-delete was requested (debugging aid).
    if request.config.getoption("no_delete"):
        logging.info(
            "Not deleting stack %s because --no-delete option was specified",
            secret_stack_name)
    else:
        logging.info("Deleting stack %s", secret_stack_name)
        cfn_stacks_factory.delete_stack(secret_stack_name, region)
def hosted_zone_factory(vpc_stack, cfn_stacks_factory, request, region):
    """Create a hosted zone stack."""
    stack_name = generate_stack_name(
        "integ-tests-hosted-zone",
        request.config.getoption("stackname_suffix"))
    # Domain name is derived from the (unique) stack name.
    domain_name = stack_name + ".com"

    def create_hosted_zone():
        # Template with a single hosted zone associated to the test VPC.
        zone_template = Template()
        zone_template.set_version("2010-09-09")
        zone_template.set_description(
            "Hosted zone stack created for testing existing DNS")
        zone_vpc = HostedZoneVPCs(
            VPCId=vpc_stack.cfn_outputs["VpcId"], VPCRegion=region)
        zone_template.add_resource(
            HostedZone("HostedZoneResource", Name=domain_name, VPCs=[zone_vpc]))
        zone_stack = CfnStack(
            name=stack_name,
            region=region,
            template=zone_template.to_json(),
        )
        cfn_stacks_factory.create_stack(zone_stack)
        return zone_stack.cfn_resources["HostedZoneResource"], domain_name

    yield create_hosted_zone

    # Teardown unless --no-delete was requested.
    if not request.config.getoption("no_delete"):
        cfn_stacks_factory.delete_stack(stack_name, region)
# Example #4 (0)
def test_custom_image(region, api_client, build_image, os, request,
                      pcluster_config_reader):
    """Drive an image build through the API client and verify its lifecycle states."""
    base_ami = retrieve_latest_ami(region, os)

    rendered_config_path = pcluster_config_reader(config_file="image.config.yaml",
                                                  parent_image=base_ami)
    # Read the rendered config; avoid shadowing the path variable with the handle.
    with open(rendered_config_path, encoding="utf-8") as config_fd:
        config = config_fd.read()

    image_id = generate_stack_name(
        "integ-tests-build-image",
        request.config.getoption("stackname_suffix"))
    client = image_operations_api.ImageOperationsApi(api_client)

    _test_build_image(client, build_image, image_id, config)

    _test_describe_image(region, client, image_id, "BUILD_IN_PROGRESS")
    _test_list_images(region, client, image_id, "PENDING")

    # CFN stack is deleted as soon as image is available
    _cloudformation_wait(region, image_id, "stack_delete_complete")

    _test_describe_image(region, client, image_id, "BUILD_COMPLETE")
    _test_list_images(region, client, image_id, "AVAILABLE")

    _delete_image(region, client, image_id)
def api_with_default_settings(api_infrastructure_s3_uri, public_ecr_image_uri,
                              api_definition_s3_uri, request, region):
    """Deploy the ParallelCluster API server stack, yield it, and always tear it down."""
    factory = CfnStacksFactory(request.config.getoption("credential"))

    # Forward only the optional overrides that were actually supplied.
    params = []
    for key, value in (("ApiDefinitionS3Uri", api_definition_s3_uri),
                       ("PublicEcrImageUri", public_ecr_image_uri)):
        if value:
            params.append({"ParameterKey": key, "ParameterValue": value})

    # Fall back to the released template for the installed pcluster version.
    default_template = (
        f"s3://{region}-aws-parallelcluster/parallelcluster/{get_installed_parallelcluster_version()}"
        "/api/parallelcluster-api.yaml")
    template = api_infrastructure_s3_uri or default_template
    logging.info(
        f"Creating API Server stack in {region} with template {template}")
    stack = CfnStack(
        name=generate_stack_name("integ-tests-api",
                                 request.config.getoption("stackname_suffix")),
        region=region,
        parameters=params,
        capabilities=["CAPABILITY_NAMED_IAM", "CAPABILITY_AUTO_EXPAND"],
        template=template,
    )
    try:
        factory.create_stack(stack)
        yield stack
    finally:
        # Clean up even when creation failed part-way through.
        factory.delete_all_stacks()
def custom_security_group(vpc_stack, region, request, cfn_stacks_factory):
    """Yield a stack containing a self-referencing security group for SG-related tests."""
    sg_template = Template()
    sg_template.set_version("2010-09-09")
    sg_template.set_description("custom security group stack for testing additional_sg and vpc_security_group_id")
    sg_resource = SecurityGroup(
        "SecurityGroupResource",
        GroupDescription="custom security group for testing additional_sg and vpc_security_group_id",
        VpcId=vpc_stack.cfn_outputs["VpcId"],
    )
    sg_template.add_resource(sg_resource)
    # Self-referencing ingress rule: allow all traffic originating from the group itself.
    sg_template.add_resource(
        SecurityGroupIngress(
            "SecurityGroupIngressResource",
            IpProtocol="-1",
            FromPort=0,
            ToPort=65535,
            SourceSecurityGroupId=Ref(sg_resource),
            GroupId=Ref(sg_resource),
        ))
    sg_stack = CfnStack(
        name=generate_stack_name("integ-tests-custom-sg", request.config.getoption("stackname_suffix")),
        region=region,
        template=sg_template.to_json(),
    )
    cfn_stacks_factory.create_stack(sg_stack)

    yield sg_stack

    if not request.config.getoption("no_delete"):
        cfn_stacks_factory.delete_stack(sg_stack.name, region)
def _write_file_into_efs(region, vpc_stack, efs_stack, request, key_name,
                         cfn_stacks_factory):
    """Create a throwaway stack (mount target + EC2 writer instance) that writes an
    empty, randomly named file into the existing EFS, delete the stack, and return
    the random file name so callers can later check the file exists."""
    write_file_template = Template()
    write_file_template.set_version("2010-09-09")
    write_file_template.set_description(
        "Stack to write a file to the existing EFS")
    # Use the VPC default SG so the writer instance can reach the mount target.
    default_security_group_id = get_default_vpc_security_group(
        vpc_stack.cfn_outputs["VpcId"], region)
    write_file_template.add_resource(
        MountTarget(
            "MountTargetResource",
            FileSystemId=efs_stack.cfn_resources["FileSystemResource"],
            SubnetId=vpc_stack.cfn_outputs["PublicSubnetId"],
            SecurityGroups=[default_security_group_id],
        ))
    random_file_name = random_alphanumeric()
    # cloud-config user data: mount the EFS over NFSv4.1, touch the random file,
    # unmount, then signal CloudFormation. Literal pieces are concatenated with
    # runtime values; "${!var}" escapes shell variables from CFN's Sub.
    # NOTE(review): "opt/aws/bin/cfn-signal" lacks a leading "/" — verify the
    # signal binary is actually found (the CreationPolicy below depends on it).
    user_data = (
        """
        #cloud-config
        package_update: true
        package_upgrade: true
        runcmd:
        - yum install -y nfs-utils
        - file_system_id_1=""" +
        efs_stack.cfn_resources["FileSystemResource"] + """
        - efs_mount_point_1=/mnt/efs/fs1
        - mkdir -p "${!efs_mount_point_1}"
        - mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport,_netdev """
        +
        """"${!file_system_id_1}.efs.${AWS::Region}.${AWS::URLSuffix}:/" "${!efs_mount_point_1}"
        - touch ${!efs_mount_point_1}/""" + random_file_name + """
        - umount ${!efs_mount_point_1}
        - opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackName} --resource InstanceToWriteEFS --region ${AWS::Region}
        """)
    write_file_template.add_resource(
        Instance(
            "InstanceToWriteEFS",
            # Stack creation waits up to 10 minutes for the cfn-signal above.
            CreationPolicy={"ResourceSignal": {
                "Timeout": "PT10M"
            }},
            ImageId=retrieve_latest_ami(region, "alinux2"),
            InstanceType="c5.xlarge",
            SubnetId=vpc_stack.cfn_outputs["PublicSubnetId"],
            UserData=Base64(Sub(user_data)),
            KeyName=key_name,
            DependsOn=["MountTargetResource"],
        ))
    write_file_stack = CfnStack(
        name=generate_stack_name("integ-tests-efs-write-file",
                                 request.config.getoption("stackname_suffix")),
        region=region,
        template=write_file_template.to_json(),
    )
    cfn_stacks_factory.create_stack(write_file_stack)

    # Delete immediately: the written file persists on the EFS after the writer is gone.
    cfn_stacks_factory.delete_stack(write_file_stack.name, region)

    return random_file_name
def _create_nlb_stack(cfn_stacks_factory, request, directory_stack, region,
                      test_resources_dir):
    """Create the NLB stack fronting the SimpleAD directory and return it."""
    template_path = os_lib.path.join(test_resources_dir, "NLB_SimpleAD.yaml")
    stack_name = generate_stack_name(
        "integ-tests-MultiUserInfraStackNLB",
        request.config.getoption("stackname_suffix"))
    logging.info("Creating stack %s", stack_name)
    # TODO: don't hardcode this ARN
    certificate_arn = "arn:aws:acm:us-east-1:447714826191:certificate/a17e8574-0cea-4d4c-8e79-a8ebb60f6f47"

    # The directory stack exposes comma-separated subnet ids and DNS addresses.
    subnet_ids = directory_stack.cfn_outputs["PrivateSubnetIds"].split(",")
    dns_ips = directory_stack.cfn_outputs["DirectoryDnsIpAddresses"].split(",")
    parameter_values = [
        ("LDAPSCertificateARN", certificate_arn),
        ("VPCId", directory_stack.cfn_outputs["VpcId"]),
        ("SubnetId1", subnet_ids[0]),
        ("SubnetId2", subnet_ids[1]),
        ("SimpleADPriIP", dns_ips[0]),
        ("SimpleADSecIP", dns_ips[1]),
    ]
    with open(template_path) as template_file:
        nlb_stack = CfnStack(
            name=stack_name,
            region=region,
            template=template_file.read(),
            parameters=[{
                "ParameterKey": key,
                "ParameterValue": value
            } for key, value in parameter_values],
        )
    cfn_stacks_factory.create_stack(nlb_stack)
    logging.info("Creation of NLB stack %s complete", stack_name)
    return nlb_stack
 def _create_network(region, template_path, parameters):
     """Create a networking stack from the given template and return it.

     NOTE(review): `request` and `factory` are free variables here — this looks
     like a closure whose enclosing scope is not visible in this chunk; confirm
     they are bound before calling.
     """
     # Load the CFN template body from disk.
     file_content = extract_template(template_path)
     stack = CfnStack(
         name=generate_stack_name("integ-tests-networking", request.config.getoption("stackname_suffix")),
         region=region,
         template=file_content,
         parameters=parameters,
     )
     factory.create_stack(stack)
     return stack
# Example #10 (0)
def bastion_instance(vpc_stack, cfn_stacks_factory, request, region, key_name):
    """Class to create bastion instance used to execute commands on cluster in private subnet."""
    stack_name = utils.generate_stack_name(
        "integ-tests-networking-bastion",
        request.config.getoption("stackname_suffix"))

    template = Template()
    template.set_version()
    template.set_description("Create Networking bastion stack")

    # Allow SSH from anywhere so the test runner can reach the bastion.
    ssh_rule = ec2.SecurityGroupRule(
        IpProtocol="tcp",
        FromPort="22",
        ToPort="22",
        CidrIp="0.0.0.0/0",
    )
    security_group = ec2.SecurityGroup(
        "NetworkingTestBastionSG",
        GroupDescription="SecurityGroup for Bastion",
        SecurityGroupIngress=[ssh_rule],
        VpcId=vpc_stack.cfn_outputs["VpcId"],
    )
    bastion = ec2.Instance(
        "NetworkingBastionInstance",
        InstanceType="c5.xlarge",
        ImageId=retrieve_latest_ami(region, "alinux2"),
        KeyName=key_name,
        SecurityGroupIds=[Ref(security_group)],
        SubnetId=vpc_stack.cfn_outputs["PublicSubnetId"],
    )
    template.add_resource(security_group)
    template.add_resource(bastion)
    # Export the public IP so the fixture can hand back an SSH target.
    template.add_output(
        Output("BastionIP",
               Value=GetAtt(bastion, "PublicIp"),
               Description="The Bastion Public IP"))
    bastion_stack = CfnStack(
        name=stack_name,
        region=region,
        template=template.to_json(),
    )
    cfn_stacks_factory.create_stack(bastion_stack)
    bastion_ip = bastion_stack.cfn_outputs.get("BastionIP")
    logging.info(f"Bastion_ip: {bastion_ip}")

    yield f"ec2-user@{bastion_ip}"

    if not request.config.getoption("no_delete"):
        cfn_stacks_factory.delete_stack(stack_name, region)
def fsx_factory(vpc_stack, cfn_stacks_factory, request, region, key_name):
    """
    Define a fixture to manage the creation and destruction of fsx.

    return fsx_id
    """
    stack_name = utils.generate_stack_name(
        "integ-tests-fsx", request.config.getoption("stackname_suffix"))

    def _fsx_factory(**kwargs):
        # FSx stack
        template = Template()
        template.set_version()
        template.set_description("Create FSx stack")

        # Create security group. If using an existing file system
        # It must be associated to a security group that allows inbound TCP traffic to port 988
        lustre_rule = ec2.SecurityGroupRule(
            IpProtocol="tcp",
            FromPort="988",
            ToPort="988",
            CidrIp="0.0.0.0/0",
        )
        security_group = ec2.SecurityGroup(
            "FSxSecurityGroup",
            GroupDescription="SecurityGroup for testing existing FSx",
            SecurityGroupIngress=[lustre_rule],
            VpcId=vpc_stack.cfn_outputs["VpcId"],
        )

        # Remaining kwargs (including "title") configure the FileSystem resource.
        file_system = FileSystem(
            SecurityGroupIds=[Ref(security_group)],
            SubnetIds=[vpc_stack.cfn_outputs["PublicSubnetId"]],
            **kwargs)
        template.add_resource(security_group)
        template.add_resource(file_system)
        stack = CfnStack(
            name=stack_name,
            region=region,
            template=template.to_json(),
        )
        cfn_stacks_factory.create_stack(stack)

        return stack.cfn_resources[kwargs.get("title")]

    yield _fsx_factory
    if not request.config.getoption("no_delete"):
        cfn_stacks_factory.delete_stack(stack_name, region)
def _create_vpc_stack(request, template, region, cfn_stacks_factory):
    """Return the VPC stack: reuse the one named by --vpc-stack if given, else create a new one."""
    try:
        set_credentials(region, request.config.getoption("credential"))
        existing_stack_name = request.config.getoption("vpc_stack")
        if existing_stack_name:
            # Reuse a pre-existing VPC stack instead of creating one.
            logging.info("Using stack {0} in region {1}".format(existing_stack_name, region))
            stack = CfnStack(name=existing_stack_name, region=region, template=template.to_json())
        else:
            stack = CfnStack(
                name=generate_stack_name("integ-tests-vpc", request.config.getoption("stackname_suffix")),
                region=region,
                template=template.to_json(),
            )
            cfn_stacks_factory.create_stack(stack)
    finally:
        # Always drop the temporary credentials, even if stack creation failed.
        unset_credentials()
    return stack
# Example #13 (0)
def existing_eip(region, request, cfn_stacks_factory):
    """Yield the Elastic IP created by a dedicated test stack; delete it on teardown."""
    eip_template = Template()
    eip_template.set_version("2010-09-09")
    eip_template.set_description("EIP stack for testing existing EIP")
    eip_template.add_resource(EIP("ElasticIP", Domain="vpc"))
    eip_stack = CfnStack(
        name=generate_stack_name("integ-tests-eip",
                                 request.config.getoption("stackname_suffix")),
        region=region,
        template=eip_template.to_json(),
    )
    cfn_stacks_factory.create_stack(eip_stack)

    yield eip_stack.cfn_resources["ElasticIP"]

    # Honor --no-delete for post-run inspection.
    if not request.config.getoption("no_delete"):
        cfn_stacks_factory.delete_stack(eip_stack.name, region)
def _build_image(images_factory, instance_profile, lambda_cleanup_role, os,
                 pcluster_config_reader, region):
    """Kick off an image build from the latest pcluster AMI and return the image object."""
    # Generate image ID
    image_id = generate_stack_name("integ-tests-build-image", "")
    # Get base AMI
    parent_ami = retrieve_latest_ami(
        region, os, ami_type="pcluster", architecture="x86_64")
    rendered_config = pcluster_config_reader(
        config_file="image.config.yaml",
        parent_image=parent_ami,
        instance_profile=instance_profile,
        lambda_cleanup_role=lambda_cleanup_role,
    )
    return images_factory(image_id, rendered_config, region)
def test_build_image(
    region,
    instance,
    os,
    pcluster_config_reader,
    architecture,
    s3_bucket_factory,
    build_image_custom_resource,
    images_factory,
    request,
):
    """Test build image for given region and os"""
    image_id = generate_stack_name(
        "integ-tests-build-image",
        request.config.getoption("stackname_suffix"))

    # Get custom instance role
    instance_role = build_image_custom_resource(image_id=image_id)

    # Get custom S3 bucket
    bucket_name = s3_bucket_factory()

    # Get base AMI
    # remarkable AMIs are not available for ARM and ubuntu2004, centos7 yet
    if os in ["ubuntu2004", "centos7"]:
        base_ami = retrieve_latest_ami(region, os, architecture=architecture)
    else:
        base_ami = retrieve_latest_ami(region,
                                       os,
                                       ami_type="remarkable",
                                       architecture=architecture)

    image_config = pcluster_config_reader(config_file="image.config.yaml",
                                          parent_image=base_ami,
                                          instance_role=instance_role,
                                          bucket_name=bucket_name)

    image = images_factory(image_id, image_config, region)

    # Run the full battery of verifications against the built image.
    for check in (
            _test_build_tag,
            _test_image_stack_events,
            _test_build_image_success,
            _test_image_tag_and_volume,
            _test_list_image_log_streams,
            _test_get_image_log_events,
            _test_list_images,
    ):
        check(image)
    _test_export_logs(s3_bucket_factory, image, region)
# Example #16 (0)
def _test_cluster_workflow(region, api_client, create_cluster, request, pcluster_config_reader, scheduler, instance):
    """Exercise the full cluster lifecycle through the API clients: create, describe/list,
    dry-run update, instance operations, compute-fleet stop, and delete."""
    # Slurm configs don't need vcpus; awsbatch configs are sized by total vcpus.
    if scheduler == "slurm":
        initial_config_file = pcluster_config_reader()
        updated_config_file = pcluster_config_reader("pcluster.config.update.yaml")
    else:
        vcpus = get_instance_vcpus(region, instance) * NUM_OF_COMPUTE_INSTANCES
        initial_config_file = pcluster_config_reader(vcpus=vcpus)
        updated_config_file = pcluster_config_reader("pcluster.config.update.yaml", vcpus=vcpus)

    cluster_name = generate_stack_name("integ-tests", request.config.getoption("stackname_suffix"))
    cluster_operations_client = cluster_operations_api.ClusterOperationsApi(api_client)
    cluster_compute_fleet_client = cluster_compute_fleet_api.ClusterComputeFleetApi(api_client)
    cluster_instances_client = cluster_instances_api.ClusterInstancesApi(api_client)

    cluster = _test_create_cluster(cluster_operations_client, create_cluster, cluster_name, initial_config_file)

    # Verify in-progress status is reported consistently by both list and describe.
    _test_list_clusters(region, cluster_operations_client, cluster_name, "CREATE_IN_PROGRESS")
    _test_describe_cluster(region, cluster_operations_client, cluster_name, "CREATE_IN_PROGRESS")

    _cloudformation_wait(region, cluster_name, "stack_create_complete")

    cluster.mark_as_created()

    _test_list_clusters(region, cluster_operations_client, cluster_name, "CREATE_COMPLETE")
    _test_describe_cluster(region, cluster_operations_client, cluster_name, "CREATE_COMPLETE")

    # We wait for instances to be ready before transitioning stack to CREATE_COMPLETE only when using Slurm;
    # for awsbatch the stack completes before compute instances exist, so wait for them explicitly here.
    if scheduler == "awsbatch":
        wait_for_num_instances_in_cluster(region=region, cluster_name=cluster_name, desired=NUM_OF_COMPUTE_INSTANCES)

    # Update cluster with new configuration
    with open(updated_config_file, encoding="utf-8") as config_file:
        updated_cluster_config = config_file.read()
    _test_update_cluster_dryrun(region, cluster_operations_client, cluster_name, updated_cluster_config)

    head_node = _test_describe_cluster_head_node(region, cluster_instances_client, cluster_name)
    compute_node_map = _test_describe_cluster_compute_nodes(region, cluster_instances_client, cluster_name)
    # Instance deletion through the API is only supported for Slurm clusters.
    if scheduler == "slurm":
        _test_delete_cluster_instances(region, cluster_instances_client, cluster_name, head_node, compute_node_map)

    # The two schedulers report the active fleet with different state names.
    running_state = "RUNNING" if scheduler == "slurm" else "ENABLED"
    _test_describe_compute_fleet(region, cluster_compute_fleet_client, cluster_name, running_state)
    _test_stop_compute_fleet(region, cluster_compute_fleet_client, cluster_instances_client, cluster_name, scheduler)

    _test_delete_cluster(region, cluster_operations_client, cluster_name)
# Example #17 (0)
def efs_stack(cfn_stacks_factory, request, region):
    """EFS stack contains a single efs resource."""
    efs_template = Template()
    efs_template.set_version("2010-09-09")
    efs_template.set_description("EFS stack created for testing existing EFS")
    efs_template.add_resource(FileSystem("FileSystemResource"))
    efs_cfn_stack = CfnStack(
        name=generate_stack_name("integ-tests-efs",
                                 request.config.getoption("stackname_suffix")),
        region=region,
        template=efs_template.to_json(),
    )
    cfn_stacks_factory.create_stack(efs_cfn_stack)

    yield efs_cfn_stack

    # Teardown unless --no-delete was requested.
    if not request.config.getoption("no_delete"):
        cfn_stacks_factory.delete_stack(efs_cfn_stack.name, region)
# Example #18 (0)
def placement_group_stack(cfn_stacks_factory, request, region):
    """Placement group stack contains a placement group.

    Yields the stack so tests can use the created PlacementGroup resource.
    The stack is deleted on teardown unless --no-delete is specified.
    """
    placement_group_template = Template()
    placement_group_template.set_version()
    placement_group_template.set_description(
        "Placement group stack created for testing existing placement group")
    placement_group_template.add_resource(
        PlacementGroup("PlacementGroup", Strategy="cluster"))
    stack = CfnStack(
        name=generate_stack_name("integ-tests-placement-group",
                                 request.config.getoption("stackname_suffix")),
        region=region,
        template=placement_group_template.to_json(),
    )
    cfn_stacks_factory.create_stack(stack)

    yield stack

    # Honor --no-delete like every other stack fixture in this module, so the
    # placement group can be inspected after a failed run (previously the stack
    # was deleted unconditionally).
    if not request.config.getoption("no_delete"):
        cfn_stacks_factory.delete_stack(stack.name, region)
def test_build_image_custom_components(region, os, instance, test_datadir,
                                       pcluster_config_reader, architecture,
                                       s3_bucket_factory, images_factory,
                                       request):
    """Test custom components and base AMI is ParallelCluster AMI"""
    # Custom script
    if os in ["ubuntu1804", "ubuntu2004"]:
        custom_script_file = "custom_script_ubuntu.sh"
    else:
        custom_script_file = "custom_script.sh"

    # Create S3 bucket for pre install scripts, to remove epel package if it is installed
    bucket_name = s3_bucket_factory()
    bucket = boto3.resource("s3", region_name=region).Bucket(bucket_name)
    bucket.upload_file(str(test_datadir / custom_script_file),
                       "scripts/custom_script.sh")

    # Get ParallelCluster AMI as base AMI
    base_ami = retrieve_latest_ami(region,
                                   os,
                                   ami_type="pcluster",
                                   architecture=architecture)

    image_id = generate_stack_name(
        "integ-tests-build-image-custom-components",
        request.config.getoption("stackname_suffix"))
    image_config = pcluster_config_reader(
        config_file="image.config.yaml",
        parent_image=base_ami,
        instance_type=instance,
        bucket_name=bucket_name,
        region=region,
    )

    image = images_factory(image_id, image_config, region)

    _test_build_image_success(image)
def _create_directory_stack(
    cfn_stacks_factory,
    request,
    directory_type,
    test_resources_dir,
    ad_admin_password,
    ad_user_password,
    bucket_name,
    region,
    vpc_stack,
):
    """Create the Active Directory infrastructure stack (MicrosoftAD or SimpleAD)
    from a Jinja-rendered template and return the created CfnStack."""
    directory_stack_name = generate_stack_name(
        f"integ-tests-MultiUserInfraStack{directory_type}",
        request.config.getoption("stackname_suffix"))

    # Only these two directory flavors are supported by the template.
    if directory_type not in ("MicrosoftAD", "SimpleAD"):
        raise Exception(f"Unknown directory type: {directory_type}")

    upload_custom_resources(test_resources_dir, bucket_name)
    directory_stack_template_path = os_lib.path.join(test_resources_dir,
                                                     "ad_stack.yaml")
    # Resolve the current AWS account id via STS (regional endpoint).
    account_id = (boto3.client(
        "sts", region_name=region, endpoint_url=get_sts_endpoint(
            region)).get_caller_identity().get("Account"))
    # Jinja context used to render the ad_stack.yaml template.
    config_args = {
        "region": region,
        "account": account_id,
        "admin_node_ami_id": retrieve_latest_ami(region, "alinux2"),
        "admin_node_instance_type": "c5.large",
        "admin_node_key_name": request.config.getoption("key_name"),
        "ad_admin_password": ad_admin_password,
        "ad_user_password": ad_user_password,
        "ad_domain_name": f"{directory_type.lower()}.multiuser.pcluster",
        # us-east-1 is the only region with the bare "ec2.internal" domain.
        "default_ec2_domain": "ec2.internal"
        if region == "us-east-1" else f"{region}.compute.internal",
        # NOTE(review): the SimpleAD admin user value appears redacted ("******")
        # in this copy of the source — confirm the real value before relying on it.
        "ad_admin_user":
        "******" if directory_type == "SimpleAD" else "Admin",
        "num_users_to_create": 100,
        "bucket_name": bucket_name,
        "directory_type": directory_type,
    }
    logging.info("Creating stack %s", directory_stack_name)
    with open(
            render_jinja_template(directory_stack_template_path,
                                  **config_args)) as directory_stack_template:
        # VPC and subnet wiring comes from the shared VPC stack's outputs.
        params = [
            {
                "ParameterKey": "Vpc",
                "ParameterValue": vpc_stack.cfn_outputs["VpcId"]
            },
            {
                "ParameterKey": "PrivateSubnetOne",
                "ParameterValue": vpc_stack.cfn_outputs["PrivateSubnetId"]
            },
            {
                "ParameterKey":
                "PrivateSubnetTwo",
                "ParameterValue":
                vpc_stack.cfn_outputs["PrivateAdditionalCidrSubnetId"],
            },
            {
                "ParameterKey": "PublicSubnetOne",
                "ParameterValue": vpc_stack.cfn_outputs["PublicSubnetId"]
            },
        ]
        directory_stack = CfnStack(
            name=directory_stack_name,
            region=region,
            template=directory_stack_template.read(),
            parameters=params,
            # The template creates IAM resources, so both capabilities are required.
            capabilities=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"],
        )
    cfn_stacks_factory.create_stack(directory_stack)
    logging.info("Creation of stack %s complete", directory_stack_name)
    return directory_stack
# Example #21 (0)
def enable_vpc_endpoints(vpc_stack, region, cfn_stacks_factory, request):
    """Deploy VPC endpoints (Logs/CFN/EC2 interface, S3/DynamoDB gateway) into the no-internet subnet."""
    # Note that the endpoints service name in China is irregular.
    prefix = "cn." if region.startswith("cn-") else ""
    interface = VPCEndpointConfig.EndpointType.INTERFACE
    gateway = VPCEndpointConfig.EndpointType.GATEWAY
    vpc_endpoints = [
        VPCEndpointConfig(
            name="LogsEndpoint",
            service_name=f"com.amazonaws.{region}.logs",
            type=interface,
            enable_private_dns=True,
        ),
        VPCEndpointConfig(
            name="CFNEndpoint",
            service_name=prefix + f"com.amazonaws.{region}.cloudformation",
            type=interface,
            enable_private_dns=True,
        ),
        VPCEndpointConfig(
            name="EC2Endpoint",
            service_name=prefix + f"com.amazonaws.{region}.ec2",
            type=interface,
            enable_private_dns=True,
        ),
        VPCEndpointConfig(
            name="S3Endpoint",
            service_name=f"com.amazonaws.{region}.s3",
            type=gateway,
            enable_private_dns=False,
        ),
        VPCEndpointConfig(
            name="DynamoEndpoint",
            service_name=f"com.amazonaws.{region}.dynamodb",
            type=gateway,
            enable_private_dns=False,
        ),
    ]

    vpc_id = vpc_stack.cfn_outputs["VpcId"]
    subnet_id = vpc_stack.cfn_outputs["NoInternetSubnetId"]
    route_table_ids = get_route_tables(subnet_id, region)

    endpoints_template = Template()
    for endpoint in vpc_endpoints:
        properties = {
            "ServiceName": endpoint.service_name,
            "PrivateDnsEnabled": endpoint.enable_private_dns,
            "VpcEndpointType": str(endpoint.type),
            "VpcId": vpc_id,
        }
        # Interface endpoints attach to the subnet; gateway endpoints to route tables.
        if endpoint.type == interface:
            properties["SubnetIds"] = [subnet_id]
        elif endpoint.type == gateway:
            properties["RouteTableIds"] = route_table_ids
        endpoints_template.add_resource(VPCEndpoint(endpoint.name, **properties))

    endpoints_stack = CfnStack(
        name=generate_stack_name("integ-tests-vpc-endpoints",
                                 request.config.getoption("stackname_suffix")),
        region=region,
        template=endpoints_template.to_json(),
    )

    cfn_stacks_factory.create_stack(endpoints_stack)
    yield
    if not request.config.getoption("no_delete"):
        cfn_stacks_factory.delete_stack(endpoints_stack.name, region)
    def _custom_resource(image_id):
        """Create a stack with a custom EC2 instance role for build-image tests
        and return the role's ARN. Records the stack name in the enclosing
        scope (stack_name_post_test) so the caller can clean it up later."""
        nonlocal stack_name_post_test
        # custom resource stack
        custom_resource_stack_name = generate_stack_name(
            "-".join([image_id, "custom", "resource"]),
            request.config.getoption("stackname_suffix"))
        stack_name_post_test = custom_resource_stack_name
        custom_resource_template = Template()
        custom_resource_template.set_version()
        custom_resource_template.set_description(
            "Create build image custom resource stack")

        # Create a instance role
        # Managed policies required by EC2 Image Builder instances.
        managed_policy_arns = [
            "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore",
            "arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilder",
        ]

        # Inline policy granting the extra permissions the build needs.
        policy_document = iam.Policy(
            PolicyName="myInstanceRoleInlinePolicy",
            PolicyDocument={
                "Statement": [{
                    "Effect":
                    "Allow",
                    "Action": [
                        "ec2:CreateTags",
                        "ec2:ModifyImageAttribute",
                        "s3:GetObject",
                        "cloudformation:ListStacks",
                    ],
                    "Resource":
                    "*",
                }]
            },
        )
        # Random suffix avoids name collisions across concurrent test runs.
        role_name = "".join(["dummyInstanceRole", generate_random_string()])
        instance_role = iam.Role(
            "CustomInstanceRole",
            AssumeRolePolicyDocument={
                "Statement": [{
                    "Effect": "Allow",
                    "Principal": {
                        "Service": ["ec2.amazonaws.com"]
                    },
                    "Action": ["sts:AssumeRole"]
                }]
            },
            Description="custom instance role for build image test.",
            ManagedPolicyArns=managed_policy_arns,
            Path="/parallelcluster/",
            Policies=[policy_document],
            RoleName=role_name,
        )

        custom_resource_template.add_resource(instance_role)
        custom_resource_stack = CfnStack(
            name=custom_resource_stack_name,
            region=region,
            template=custom_resource_template.to_json(),
            # Required because the template creates a named IAM role.
            capabilities=["CAPABILITY_NAMED_IAM"],
        )
        cfn_stacks_factory.create_stack(custom_resource_stack)

        # Look up the ARN of the role just created by the stack.
        instance_role_arn = boto3.client("iam").get_role(
            RoleName=role_name).get("Role").get("Arn")
        logging.info("Custom instance role arn %s", instance_role_arn)

        return instance_role_arn