Example 1
def _assert_root_volume_configuration(cluster, os, region, scheduler):
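    """Check that the cluster's root volumes match the LocalStorage settings in the cluster config."""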
    logging.info("Testing root volume type, iops, throughput.")

    # Test root volume of head node
    head_node = cluster.cfn_resources["HeadNode"]
    if utils.dict_has_nested_key(cluster.config,
                                 ("HeadNode", "LocalStorage", "RootVolume")):
        logging.info("Checking head node root volume settings")
        root_volume_id = utils.get_root_volume_id(head_node, region, os)
        expected_settings = cluster.config["HeadNode"]["LocalStorage"][
            "RootVolume"]
        _assert_volume_configuration(expected_settings, root_volume_id, region)
    if scheduler == "slurm":
        # Only when the scheduler is Slurm can the compute node root volumes also be configured
        instance_ids = cluster.get_cluster_instance_ids()
        for instance in instance_ids:
            if instance == head_node:
                # The head node root volume has already been checked above
                continue
            root_volume_id = utils.get_root_volume_id(instance, region, os)
            if utils.dict_has_nested_key(
                    cluster.config,
                    ("Scheduling", "SlurmQueues", 0, "ComputeSettings",
                     "LocalStorage", "RootVolume")):
                logging.info("Checking compute node root volume settings")
                expected_settings = cluster.config["Scheduling"][
                    "SlurmQueues"][0]["ComputeSettings"]["LocalStorage"][
                        "RootVolume"]
                _assert_volume_configuration(expected_settings, root_volume_id,
                                             region)
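
The helper _assert_volume_configuration is referenced above but not shown. A minimal sketch of what it might look like, assuming the RootVolume config keys (VolumeType, Iops, Throughput) map directly onto the fields returned by EC2's describe_volumes; this is an illustration, not the project's actual implementation:

import boto3
from assertpy import assert_that

def _assert_volume_configuration(expected_settings, volume_id, region):
    # Hypothetical sketch: fetch the volume and compare each configured attribute.
    volume = boto3.client("ec2", region_name=region).describe_volumes(
        VolumeIds=[volume_id]
    )["Volumes"][0]
    if "VolumeType" in expected_settings:
        assert_that(volume["VolumeType"]).is_equal_to(expected_settings["VolumeType"])
    if "Iops" in expected_settings:
        assert_that(volume["Iops"]).is_equal_to(expected_settings["Iops"])
    if "Throughput" in expected_settings:
        assert_that(volume["Throughput"]).is_equal_to(expected_settings["Throughput"])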
Example 2
def _test_root_volume_encryption(cluster, os, region, scheduler, encrypted):
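    """Check that the encryption state of every root volume matches the expected value."""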
    logging.info("Testing root volume encryption.")
    if scheduler == "slurm":
        # If the scheduler is Slurm, root volumes on both the head and compute nodes can be encrypted
        instance_ids = cluster.get_cluster_instance_ids()
        for instance in instance_ids:
            root_volume_id = utils.get_root_volume_id(instance, region, os)
            _test_ebs_encrypted_with_kms(root_volume_id, region, encrypted=encrypted)
    else:
        # With awsbatch, only the head node root volume can be encrypted.
        root_volume_id = utils.get_root_volume_id(cluster.cfn_resources["HeadNode"], region, os)
        _test_ebs_encrypted_with_kms(root_volume_id, region, encrypted=encrypted)
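
The assertion helper _test_ebs_encrypted_with_kms is not shown in the excerpt. A minimal sketch, assuming it only needs to compare the volume's Encrypted flag (checking the KMS key id, as the name suggests, would additionally require the expected key to be passed in):

import boto3
from assertpy import assert_that

def _test_ebs_encrypted_with_kms(volume_id, region, encrypted):
    # Hypothetical sketch: assert the volume's encryption flag matches expectations.
    volume = boto3.client("ec2", region_name=region).describe_volumes(
        VolumeIds=[volume_id]
    )["Volumes"][0]
    assert_that(volume["Encrypted"]).is_equal_to(encrypted)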
Example 3
def test_retain_on_deletion(pcluster_config_reader, clusters_factory, region,
                            os):
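    """Verify retain-on-deletion behavior: the retained EBS volume and the head node root
    volume survive cluster deletion, compute node root volumes are deleted, and a snapshot
    tagged with the stack id is created."""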
    cluster_config = pcluster_config_reader()
    cluster = clusters_factory(cluster_config)

    stack_arn = cluster.cfn_stack_arn
    retained_volume = cluster.cfn_resources["EBS0"]
    head_node_instance_id = get_head_node_instance_id(cluster)
    head_node_root_volume = get_root_volume_id(head_node_instance_id, region,
                                               os)
    compute_node_instance_ids = get_compute_nodes_instance_ids(
        cluster.name, region)
    logging.info("Checking at least one compute node is running")
    assert_that(len(compute_node_instance_ids)).is_greater_than_or_equal_to(1)
    compute_root_volumes = []
    for compute_node in compute_node_instance_ids:
        compute_root_volumes.append(
            get_root_volume_id(compute_node, region, os))
    logging.info("Compute root volume %s", compute_root_volumes)

    ec2_client = boto3.client("ec2")
    logging.info(
        "Checking that no snapshot tagged with the stack id exists before stack deletion"
    )
    snapshots = _get_snapshots(ec2_client, stack_arn)
    assert_that(snapshots).is_length(0)

    cluster.delete()

    logging.info(
        "Checking that a snapshot tagged with the stack id is created after stack deletion"
    )
    snapshots = _get_snapshots(ec2_client, stack_arn)
    assert_that(snapshots).is_length(1)

    logging.info("Checking retained volume after stack deletion")
    _check_volume(ec2_client, retained_volume)

    logging.info(
        "Checking retained head node root volume after stack deletion")
    _check_volume(ec2_client, head_node_root_volume)

    logging.info(
        "Checking compute node root volumes are deleted after stack deletion")
    with pytest.raises(ClientError, match="InvalidVolume.NotFound"):
        ec2_client.describe_volumes(VolumeIds=compute_root_volumes)
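
Neither _get_snapshots nor _check_volume appears in the excerpt. A minimal sketch of both, assuming the snapshot carries the stack ARN as a tag value (the exact tag key is not shown, so the sketch filters on tag-value) and that a retained, detached volume ends up in the "available" state:

from assertpy import assert_that

def _get_snapshots(ec2_client, stack_arn):
    # Hypothetical sketch: list snapshots carrying the stack id as a tag value.
    return ec2_client.describe_snapshots(
        Filters=[{"Name": "tag-value", "Values": [stack_arn]}]
    )["Snapshots"]

def _check_volume(ec2_client, volume_id):
    # Hypothetical sketch: the retained volume must still exist and be detached.
    volume = ec2_client.describe_volumes(VolumeIds=[volume_id])["Volumes"][0]
    assert_that(volume["State"]).is_equal_to("available")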
Example 4
def get_compute_node_root_volume_tags(cluster, os):
    """Return the given cluster's compute node's root volume's tags."""
    compute_nodes = cluster.get_cluster_instance_ids(node_type="Compute")
    assert_that(compute_nodes).is_length(1)
    root_volume_id = get_root_volume_id(compute_nodes[0], cluster.region, os)
    return get_tags_for_volume(root_volume_id, cluster.region)
Example 5
def get_head_node_root_volume_tags(cluster, os):
    """Return the given cluster's head node's root volume's tags."""
    head_node_instance_id = get_head_node_instance_id(cluster)
    root_volume_id = get_root_volume_id(head_node_instance_id, cluster.region,
                                        os)
    return get_tags_for_volume(root_volume_id, cluster.region)
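
The shared helpers get_root_volume_id and get_tags_for_volume are used throughout these examples but not shown. A minimal sketch of both; note that the real get_root_volume_id takes the cluster OS, presumably to resolve the root device name, while this illustration reads RootDeviceName from the instance itself and leaves the os argument unused:

import boto3

def get_root_volume_id(instance_id, region, os):
    # Hypothetical sketch: find the EBS volume attached as the instance's root device.
    instance = boto3.client("ec2", region_name=region).describe_instances(
        InstanceIds=[instance_id]
    )["Reservations"][0]["Instances"][0]
    return next(
        mapping["Ebs"]["VolumeId"]
        for mapping in instance["BlockDeviceMappings"]
        if mapping["DeviceName"] == instance["RootDeviceName"]
    )

def get_tags_for_volume(volume_id, region):
    # Hypothetical sketch: return the tag list attached to an EBS volume.
    return boto3.client("ec2", region_name=region).describe_volumes(
        VolumeIds=[volume_id]
    )["Volumes"][0].get("Tags", [])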