# Example 1
def test_create_wrong_pcluster_version(region, pcluster_config_reader,
                                       pcluster_ami_without_standard_naming,
                                       clusters_factory):
    """Test error message when AMI provided was baked by a pcluster whose version is different from current version"""
    wrong_version = "2.8.1"
    current_version = get_installed_parallelcluster_version()
    logging.info("Asserting wrong_version is different from current_version")
    assert_that(current_version != wrong_version).is_true()

    # An AMI whose name omits 'aws-parallelcluster-<version>' slips past the CLI
    # version check, so the .bootstrapped version check in the Cookbook fires instead.
    mismatched_ami = pcluster_ami_without_standard_naming(wrong_version)
    config = pcluster_config_reader(custom_ami=mismatched_ami)
    cluster = clusters_factory(config, raise_on_error=False)

    assert_head_node_is_running(region, cluster)
    executor = RemoteCommandExecutor(cluster)

    logging.info("Verifying error in logs")
    expected_patterns = [
        "error_exit",
        fr"AMI was created.+{wrong_version}.+is.+used.+{current_version}",
    ]
    assert_errors_in_logs(executor, ["/var/log/cloud-init-output.log"], expected_patterns)
# Example 2
def test_create_wrong_os(region, os, pcluster_config_reader, clusters_factory,
                         architecture, request):
    """Test error message when os provide is different from the os of custom AMI"""
    # ubuntu1804 is specified in the config file but an AMI of ubuntu2004 is provided
    wrong_os = "ubuntu2004"
    logging.info("Asserting os fixture is different from wrong_os variable")
    assert_that(os != wrong_os).is_true()

    # Deliberately pick an AMI baked for a different OS than the one in the config.
    mismatched_ami = retrieve_latest_ami(
        region, wrong_os, ami_type="pcluster", architecture=architecture, request=request
    )
    config = pcluster_config_reader(custom_ami=mismatched_ami)
    cluster = clusters_factory(config, raise_on_error=False)

    assert_head_node_is_running(region, cluster)
    # SSH user must match the AMI's actual OS, not the (wrong) configured one.
    executor = RemoteCommandExecutor(cluster, username=get_username_for_os(wrong_os))

    logging.info("Verifying error in logs")
    expected_patterns = [
        "RuntimeError",
        fr"custom AMI.+{wrong_os}.+base.+os.+config file.+{os}",
    ]
    assert_errors_in_logs(executor, ["/var/log/chef-client.log"], expected_patterns)
def test_scheduler_plugin_integration(
    region, os, architecture, instance, pcluster_config_reader, s3_bucket_factory, clusters_factory, test_datadir
):
    """
    Test usage of a custom scheduler integration.

    End-to-end flow: upload the scheduler-plugin infrastructure template and an
    artifact to S3, create a cluster whose config references them, start a compute
    node, then run a battery of sub-checks (event handlers, artifact distribution,
    substack outputs, users, IMDS, cluster config, instance-types data, error log),
    and finally verify compute nodes are terminated when the cluster is deleted.
    """
    logging.info("Testing plugin scheduler integration.")

    # Setup:
    # Get EC2 client
    ec2_client = boto3.client("ec2", region_name=region)
    # Create bucket and upload the resources the scheduler plugin pulls from S3
    bucket_name = s3_bucket_factory()
    bucket = boto3.resource("s3", region_name=region).Bucket(bucket_name)
    for file in ["scheduler_plugin_infra.cfn.yaml", "artifact"]:
        bucket.upload_file(str(test_datadir / file), f"scheduler_plugin/{file}")
    # Create cluster (bucket name and user list are injected into the rendered config)
    cluster_config = pcluster_config_reader(
        bucket=bucket_name,
        another_instance=ANOTHER_INSTANCE_TYPE,
        user1=SCHEDULER_PLUGIN_USERS_LIST[0],
        user2=SCHEDULER_PLUGIN_USERS_LIST[1],
    )
    cluster = clusters_factory(cluster_config)
    # Verify head node is running
    assert_head_node_is_running(region, cluster)
    head_node = _get_ec2_instance_from_id(
        ec2_client, cluster.describe_cluster_instances(node_type="HeadNode")[0].get("instanceId")
    )
    # Command executor
    command_executor = RemoteCommandExecutor(cluster)
    # Start and wait for compute node to setup
    compute_node = _start_compute_node(ec2_client, region, cluster, command_executor)

    # Tests:
    # Test event handler execution
    _test_event_handler_execution(cluster, region, os, architecture, command_executor, head_node, compute_node)
    # Test artifacts are downloaded
    _test_artifacts_download(command_executor)
    # Test artifacts shared from head to compute node
    _test_artifacts_shared_from_head(command_executor, compute_node)
    # Test substack outputs (helper name "_test_subtack_outputs" carries a typo upstream)
    _test_subtack_outputs(command_executor)
    # Test users are created
    _test_users(command_executor, compute_node)
    # Test user imds
    _test_imds(command_executor)
    # Test cluster configuration
    _test_cluster_config(command_executor, cluster_config)
    # Test instance types data
    _test_instance_types_data(command_executor, instance)
    # Test error log
    _test_error_log(command_executor)
    # Test computes are terminated on cluster deletion
    cluster.delete()
    _test_compute_terminated(compute_node, region)
# Example 4
def test_create_imds_secured(
    imds_secured, users_allow_list, region, os, pcluster_config_reader, clusters_factory, architecture
):
    """
    Test IMDS access with different configurations.
    In particular, it also verifies that IMDS access is preserved on instance reboot.
    """
    def _check_cluster_state():
        # Head node must be up and the IMDS allow-list enforced as configured.
        assert_head_node_is_running(region, cluster)
        assert_aws_identity_access_is_correct(cluster, users_allow_list)

    ami = retrieve_latest_ami(region, os, ami_type="pcluster", architecture=architecture)
    rendered_config = pcluster_config_reader(custom_ami=ami, imds_secured=imds_secured)
    cluster = clusters_factory(rendered_config, raise_on_error=False)

    _check_cluster_state()

    # IMDS configuration must survive a head-node reboot.
    reboot_head_node(cluster)
    _check_cluster_state()