# Imports assumed from the aws-parallelcluster integration-test suite layout; the
# examples below are excerpts, so the exact module paths may differ in your checkout.
import logging

import boto3
from assertpy import assert_that
from remote_command_executor import RemoteCommandExecutor
from tests.common.assertions import assert_errors_in_logs
from tests.common.utils import get_installed_parallelcluster_version, retrieve_latest_ami
from utils import generate_stack_name


def test_build_image_wrong_pcluster_version(
    region,
    os,
    instance,
    pcluster_config_reader,
    architecture,
    pcluster_ami_without_standard_naming,
    images_factory,
    request,
):
    """Test error message when AMI provided was baked by a pcluster whose version is different from current version"""
    current_version = get_installed_parallelcluster_version()
    wrong_version = "2.8.1"
    logging.info("Asserting wrong_version is different from current_version")
    assert_that(current_version).is_not_equal_to(wrong_version)
    # Retrieve an AMI without 'aws-parallelcluster-<version>' in its name.
    # This bypasses the version check in the CLI, so we can exercise the version check
    # of the .bootstrapped file in the Cookbook.
    wrong_ami = pcluster_ami_without_standard_naming(wrong_version)

    image_config = pcluster_config_reader(config_file="image.config.yaml",
                                          parent_image=wrong_ami,
                                          instance_type=instance)
    image_id = generate_stack_name(
        "integ-tests-build-image-wrong-version",
        request.config.getoption("stackname_suffix"))

    image = images_factory(image_id, image_config, region)

    _test_build_image_failed(image)
    # The ImageBuilder log stream is named '<version>/1'; derive it from the installed
    # version rather than hardcoding it.
    log_stream_name = f"{current_version}/1"
    log_data = " ".join(
        log["message"]
        for log in image.get_log_events(log_stream_name)["events"])
    assert_that(log_data).matches(
        fr"AMI was created.+{wrong_version}.+is.+used.+{current_version}")


# Example 2
def test_create_wrong_pcluster_version(region, pcluster_config_reader,
                                       clusters_factory,
                                       pcluster_ami_without_standard_naming):
    """Test error message when AMI provided was baked by a pcluster whose version is different from current version"""
    current_version = get_installed_parallelcluster_version()
    wrong_version = "2.8.1"
    logging.info("Asserting wrong_version is different from current_version")
    assert_that(current_version).is_not_equal_to(wrong_version)
    # Retrieve an AMI without 'aws-parallelcluster-<version>' in its name.
    # This bypasses the version check in the CLI, so we can exercise the version check
    # of the .bootstrapped file in the Cookbook.
    wrong_ami = pcluster_ami_without_standard_naming(wrong_version)
    cluster_config = pcluster_config_reader(custom_ami=wrong_ami)
    cluster = clusters_factory(cluster_config, raise_on_error=False)

    _assert_head_node_is_running(region, cluster)
    remote_command_executor = RemoteCommandExecutor(cluster)

    logging.info("Verifying error in logs")
    assert_errors_in_logs(
        remote_command_executor,
        ["/var/log/cloud-init-output.log"],
        [
            "error_exit",
            fr"AMI was created.+{wrong_version}.+is.+used.+{current_version}"
        ],
    )
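

# `assert_errors_in_logs` (imported above) greps the given log files on the head node for
# the expected patterns. For reference, a minimal sketch of an equivalent helper, assuming
# RemoteCommandExecutor.run_remote_command() returns a result object with a `stdout`
# attribute; this is illustrative, not the suite's implementation:
import re


def _assert_errors_in_logs_sketch(remote_command_executor, log_files, expected_patterns):
    """Assert every expected pattern appears somewhere in the combined remote logs."""
    combined_logs = ""
    for log_file in log_files:
        combined_logs += remote_command_executor.run_remote_command(f"sudo cat {log_file}").stdout
    for pattern in expected_patterns:
        assert_that(re.search(pattern, combined_logs, re.DOTALL)).is_not_none()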


# Example 3
def test_createami_wrong_pcluster_version(
        region, instance, os, request, pcluster_config_reader, vpc_stack,
        pcluster_ami_without_standard_naming):
    """Test error message when AMI provided was baked by a pcluster whose version is different from current version"""
    cluster_config = pcluster_config_reader()
    current_version = get_installed_parallelcluster_version()
    wrong_version = "2.8.1"
    logging.info("Asserting wrong_version is different from current_version")
    assert_that(current_version).is_not_equal_to(wrong_version)
    # Retrieve an AMI without 'aws-parallelcluster-<version>' in its name.
    # This bypasses the version check in the CLI, so we can exercise the version check
    # of the .bootstrapped file in the Cookbook.
    wrong_ami = pcluster_ami_without_standard_naming(wrong_version)

    command = _compose_command(region, instance, os, request, vpc_stack,
                               wrong_ami, cluster_config)
    _createami_and_assert_error(
        command,
        fr"AMI was created.+{wrong_version}.+is.+used.+{current_version}")


# Example 4
def _test_create_cluster(clusters_factory, cluster_config, request):
    cluster = clusters_factory(cluster_config, wait=False)
    if request.config.getoption("cluster"):
        return cluster

    expected_creation_response = {
        "clusterName": cluster.name,
        "cloudformationStackStatus": "CREATE_IN_PROGRESS",
        "cloudformationStackArn": cluster.cfn_stack_arn,
        "region": cluster.region,
        "version": get_installed_parallelcluster_version(),
        "clusterStatus": "CREATE_IN_PROGRESS",
    }
    assert_that(cluster.creation_response.get("cluster")).is_equal_to(expected_creation_response)
    _test_list_cluster(cluster.name, "CREATE_IN_PROGRESS")
    logging.info("Waiting for CloudFormation stack creation completion")
    cloud_formation = boto3.client("cloudformation", region_name=cluster.region)
    waiter = cloud_formation.get_waiter("stack_create_complete")
    waiter.wait(StackName=cluster.name)
    return cluster
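

# `_test_list_cluster` checks that the cluster appears with the expected status. A minimal
# sketch, assuming the ParallelCluster 3 CLI's JSON output with a top-level "clusters"
# array and a region taken from the environment (illustrative, not the suite's helper):
import json
import subprocess


def _test_list_cluster(cluster_name, expected_status):
    output = subprocess.run(
        ["pcluster", "list-clusters"], capture_output=True, text=True, check=True
    ).stdout
    clusters = json.loads(output).get("clusters", [])
    matching = [c for c in clusters if c.get("clusterName") == cluster_name]
    assert_that(matching).is_not_empty()
    assert_that(matching[0].get("clusterStatus")).is_equal_to(expected_status)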


# Example 5
def test_update_compute_ami(region, os, pcluster_config_reader,
                            clusters_factory, test_datadir):
    # Create cluster with initial configuration
    ec2 = boto3.client("ec2", region)
    pcluster_ami_id = retrieve_latest_ami(region, os, ami_type="pcluster")
    init_config_file = pcluster_config_reader(
        global_custom_ami=pcluster_ami_id)
    cluster = clusters_factory(init_config_file)
    instances = cluster.get_cluster_instance_ids(node_type="Compute")
    logging.info(instances)
    _check_instance_ami_id(ec2, instances, pcluster_ami_id)

    # Update the cluster with the DLAMI as the custom AMI for the compute queue.
    # FIXME: this does not work on release branches; fix it during the release process.
    filters = [
        {
            "Name": "name",
            "Values": [
                f"dlami-aws-parallelcluster-{get_installed_parallelcluster_version()}-amzn2-hvm-x86_64*"
            ],
        }
    ]
    pcluster_dlami_id = (
        ec2.describe_images(Filters=filters, Owners=["self"]).get("Images")[0]["ImageId"]
    )
    updated_config_file = pcluster_config_reader(
        config_file="pcluster.config.update.yaml",
        global_custom_ami=pcluster_ami_id,
        custom_ami=pcluster_dlami_id)

    # stop compute fleet before updating queue image
    cluster.stop()
    cluster.update(str(updated_config_file), force_update="true")
    instances = cluster.get_cluster_instance_ids(node_type="Compute")
    logging.info(instances)
    _check_instance_ami_id(ec2, instances, pcluster_dlami_id)
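

# `_check_instance_ami_id` verifies that every instance was launched from the expected
# AMI. A minimal sketch using plain boto3 describe_instances (illustrative):
def _check_instance_ami_id(ec2_client, instance_ids, expected_ami_id):
    response = ec2_client.describe_instances(InstanceIds=instance_ids)
    for reservation in response["Reservations"]:
        for instance in reservation["Instances"]:
            assert_that(instance["ImageId"]).is_equal_to(expected_ami_id)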