Example #1
def _validate(self, os, scheduler):
    supported_os = get_supported_os_for_scheduler(scheduler)
    if os not in supported_os:
        self._add_failure(
            f"{scheduler} scheduler supports the following operating systems: {supported_os}.",
            FailureLevel.ERROR,
        )
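For reference, a minimal standalone sketch of the same check outside a validator class; the import path and the exact contents of the returned list are assumptions, not taken from the example above:

from pcluster.utils import get_supported_os_for_scheduler  # import path assumed

def check_os(os_name, scheduler):
    # Mirror the validator above: flag an OS that the scheduler does not support.
    supported_os = get_supported_os_for_scheduler(scheduler)
    if os_name not in supported_os:
        print(f"ERROR: {scheduler} scheduler supports the following operating systems: {supported_os}.")
        return False
    return True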
Example #2
def prompt_os(self):
    """Ask for the OS, if necessary."""
    if not self.is_aws_batch:
        self.base_os = prompt_iterable(
            "Operating System",
            get_supported_os_for_scheduler(self.scheduler),
            default_value=self.cluster_section.get_param_value("base_os"),
        )
Example #3
def test_get_supported_os_for_scheduler(scheduler, supported_oses):
    """Verify that the expected OSes are supported based on a given architecture."""
    assert_that(utils.get_supported_os_for_scheduler(scheduler)).contains_only(
        *supported_oses).does_not_contain_duplicates()
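The test takes scheduler / supported_oses pairs as arguments; a minimal sketch of how it could be parametrized with pytest follows. The scheduler/OS pairs below are illustrative assumptions that depend on the pcluster version, not values taken from the source:

import pytest

# Hypothetical parametrization placed on top of the test above; the
# scheduler/OS pairs are assumptions for illustration only.
@pytest.mark.parametrize(
    "scheduler, supported_oses",
    [
        ("awsbatch", ["alinux2"]),                        # assumed pair
        ("slurm", ["alinux2", "centos7", "ubuntu1804"]),  # assumed pair
    ],
)
def test_get_supported_os_for_scheduler(scheduler, supported_oses):
    assert_that(utils.get_supported_os_for_scheduler(scheduler)).contains_only(*supported_oses)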
Example #4
def configure(args):  # noqa: C901
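    """Interactively prompt for cluster settings and write them to a configuration file."""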

    config_file_path = args.config
    # Check for an invalid path (e.g., a directory)
    if os.path.exists(config_file_path):
        error(
            f"A file/folder exists at {config_file_path}. Please specify a different file path"
        )

    print(f"INFO: Configuration file {config_file_path} will be written.")
    print("Press CTRL-C to interrupt the procedure.\n\n")

    if not args.region:
        # Use the built-in boto3 regions as the available options
        available_regions = get_regions()
        aws_region_name = prompt_iterable(
            "AWS Region ID",
            available_regions,
            default_value=boto3.session.Session().region_name)
        # Set the provided region in the OS environment so suggestions and validations from here on use it
        os.environ["AWS_DEFAULT_REGION"] = aws_region_name
    else:
        os.environ["AWS_DEFAULT_REGION"] = args.region

    # Get the key name from the current region, if any
    available_keys = _get_keys()
    key_name = prompt_iterable("EC2 Key Pair Name", available_keys)

    scheduler = prompt_iterable("Scheduler", SUPPORTED_SCHEDULERS)

    if scheduler == "awsbatch":
        base_os = "alinux2"
    else:
        base_os = prompt_iterable("Operating System",
                                  get_supported_os_for_scheduler(scheduler))

    default_instance_type = AWSApi.instance().ec2.get_default_instance_type()
    head_node_instance_type = prompt(
        "Head node instance type",
        # pcluster doesn't support CentOS7 with ARM
        lambda x: x in AWSApi.instance().ec2.list_instance_types()
        and (base_os != "centos7"
             or AWSApi.instance().ec2.get_instance_type_info(x).supported_architecture()[0] == "x86_64"),
        default_value=default_instance_type,
    )
    if scheduler == "awsbatch":
        number_of_queues = 1
        size_name = "vCPU"
    else:
        number_of_queues = int(
            prompt(
                "Number of queues",
                lambda x: str(x).isdigit() and 1 <= int(x) <= MAX_NUMBER_OF_QUEUES,
                default_value=1,
            ))
        size_name = "instance count"

    queues = []
    queue_names = []
    compute_instance_types = []
    cluster_size = 0  # Sum of the maximum counts across all compute resources
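    # Prompt for each queue's name and its compute resources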
    for queue_index in range(number_of_queues):
        while True:
            queue_name = prompt(
                f"Name of queue {queue_index+1}",
                validator=lambda x: len(NameValidator().execute(x)) == 0,
                default_value=f"queue{queue_index+1}",
            )
            if queue_name not in queue_names:
                break
            print(
                f"Error: The name {queue_name} cannot be used for multiple queues. Please insert a different queue "
                "name.")

        if scheduler == "awsbatch":
            number_of_compute_resources = 1
        else:
            number_of_compute_resources = int(
                prompt(
                    f"Number of compute resources for {queue_name}",
                    validator=lambda x: str(x).isdigit() and 1 <= int(x) <= MAX_NUMBER_OF_COMPUTE_RESOURCES,
                    default_value=1,
                ))
        compute_resources = []
        for compute_resource_index in range(number_of_compute_resources):
            if scheduler != "awsbatch":
                while True:
                    compute_instance_type = prompt(
                        f"Compute instance type for compute resource {compute_resource_index+1} in {queue_name}",
                        validator=lambda x: x in AWSApi.instance().ec2.list_instance_types(),
                        default_value=default_instance_type,
                    )
                    if compute_instance_type not in [
                            compute_resource["InstanceType"]
                            for compute_resource in compute_resources
                    ]:
                        break
                    print(
                        f"Error: Instance type {compute_instance_type} cannot be specified for multiple compute "
                        "resources in the same queue. Please insert a different instance type."
                    )
                compute_resource_name = re.sub(r"[^A-Za-z0-9]", "", compute_instance_type)
            min_cluster_size = DEFAULT_MIN_COUNT
            max_cluster_size = int(
                prompt(
                    "Maximum {0}".format(size_name),
                    validator=lambda x: str(x).isdigit() and int(x) >= min_cluster_size,  # pylint: disable=W0640
                    default_value=DEFAULT_MAX_COUNT,
                ))
            if scheduler == "awsbatch":
                compute_resources.append({
                    "Name": "optimal",
                    "InstanceTypes": ["optimal"],
                    "MinvCpus": min_cluster_size,
                    "DesiredvCpus": min_cluster_size,
                    "MaxvCpus": max_cluster_size,
                })
            else:
                compute_resources.append({
                    "Name": compute_resource_name,
                    "InstanceType": compute_instance_type,
                    "MinCount": min_cluster_size,
                    "MaxCount": max_cluster_size,
                })
                compute_instance_types.append(compute_instance_type)

            cluster_size += max_cluster_size  # FIXME: is this the right calculation for awsbatch?
        queue_names.append(queue_name)
        queues.append({
            "Name": queue_name,
            "ComputeResources": compute_resources
        })

    vpc_parameters = _create_vpc_parameters(scheduler, head_node_instance_type,
                                            compute_instance_types,
                                            cluster_size)

    # End of the prompts. The code below assembles the config and writes it to file.
    for queue in queues:
        queue["Networking"] = {
            "SubnetIds": [vpc_parameters["compute_subnet_id"]]
        }

    head_node_config = {
        "InstanceType": head_node_instance_type,
        "Networking": {
            "SubnetId": vpc_parameters["head_node_subnet_id"]
        },
        "Ssh": {
            "KeyName": key_name
        },
    }
    if scheduler == "awsbatch":
        scheduler_prefix = "AwsBatch"
        head_node_config["Imds"] = {"Secured": False}
    else:
        scheduler_prefix = scheduler.capitalize()

    result = {
        "Region": os.environ.get("AWS_DEFAULT_REGION"),
        "Image": {
            "Os": base_os
        },
        "HeadNode": head_node_config,
        "Scheduling": {
            "Scheduler": scheduler,
            f"{scheduler_prefix}Queues": queues
        },
    }

    _write_configuration_file(config_file_path, result)
    print(
        "You can edit your configuration file or simply run 'pcluster create-cluster --cluster-configuration "
        f"{config_file_path} --cluster-name cluster-name --region {get_region()}' to create your cluster."
    )
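For illustration, a minimal sketch of how the assembled result dictionary could be serialized, assuming _write_configuration_file ultimately emits YAML (the actual helper in pcluster may differ):

import yaml  # PyYAML, assumed available

# Hypothetical stand-in for _write_configuration_file: dump the dict as YAML,
# preserving the key order built up above.
with open(config_file_path, "w", encoding="utf-8") as config_file:
    yaml.dump(result, config_file, sort_keys=False)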