def __init_sections_from_cfn(self, cluster_name):
    """Initialize config sections from the CloudFormation stack of an existing cluster.

    Retrieves the cluster stack, optionally enforces that the stack was created
    with the currently installed ParallelCluster version, then loads the cluster
    section from the stack Parameters/Tags (plus the stored json config, unless
    json loading is skipped).

    :param cluster_name: name of the existing cluster whose stack is read
    """
    try:
        self.cfn_stack = get_stack(get_stack_name(cluster_name))
        # Refuse to operate on clusters created by a different pcluster version.
        if self.__enforce_version and get_stack_version(
                self.cfn_stack) != get_installed_version():
            self.error(
                "The cluster {0} was created with a different version of ParallelCluster: {1}. "
                "Installed version is {2}. This operation may only be performed using the same ParallelCluster "
                "version used to create the cluster.".format(
                    cluster_name, get_stack_version(self.cfn_stack), get_installed_version()))
        cfn_params = self.cfn_stack.get("Parameters")
        # The json config is optional; it is skipped when explicitly disabled.
        json_params = self.__load_json_config(
            self.cfn_stack) if not self.__skip_load_json_config else None
        cfn_tags = self.cfn_stack.get("Tags")
        # Infer cluster model and load cluster section accordingly
        cluster_model = infer_cluster_model(cfn_stack=self.cfn_stack)
        section = ClusterCfnSection(
            section_definition=cluster_model.get_cluster_section_definition(),
            pcluster_config=self)
        self.add_section(section)
        section.from_storage(StorageData(cfn_params, json_params, cfn_tags))
    except ClientError as e:
        # Boto3 failures (e.g. stack not found) surface as one user-facing error.
        self.error(
            "Unable to retrieve the configuration of the cluster '{0}'.\n{1}"
            .format(cluster_name, e.response.get("Error").get("Message")))
def check_cluster_version(cluster: Cluster, exact_match: bool = False) -> bool:
    """Tell whether the cluster's stack version is compatible with the installed CLI.

    :param cluster: cluster whose stack version tag is inspected
    :param exact_match: require the versions to be identical instead of a range check
    :return: False when the stack has no version; otherwise the comparison result
    """
    stack_version = cluster.stack.version
    if not stack_version:
        return False
    created = packaging.version.parse(stack_version)
    installed = packaging.version.parse(get_installed_version())
    if exact_match:
        return created == installed
    # Compatible range: created in [3.0.0, installed]
    return packaging.version.parse("3.0.0") <= created <= installed
def _validate(self, custom_ami: str):
    """Validate the pcluster-related tags of a custom AMI.

    Emits a WARNING when the AMI has no pcluster version tag (it may not have
    been created by pcluster), an ERROR when the version tag does not match the
    installed version, and an ERROR when the AMI lacks the build-status tag.
    Note the elif chain: the build-status check only runs when the version tag
    is present and matches the installed version.

    :param custom_ami: the AMI id whose tags are inspected
    """
    tags = AWSApi.instance().ec2.describe_image(custom_ami).tags
    tags_dict = {}
    if tags:  # tags can be None if there is no tag
        for tag in tags:
            tags_dict[tag["Key"]] = tag["Value"]
    current_version = get_installed_version()
    if PCLUSTER_VERSION_TAG not in tags_dict:
        self._add_failure(
            ("The custom AMI may not have been created by pcluster. "
             "You can ignore this warning if the AMI is shared or copied from another pcluster AMI. "
             "If the AMI is indeed not created by pcluster, cluster creation will fail. "
             "If the cluster creation fails, please go to "
             "https://docs.aws.amazon.com/parallelcluster/latest/ug/troubleshooting.html"
             "#troubleshooting-stack-creation-failures for troubleshooting."
             ),
            FailureLevel.WARNING,
        )
    elif tags_dict[PCLUSTER_VERSION_TAG] != current_version:
        self._add_failure(
            (f"The custom AMI was created with pcluster {tags_dict[PCLUSTER_VERSION_TAG]}, "
             f"but is trying to be used with pcluster {current_version}. "
             f"Please either use an AMI created with {current_version} or"
             f" change your ParallelCluster to {tags_dict[PCLUSTER_VERSION_TAG]}"
             ),
            FailureLevel.ERROR,
        )
    elif PCLUSTER_IMAGE_BUILD_STATUS_TAG not in tags_dict:
        self._add_failure(
            ("The custom AMI did not pass the tests in image builder. "
             "Cluster created from this AMI may have unexpected behaviors."
             ),
            FailureLevel.ERROR,
        )
def upload_dashboard_resource(bucket_name, artifact_directory, pcluster_config, json_params, cfn_params):
    """Render the CloudWatch Dashboard substack template and upload it to the cluster bucket.

    The template is fetched from the configured `cw_dashboard_template_url` or,
    when unset, from the default versioned templates bucket; it is rendered with
    the json/cfn parameters and uploaded under the cluster artifact directory.

    :param bucket_name: destination S3 bucket
    :param artifact_directory: S3 key prefix of the cluster artifacts
    :param pcluster_config: config providing the region and the optional template url
    :param json_params: json cluster parameters, passed to template rendering
    :param cfn_params: CloudFormation parameters, passed to template rendering
    :raises: re-raises any failure during template download/rendering
    """
    params = {"json_params": json_params, "cfn_params": cfn_params}
    cw_dashboard_template_url = pcluster_config.get_section("cluster").get_param_value(
        "cw_dashboard_template_url"
    ) or "{bucket_url}/templates/cw-dashboard-substack-{version}.cfn.yaml".format(
        bucket_url=utils.get_bucket_url(pcluster_config.region),
        version=utils.get_installed_version(),
    )
    try:
        file_contents = utils.read_remote_file(cw_dashboard_template_url)
        rendered_template = utils.render_template(file_contents, params, {})
    except Exception as e:
        LOGGER.error(
            "Error when generating CloudWatch Dashboard template from path %s: %s", cw_dashboard_template_url, e
        )
        raise
    try:
        boto3.client("s3").put_object(
            Bucket=bucket_name,
            Body=rendered_template,
            Key="{artifact_directory}/templates/cw-dashboard-substack.rendered.cfn.yaml".format(
                artifact_directory=artifact_directory
            ),
        )
    except Exception as e:
        # NOTE(review): the upload failure is logged but NOT re-raised, unlike the
        # analogous upload_hit_resources helper — confirm this best-effort behavior
        # (dashboard being optional) is intentional.
        LOGGER.error("Error when uploading CloudWatch Dashboard template to bucket %s: %s", bucket_name, e)
def _get_default_template_url(region):
    """Build the default CloudFormation template URL for the given region.

    Chinese regions get the ".cn" domain suffix; the URL embeds the installed
    ParallelCluster version.
    """
    domain_suffix = ".cn" if region.startswith("cn") else ""
    template = (
        "https://{REGION}-aws-parallelcluster.s3.{REGION}.amazonaws.com{SUFFIX}/templates/"
        "aws-parallelcluster-{VERSION}.cfn.json"
    )
    return template.format(REGION=region, SUFFIX=domain_suffix, VERSION=utils.get_installed_version())
def _add_imagebuilder_image_recipe(self, build_tags, components, lambda_cleanup_policy_statements):
    """Add the EC2 ImageBuilder ImageRecipe resource to the stack.

    :param build_tags: tags applied to the recipe
    :param components: ordered component configurations baked into the recipe
    :param lambda_cleanup_policy_statements: extended with a DeleteImageRecipe
        statement when no custom cleanup role is configured
    :return: the created CfnImageRecipe resource
    """
    # ImageBuilderImageRecipe
    image_recipe_resource = imagebuilder.CfnImageRecipe(
        self,
        "ImageRecipe",
        name=self._build_image_recipe_name(),
        # base_version_only strips any suffix from the installed version —
        # presumably because ImageBuilder only accepts plain semver; confirm.
        version=utils.get_installed_version(base_version_only=True),
        tags=build_tags,
        parent_image=self.config.build.parent_image,
        components=components,
        block_device_mappings=[
            imagebuilder.CfnImageRecipe.InstanceBlockDeviceMappingProperty(
                device_name=self._get_root_device_name(),
                ebs=self._set_ebs_volume(),
            )
        ],
    )
    if not self.custom_cleanup_lambda_role:
        # Let the default cleanup lambda delete the recipe it created.
        self._add_resource_delete_policy(
            lambda_cleanup_policy_statements,
            ["imagebuilder:DeleteImageRecipe"],
            [
                self.format_arn(
                    service="imagebuilder",
                    resource="image-recipe",
                    resource_name="{0}/*".format(
                        self._build_image_recipe_name(to_lower=True)),
                )
            ],
        )
    return image_recipe_resource
def get_default_instance_tags(
    stack_name: str,
    config: BaseClusterConfig,
    node: Union[HeadNode, BaseComputeResource],
    node_type: str,
    shared_storage_ids: dict,
    raw_dict: bool = False,
):
    """Return a list of default tags to be used for instances.

    :param stack_name: cluster stack name, stored in the cluster-name tag
    :param config: cluster configuration the tag values are read from
    :param node: head node or compute resource; architecture/efa read if present
    :param node_type: value for the Name and node-type tags
    :param shared_storage_ids: map from SharedStorageType to lists of storage ids
    :param raw_dict: when True return the plain dict instead of CfnTag objects
    """
    tags = {
        "Name": node_type,
        PCLUSTER_CLUSTER_NAME_TAG: stack_name,
        PCLUSTER_NODE_TYPE_TAG: node_type,
        "parallelcluster:attributes": "{BaseOS}, {Scheduler}, {Version}, {Architecture}".format(
            BaseOS=config.image.os,
            Scheduler=config.scheduling.scheduler,
            Version=get_installed_version(),
            # Some node objects may not expose an architecture attribute.
            Architecture=node.architecture if hasattr(node, "architecture") else "NONE",
        ),
        "parallelcluster:networking": "EFA={0}".format(
            "true" if hasattr(node, "efa") and node.efa and node.efa.enabled else "NONE"
        ),
        "parallelcluster:filesystem": "efs={efs}, multiebs={multiebs}, raid={raid}, fsx={fsx}".format(
            efs=len(shared_storage_ids[SharedStorageType.EFS]),
            multiebs=len(shared_storage_ids[SharedStorageType.EBS]),
            raid=len(shared_storage_ids[SharedStorageType.RAID]),
            fsx=len(shared_storage_ids[SharedStorageType.FSX]),
        ),
    }
    if config.is_intel_hpc_platform_enabled:
        tags["parallelcluster:intel-hpc"] = "enable_intel_hpc_platform=true"
    return tags if raw_dict else [CfnTag(key=key, value=value) for key, value in tags.items()]
def _image_info_to_ami_info(image):
    """Map an EC2 image description onto the AmiInfo model."""
    parsed_os = Ec2Client.extract_os_from_official_image_name(image.name)
    return AmiInfo(
        ami_id=image.id,
        name=image.name,
        os=parsed_os,
        architecture=image.architecture,
        version=get_installed_version(),
    )
def _list_official_images_expected_response(version, os, architecture):
    """Build the expected API response dict for one official image."""
    image_name = f"aws-parallelcluster-{version}-{OS_TO_IMAGE_NAME_PART_MAP[os]}-{architecture}-other"
    return {
        "amiId": "ami-test",
        "os": os,
        "name": image_name,
        "architecture": architecture,
        "version": get_installed_version(),
    }
def _get_target_config_tags_list(target_config):
    """Construct the target config's tag list.

    A version tag is added at cluster creation time; it is included here too so
    that the comparison does not report an unintended change.
    """
    merged_tags = {"Version": utils.get_installed_version()}
    configured_tags = target_config.get_section("cluster").get_param_value("tags")
    if configured_tags:
        merged_tags.update(configured_tags)
    return [{"Key": name, "Value": value} for name, value in merged_tags.items()]
def _add_version_tag(self):
    """Add version tag to the stack."""
    existing_tags = self.config.tags or []
    # Drop any stale version tag before appending the current one.
    self.config.tags = [tag for tag in existing_tags if tag.key != PCLUSTER_VERSION_TAG]
    self.config.tags.append(Tag(key=PCLUSTER_VERSION_TAG, value=get_installed_version()))
def _add_custom_components(self, components, policy_statements, components_resources):
    """Set custom component in imagebuilder cfn template.

    Components of type "arn" are referenced directly by their ARN; script
    components get a dedicated CfnComponent resource wrapping the script and,
    when no custom cleanup role is configured, a DeleteComponent statement.
    Script components are numbered by their order among script (non-arn)
    components: appended-so-far minus the arn ones.
    """
    initial_components_len = len(components)
    arn_components_len = 0
    for custom_component in self.config.build.components:
        # Index of the next script component.
        custom_components_len = len(
            components) - initial_components_len - arn_components_len
        if custom_component.type == "arn":
            components.append(
                imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
                    component_arn=custom_component.value))
            arn_components_len += 1
        else:
            component_script_name = custom_component.value.split("/")[-1]
            component_id = "ScriptComponent" + str(custom_components_len)
            custom_component_resource = imagebuilder.CfnComponent(
                self,
                component_id,
                name=self._build_resource_name(
                    IMAGEBUILDER_RESOURCE_NAME_PREFIX +
                    "-Script-{0}".format(str(custom_components_len))),
                version=utils.get_installed_version(
                    base_version_only=True),
                description=
                "This component is custom component for script, script name is {0}, script url is "
                "{1}".format(component_script_name, custom_component.value),
                platform="Linux",
                data=wrap_script_to_component(custom_component.value),
            )
            components.append(
                imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
                    component_arn=Fn.ref(component_id)))
            components_resources.append(custom_component_resource)
            if not self.custom_cleanup_lambda_role:
                # Let the default cleanup lambda delete this script component.
                self._add_resource_delete_policy(
                    policy_statements,
                    ["imagebuilder:DeleteComponent"],
                    [
                        self.format_arn(
                            service="imagebuilder",
                            resource="component",
                            resource_name="{0}/*".format(
                                self._build_resource_name(
                                    IMAGEBUILDER_RESOURCE_NAME_PREFIX +
                                    "-Script-{0}".format(
                                        str(custom_components_len)),
                                    to_lower=True,
                                )),
                        )
                    ],
                )
def _get_official_image_name_prefix(self, os, architecture):
    """Return the prefix of the current official image, for the provided os-architecture combination."""
    os_suffix = {
        "alinux2": "amzn2-hvm",
        "centos7": "centos7-hvm",
        "centos8": "centos8-hvm",
        "ubuntu1804": "ubuntu-1804-lts-hvm",
    }[os]
    return "aws-parallelcluster-{version}-{suffix}-{arch}".format(
        version=get_installed_version(), suffix=os_suffix, arch=architecture
    )
def _generate_artifact_dir(self):
    """
    Generate artifact directory in S3 bucket.

    cluster artifact dir is generated before cfn stack creation and only generate once.
    artifact_directory: e.g. parallelcluster/{version}/clusters/{cluster_name}-jfr4odbeonwb1w5k
    """
    randomized_dir_name = generate_random_name_with_prefix(self.name)
    path_parts = [
        PCLUSTER_S3_ARTIFACTS_DICT.get("root_directory"),
        get_installed_version(),
        PCLUSTER_S3_ARTIFACTS_DICT.get("root_cluster_directory"),
        randomized_dir_name,
    ]
    self.__s3_artifact_dir = "/".join(path_parts)
def _generate_artifact_dir(self):
    """
    Generate artifact directory in S3 bucket.

    Image artifact dir is generated before cfn stack creation and only generate once.
    artifact_directory: e.g. parallelcluster/{version}/images/{image_id}-jfr4odbeonwb1w5k
    """
    randomized_dir_name = generate_random_name_with_prefix(self.image_id)
    path_parts = [
        self._s3_artifacts_dict.get("root_directory"),
        get_installed_version(),
        self._s3_artifacts_dict.get("root_image_directory"),
        randomized_dir_name,
    ]
    self.__s3_artifact_dir = "/".join(path_parts)
def _add_cfn_parameters(self):
    """Add the CloudFormation parameters consumed by the imagebuilder stack.

    Exposes cookbook version/url, Cinc installer url, the Chef DNA json and
    the update-OS flag so they can be overridden at stack level.
    """
    if (self.config.dev_settings and self.config.dev_settings.cookbook
            and self.config.dev_settings.cookbook.chef_cookbook):
        dev_settings_cookbook_value = self.config.dev_settings.cookbook.chef_cookbook
        # s3:// cookbook urls are presigned so the build instance can download them.
        custom_chef_cookbook = (
            create_s3_presigned_url(dev_settings_cookbook_value)
            if dev_settings_cookbook_value.startswith("s3://") else
            dev_settings_cookbook_value)
    else:
        custom_chef_cookbook = ""
    CfnParameter(
        self,
        "CfnParamCookbookVersion",
        type="String",
        default=utils.get_installed_version(),
        description="CookbookVersion",
    )
    CfnParameter(self,
                 "CfnParamChefCookbook",
                 type="String",
                 default=custom_chef_cookbook,
                 description="ChefCookbook")
    CfnParameter(self,
                 "CfnParamCincInstaller",
                 type="String",
                 default="",
                 description="CincInstaller")
    CfnParameter(
        self,
        "CfnParamChefDnaJson",
        type="String",
        default=ImageBuilderExtraChefAttributes(
            self.config.dev_settings).dump_json(),
        description="ChefAttributes",
    )
    CfnParameter(
        self,
        "CfnParamUpdateOsAndReboot",
        type="String",
        default="true" if self.config.build and self.config.build.update_os_packages
        and self.config.build.update_os_packages.enabled else "false",
        description="UpdateOsAndReboot",
    )
def _get_image_tags(self):
    """Get image tags.

    Merges the user-configured image tags with the pcluster bookkeeping tags
    (image name/id, version, bucket/artifact dir, build log, config url) and
    returns them as a plain {key: value} dict.
    """
    image_tags = copy.deepcopy(self.config.image.tags) if self.config.image and self.config.image.tags else []
    tag_list = [
        {
            "key": PCLUSTER_IMAGE_NAME_TAG,
            "value": self.config.image.name if self.config.image and self.config.image.name else self.image_id,
        },
        {"key": PCLUSTER_VERSION_TAG, "value": utils.get_installed_version()},
        {"key": PCLUSTER_IMAGE_ID_TAG, "value": self.image_id},
        {"key": PCLUSTER_S3_BUCKET_TAG, "value": self.bucket.name},
        {"key": PCLUSTER_S3_IMAGE_DIR_TAG, "value": self.bucket.artifact_directory},
        {"key": PCLUSTER_IMAGE_BUILD_LOG_TAG, "value": self._get_log_group_arn()},
        {"key": PCLUSTER_IMAGE_CONFIG_TAG, "value": self.bucket.get_config_s3_url("image-config.yaml")},
    ]
    for tag in tag_list:
        image_tags.append(BaseTag(key=tag.get("key"), value=tag.get("value")))
    # Later entries win on key collision, so bookkeeping tags override user tags.
    return {tag.key: tag.value for tag in image_tags}
def update_cluster(
    cluster_config: str,
    cluster_name: str,
    region: str,
    suppress_validators: bool = False,
    validation_failure_level: FailureLevel = FailureLevel.ERROR,
    force: bool = False,
):
    """
    Update existing cluster.

    :param cluster_config: cluster configuration (str)
    :param cluster_name: the name to assign to the cluster
    :param region: AWS region
    :param suppress_validators: suppress all configuration validators when True
    :param validation_failure_level: minimum validation level that causes a failure
    :param force: set to True to force stack update
    :return: ClusterInfo on success, an ApiFailure wrapping the error otherwise
    """
    try:
        if region:
            os.environ["AWS_DEFAULT_REGION"] = region
        # Check if stack version matches with running version.
        cluster = Cluster(cluster_name)
        installed_version = get_installed_version()
        if cluster.stack.version != installed_version:
            raise ClusterActionError(
                "The cluster was created with a different version of "
                f"ParallelCluster: {cluster.stack.version}. Installed version is {installed_version}. "
                "This operation may only be performed using the same ParallelCluster "
                "version used to create the cluster.")
        validator_suppressors = set()
        if suppress_validators:
            validator_suppressors.add(AllValidatorsSuppressor())
        cluster.update(cluster_config, validator_suppressors,
                       validation_failure_level, force)
        # TODO add dryrun
        return ClusterInfo(cluster.stack)
    except ConfigValidationError as e:
        return ApiFailure(str(e), validation_failures=e.validation_failures)
    except ClusterUpdateError as e:
        return ApiFailure(str(e), update_changes=e.update_changes)
    except Exception as e:
        # Any other failure is reported as a generic API failure.
        return ApiFailure(str(e))
def _get_cfn_tags(self):
    """Get cfn tags.

    Merges the user-supplied build tags with the pcluster bookkeeping tags
    (image name/id, version, bucket/artifact dir, build log, config url) and
    returns them as CloudFormation Key/Value dicts.
    """
    cfn_tags = copy.deepcopy(self.config.build.tags) or []
    self.__config_url = self.bucket.get_config_s3_url(self._s3_artifacts_dict.get("config_name"))
    tag_list = [
        {
            "key": PCLUSTER_IMAGE_NAME_TAG,
            "value": self.config.image.name if self.config.image and self.config.image.name else self.image_id,
        },
        {"key": PCLUSTER_VERSION_TAG, "value": get_installed_version()},
        {"key": PCLUSTER_IMAGE_ID_TAG, "value": self.image_id},
        {"key": PCLUSTER_S3_BUCKET_TAG, "value": self.bucket.name},
        {"key": PCLUSTER_S3_IMAGE_DIR_TAG, "value": self.s3_artifact_dir},
        # Bug fix: call the method — previously the bound method object itself was
        # stored as the tag value instead of the log group ARN string (compare the
        # sibling _get_image_tags, which calls self._get_log_group_arn()).
        {"key": PCLUSTER_IMAGE_BUILD_LOG_TAG, "value": self._get_log_group_arn()},
        {"key": PCLUSTER_IMAGE_CONFIG_TAG, "value": self.config_url},
    ]
    for tag in tag_list:
        cfn_tags.append(BaseTag(key=tag.get("key"), value=tag.get("value")))
    return [{"Key": tag.key, "Value": tag.value} for tag in cfn_tags]
def _create_network_stack(configuration, parameters):
    """Create the networking CloudFormation stack and return its Outputs.

    Blocks until the stack reaches a final state; exits the process on stack
    failure, on unexpected errors, or when the user interrupts the wait.

    :param configuration: network configuration providing stack_name_prefix and template_name
    :param parameters: CloudFormation parameters for the stack
    :return: the Outputs list of the created stack
    """
    print("Creating CloudFormation stack...")
    print("Do not leave the terminal until the process has finished")
    stack_name = "parallelclusternetworking-{0}{1}".format(
        configuration.stack_name_prefix, TIMESTAMP)
    try:
        cfn_client = boto3.client("cloudformation")
        stack = cfn_client.create_stack(
            StackName=stack_name,
            TemplateURL=get_templates_bucket_path() +
            "networking/%s-%s.cfn.json" %
            (configuration.template_name, get_installed_version()),
            Parameters=parameters,
            Capabilities=["CAPABILITY_IAM"],
        )
        # Bug fix: print() does not support logger-style lazy "%s" arguments;
        # the message must be formatted explicitly or the raw format string and
        # the arguments are printed verbatim.
        print("Stack Name: {0} (id: {1})".format(stack_name, stack.get("StackId")))
        if not verify_stack_status(stack_name,
                                   waiting_states=["CREATE_IN_PROGRESS"],
                                   successful_states=["CREATE_COMPLETE"]):
            print("Could not create the network configuration")
            sys.exit(0)
        print()
        print("The stack has been created")
        return AWSApi.instance().cfn.describe_stack(stack_name).get("Outputs")
    except KeyboardInterrupt:
        print()
        # Bug fix: explicit formatting instead of logger-style arguments.
        print(
            "Unable to update the configuration file with the selected network configuration. "
            "Please manually check the status of the CloudFormation stack: {0}".format(stack_name)
        )
        sys.exit(0)
    except Exception as e:  # Any exception is a problem
        print()
        # Bug fix: explicit formatting instead of logger-style arguments.
        print(
            "An exception occured while creating the CloudFormation stack: {0}. "
            "For details please check log file: {1}".format(stack_name, get_cli_log_file())
        )
        LOGGER.critical(e)
        sys.exit(1)
def test_get_official_images(boto3_stubber, os, architecture, boto3_response, expected_response, error_message):
    """Verify get_official_images issues the expected describe_images call and maps the result."""
    # Wildcards are used when an os/architecture filter is not provided.
    filter_version = get_installed_version()
    filter_os = OS_TO_IMAGE_NAME_PART_MAP[os] if os else "*"
    filter_arch = architecture or "*"
    expected_params = {
        "Filters": [
            {
                "Name": "name",
                "Values": [
                    f"aws-parallelcluster-{filter_version}-{filter_os}-{filter_arch}*"
                ]
            },
        ],
        "ImageIds": [],
        "Owners": ["amazon"],
    }
    mocked_requests = [
        MockedBoto3Request(
            method="describe_images",
            expected_params=expected_params,
            response=str(boto3_response)
            if isinstance(boto3_response, Exception) else boto3_response,
            generate_error=isinstance(boto3_response, Exception),
        )
    ]
    boto3_stubber("ec2", mocked_requests)
    if error_message:
        with pytest.raises(AWSClientError, match=error_message):
            Ec2Client().get_official_images(os, architecture)
    else:
        response = Ec2Client().get_official_images(os, architecture)
        # Soft assertions collect all mismatches instead of failing fast.
        with soft_assertions():
            assert_that(len(response)).is_equal_to(len(expected_response))
            for i in range(len(response)):
                assert_that(response[i].name).is_equal_to(
                    expected_response[i].name)
def _evaluate_tags(pcluster_config, preferred_tags=None):
    """
    Merge given tags to the ones defined in the configuration file and convert them into the Key/Value format.

    :param pcluster_config: PclusterConfig, it can contain tags
    :param preferred_tags: tags that must take the precedence before the configured ones
    :return: a merge of the tags + version tag
    """
    merged = {}
    configured = pcluster_config.get_section("cluster").get_param_value("tags")
    # Configured tags first, then command-line tags override them.
    for source in (configured, preferred_tags):
        if source:
            merged.update(source)
    # The pcluster version tag always wins.
    merged["Version"] = utils.get_installed_version()
    return [{"Key": key, "Value": value} for key, value in merged.items()]
def upload_hit_resources(bucket_name, artifact_directory, pcluster_config, json_params, tags=None):
    """Upload the HIT substack artifacts to the cluster S3 bucket.

    Uploads the json cluster config, renders the compute-fleet-hit-substack
    template against it (embedding the config object's S3 VersionId) and then
    uploads the rendered template. Every failure is logged and re-raised.

    :param bucket_name: destination S3 bucket
    :param artifact_directory: S3 key prefix of the cluster artifacts
    :param pcluster_config: config providing region and optional hit_template_url
    :param json_params: json cluster parameters, uploaded and used for rendering
    :param tags: tags passed to template rendering (defaults to an empty list)
    """
    if tags is None:
        tags = []
    hit_template_url = pcluster_config.get_section("cluster").get_param_value(
        "hit_template_url"
    ) or "{bucket_url}/templates/compute-fleet-hit-substack-{version}.cfn.yaml".format(
        bucket_url=utils.get_bucket_url(pcluster_config.region), version=utils.get_installed_version()
    )
    s3_client = boto3.client("s3")
    try:
        result = s3_client.put_object(
            Bucket=bucket_name,
            Body=json.dumps(json_params),
            Key="{artifact_directory}/configs/cluster-config.json".format(artifact_directory=artifact_directory),
        )
        file_contents = utils.read_remote_file(hit_template_url)
        # The uploaded config's S3 object version is baked into the rendered template.
        rendered_template = utils.render_template(file_contents, json_params, tags, result.get("VersionId"))
    except ClientError as client_error:
        LOGGER.error("Error when uploading cluster configuration file to bucket %s: %s", bucket_name, client_error)
        raise
    except Exception as e:
        LOGGER.error("Error when generating CloudFormation template from url %s: %s", hit_template_url, e)
        raise
    try:
        s3_client.put_object(
            Bucket=bucket_name,
            Body=rendered_template,
            Key="{artifact_directory}/templates/compute-fleet-hit-substack.rendered.cfn.yaml".format(
                artifact_directory=artifact_directory
            ),
        )
    except Exception as e:
        LOGGER.error("Error when uploading CloudFormation template to bucket %s: %s", bucket_name, e)
        raise
def version():
    """Return the installed ParallelCluster version string."""
    installed_version = utils.get_installed_version()
    return installed_version
def create_cluster(
    create_cluster_request_content: Dict,
    region: str = None,
    suppress_validators: List[str] = None,
    validation_failure_level: str = None,
    dryrun: bool = None,
    rollback_on_failure: bool = None,
) -> CreateClusterResponseContent:
    """
    Create a managed cluster in a given region.

    :param create_cluster_request_content:
    :type create_cluster_request_content: dict | bytes
    :param region: AWS Region that the operation corresponds to.
    :type region: str
    :param suppress_validators: Identifies one or more config validators to suppress.
        Format: (ALL|type:[A-Za-z0-9]+)
    :param validation_failure_level: Min validation level that will cause the cluster creation to fail.
        (Defaults to 'ERROR'.)
    :param dryrun: Only perform request validation without creating any resource.
        May be used to validate the cluster configuration. (Defaults to 'false'.)
    :type dryrun: bool
    :param rollback_on_failure: When set it automatically initiates a cluster stack rollback on failures.
        (Defaults to 'true'.)
    :type rollback_on_failure: bool
    """
    # Set defaults
    rollback_on_failure = rollback_on_failure in {True, None}  # None means "not provided" -> default True
    validation_failure_level = validation_failure_level or ValidationLevel.ERROR
    dryrun = dryrun is True
    create_cluster_request_content = CreateClusterRequestContent.from_dict(create_cluster_request_content)
    cluster_config = create_cluster_request_content.cluster_configuration
    if not cluster_config:
        LOGGER.error("Failed: configuration is required and cannot be empty")
        raise BadRequestException("configuration is required and cannot be empty")
    try:
        cluster = Cluster(create_cluster_request_content.cluster_name, cluster_config)
        if dryrun:
            # Dryrun only validates; suppressed failures are reported via the exception.
            ignored_validation_failures = cluster.validate_create_request(
                get_validator_suppressors(suppress_validators), FailureLevel[validation_failure_level]
            )
            validation_messages = validation_results_to_config_validation_errors(ignored_validation_failures)
            raise DryrunOperationException(validation_messages=validation_messages or None)
        stack_id, ignored_validation_failures = cluster.create(
            disable_rollback=not rollback_on_failure,
            validator_suppressors=get_validator_suppressors(suppress_validators),
            validation_failure_level=FailureLevel[validation_failure_level],
        )
        return CreateClusterResponseContent(
            ClusterInfoSummary(
                cluster_name=create_cluster_request_content.cluster_name,
                cloudformation_stack_status=CloudFormationStackStatus.CREATE_IN_PROGRESS,
                cloudformation_stack_arn=stack_id,
                region=os.environ.get("AWS_DEFAULT_REGION"),
                version=get_installed_version(),
                cluster_status=cloud_formation_status_to_cluster_status(CloudFormationStackStatus.CREATE_IN_PROGRESS),
            ),
            validation_messages=validation_results_to_config_validation_errors(ignored_validation_failures) or None,
        )
    except ConfigValidationError as e:
        # Validation failures become a structured 400 response.
        config_validation_messages = validation_results_to_config_validation_errors(e.validation_failures) or None
        raise CreateClusterBadRequestException(
            CreateClusterBadRequestExceptionResponseContent(
                configuration_validation_errors=config_validation_messages, message=str(e)
            )
        )
"Ec2ImageBuilderArn", "Value": "arn:aws:imagebuilder:us-east-1:xxxxxxxxxxxx:image/parallelclusterimagerecipe-" "87ofwi610f0aiktu/2.10.1/1", }, { "Key": "parallelcluster:bootstrap_file", "Value": "aws-parallelcluster-cookbook-2.10.1" }, { "Key": "parallelcluster:nvidia", "Value": "nvidia-450.80.02" }, { "Key": "parallelcluster:version", "Value": get_installed_version() }, { "Key": "parallelcluster:pmix", "Value": "pmix-3.1.5" }, { "Key": "parallelcluster:efa_openmpi40_aws", "Value": "openmpi40-aws-4.0.5-1.amzn2.x86_64" }, { "Key": "parallelcluster:kernel", "Value": "4.14.203-156.332.amzn2.x86_64" }, { "Key": "parallelcluster:dcv_xdcv",
def _add_imagebuilder_components(self, build_tags, lambda_cleanup_policy_statements):
    """Build the ordered list of ImageBuilder components and their Cfn resources.

    Order: optional UpdateOS, ParallelCluster install (unless disabled), Tag,
    user custom components, then Validate/Test (unless disabled or the
    ParallelCluster component itself is disabled). For each component created
    here a DeleteComponent statement is added to the cleanup lambda policy
    unless a custom cleanup role is configured.

    :return: (components, components_resources) — the recipe component
        configurations and the CfnComponent resources backing them
    """
    imagebuilder_resources_dir = os.path.join(
        imagebuilder_utils.get_resources_directory(), "imagebuilder")
    # ImageBuilderComponents
    components = []
    components_resources = []
    if self.config.build and self.config.build.update_os_packages and self.config.build.update_os_packages.enabled:
        update_os_component_resource = imagebuilder.CfnComponent(
            self,
            "UpdateOSComponent",
            name=self._build_resource_name(
                IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-UpdateOS"),
            version=utils.get_installed_version(base_version_only=True),
            tags=build_tags,
            description="Update OS and Reboot",
            platform="Linux",
            data=Fn.sub(
                _load_yaml(imagebuilder_resources_dir,
                           "update_and_reboot.yaml")),
        )
        components.append(
            imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
                component_arn=Fn.ref("UpdateOSComponent")))
        components_resources.append(update_os_component_resource)
        if not self.custom_cleanup_lambda_role:
            self._add_resource_delete_policy(
                lambda_cleanup_policy_statements,
                ["imagebuilder:DeleteComponent"],
                [
                    self.format_arn(
                        service="imagebuilder",
                        resource="component",
                        resource_name="{0}/*".format(
                            self._build_resource_name(
                                IMAGEBUILDER_RESOURCE_NAME_PREFIX +
                                "-UpdateOS", to_lower=True)),
                    )
                ],
            )
    # Dev setting allowing images without the ParallelCluster software stack.
    disable_pcluster_component = (
        self.config.dev_settings.disable_pcluster_component
        if self.config.dev_settings and self.config.dev_settings.disable_pcluster_component
        else False)
    if not disable_pcluster_component:
        parallelcluster_component_resource = imagebuilder.CfnComponent(
            self,
            "ParallelClusterComponent",
            name=self._build_resource_name(
                IMAGEBUILDER_RESOURCE_NAME_PREFIX),
            version=utils.get_installed_version(base_version_only=True),
            tags=build_tags,
            description="Install ParallelCluster software stack",
            platform="Linux",
            data=Fn.sub(
                _load_yaml(imagebuilder_resources_dir,
                           "parallelcluster.yaml")),
        )
        components.append(
            imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
                component_arn=Fn.ref("ParallelClusterComponent")))
        components_resources.append(parallelcluster_component_resource)
        if not self.custom_cleanup_lambda_role:
            self._add_resource_delete_policy(
                lambda_cleanup_policy_statements,
                ["imagebuilder:DeleteComponent"],
                [
                    self.format_arn(
                        service="imagebuilder",
                        resource="component",
                        resource_name="{0}/*".format(
                            self._build_resource_name(
                                IMAGEBUILDER_RESOURCE_NAME_PREFIX,
                                to_lower=True)),
                    )
                ],
            )
    # The Tag component is always added, independent of the dev settings.
    tag_component_resource = imagebuilder.CfnComponent(
        self,
        "ParallelClusterTagComponent",
        name=self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-Tag"),
        version=utils.get_installed_version(base_version_only=True),
        tags=build_tags,
        description="Tag ParallelCluster AMI",
        platform="Linux",
        data=_load_yaml(imagebuilder_resources_dir, "parallelcluster_tag.yaml"),
    )
    components.append(
        imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
            component_arn=Fn.ref("ParallelClusterTagComponent")))
    components_resources.append(tag_component_resource)
    if not self.custom_cleanup_lambda_role:
        self._add_resource_delete_policy(
            lambda_cleanup_policy_statements,
            ["imagebuilder:DeleteComponent"],
            [
                self.format_arn(
                    service="imagebuilder",
                    resource="component",
                    resource_name="{0}/*".format(
                        self._build_resource_name(
                            IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-Tag",
                            to_lower=True)),
                )
            ],
        )
    if self.config.build.components:
        self._add_custom_components(components, lambda_cleanup_policy_statements, components_resources)
    disable_validate_and_test_component = (
        self.config.dev_settings.disable_validate_and_test
        if self.config.dev_settings and self.config.dev_settings.disable_validate_and_test
        else False)
    # Validate/Test only make sense when the ParallelCluster component is installed.
    if not disable_pcluster_component and not disable_validate_and_test_component:
        validate_component_resource = imagebuilder.CfnComponent(
            self,
            id="ParallelClusterValidateComponent",
            name=self._build_resource_name(
                IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-Validate"),
            version=utils.get_installed_version(base_version_only=True),
            tags=build_tags,
            description="Validate ParallelCluster AMI",
            platform="Linux",
            data=_load_yaml(imagebuilder_resources_dir, "parallelcluster_validate.yaml"),
        )
        components.append(
            imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
                component_arn=Fn.ref("ParallelClusterValidateComponent")))
        components_resources.append(validate_component_resource)
        if not self.custom_cleanup_lambda_role:
            self._add_resource_delete_policy(
                lambda_cleanup_policy_statements,
                ["imagebuilder:DeleteComponent"],
                [
                    self.format_arn(
                        service="imagebuilder",
                        resource="component",
                        resource_name="{0}/*".format(
                            self._build_resource_name(
                                IMAGEBUILDER_RESOURCE_NAME_PREFIX +
                                "-Validate", to_lower=True)),
                    )
                ],
            )
        test_component_resource = imagebuilder.CfnComponent(
            self,
            id="ParallelClusterTestComponent",
            name=self._build_resource_name(
                IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-Test"),
            version=utils.get_installed_version(base_version_only=True),
            tags=build_tags,
            description="Test ParallelCluster AMI",
            platform="Linux",
            data=_load_yaml(imagebuilder_resources_dir, "parallelcluster_test.yaml"),
        )
        components.append(
            imagebuilder.CfnImageRecipe.ComponentConfigurationProperty(
                component_arn=Fn.ref("ParallelClusterTestComponent")))
        components_resources.append(test_component_resource)
        if not self.custom_cleanup_lambda_role:
            self._add_resource_delete_policy(
                lambda_cleanup_policy_statements,
                ["imagebuilder:DeleteComponent"],
                [
                    self.format_arn(
                        service="imagebuilder",
                        resource="component",
                        resource_name="{0}/*".format(
                            self._build_resource_name(
                                IMAGEBUILDER_RESOURCE_NAME_PREFIX + "-Test",
                                to_lower=True)),
                    )
                ],
            )
    return components, components_resources
def _get_official_image_name_prefix(os=None, architecture=None):
    """Return the prefix of the current official image, for the provided os-architecture combination."""
    # Missing filters become wildcards; unknown os maps to an empty part.
    os_part = "*" if os is None else OS_TO_IMAGE_NAME_PART_MAP.get(os, "")
    arch_part = architecture if architecture else "*"
    return f"aws-parallelcluster-{utils.get_installed_version()}-{os_part}-{arch_part}"
def execute(  # noqa: D102
    self, args: argparse.Namespace, extra_args: List[str]  # pylint: disable=unused-argument
) -> None:
    """Print the installed ParallelCluster version as json."""
    payload = {"version": utils.get_installed_version()}
    print_json(payload)
def test_get_cluster_ami_id(
    mocker,
    boto3_stubber,
    custom_ami_id,
    os,
    architecture,
    expected_ami_suffix,
    expected_public_ami_id,
    expected_self_ami_id,
    expected_error_message,
    raise_boto3_error,
):
    """Verify _get_cluster_ami_id resolution order: custom AMI, public official AMI, self-owned AMI."""
    if not custom_ami_id:
        # Expected request for public ami
        mocked_requests = [
            MockedBoto3Request(
                method="describe_images",
                response={
                    "Images": [{
                        "Architecture": architecture,
                        "CreationDate": "2020-12-22T13:30:33.000Z",
                        "ImageId": expected_public_ami_id,
                    }] if expected_public_ami_id else []
                },
                expected_params={
                    "Filters": [
                        {
                            "Name": "name",
                            "Values": [
                                "aws-parallelcluster-{version}-{suffix}*".
                                format(version=get_installed_version(),
                                       suffix=expected_ami_suffix)
                            ],
                        },
                        {
                            "Name": "is-public",
                            "Values": ["true"]
                        },
                    ],
                    "Owners": ["amazon"],
                },
                generate_error=raise_boto3_error,
            )
        ]
        if not expected_public_ami_id and not raise_boto3_error:
            # Expected request for self ami
            mocked_requests.append(
                MockedBoto3Request(
                    method="describe_images",
                    response={
                        "Images": [{
                            "Architecture": architecture,
                            "CreationDate": "2020-12-22T13:30:33.000Z",
                            "ImageId": expected_self_ami_id,
                        }] if expected_self_ami_id else []
                    },
                    expected_params={
                        "Filters": [
                            {
                                "Name": "name",
                                "Values": [
                                    "aws-parallelcluster-{version}-{suffix}*".
                                    format(version=get_installed_version(),
                                           suffix=expected_ami_suffix)
                                ],
                            },
                        ],
                        "Owners": ["self"],
                    },
                    generate_error=raise_boto3_error,
                ))
        boto3_stubber("ec2", mocked_requests)
    pcluster_config = get_mocked_pcluster_config(mocker)
    pcluster_config.get_section("cluster").get_param(
        "custom_ami").value = custom_ami_id
    pcluster_config.get_section("cluster").get_param("base_os").value = os
    pcluster_config.get_section("cluster").get_param(
        "architecture").value = architecture
    if expected_error_message:
        with pytest.raises(SystemExit, match=expected_error_message):
            _ = pcluster_config.cluster_model._get_cluster_ami_id(
                pcluster_config)
    else:
        # Public official AMI wins over a self-owned one.
        expected_ami_id = expected_public_ami_id if expected_public_ami_id else expected_self_ami_id
        cluster_ami_id = pcluster_config.cluster_model._get_cluster_ami_id(
            pcluster_config)
        assert_that(cluster_ami_id).is_equal_to(expected_ami_id)