class CookbookSchema(BaseSchema):
    """Represent the schema of cookbook."""

    # Neither the cookbook nor its extra attributes may change across updates.
    chef_cookbook = fields.Str(
        metadata={
            "update_policy": UpdatePolicy(
                UpdatePolicy.UNSUPPORTED,
                fail_reason=UpdatePolicy.FAIL_REASONS["cookbook_update"],
            )
        }
    )
    extra_chef_attributes = fields.Str(
        metadata={
            "update_policy": UpdatePolicy(
                UpdatePolicy.UNSUPPORTED,
                fail_reason=UpdatePolicy.FAIL_REASONS["cookbook_update"],
            )
        }
    )

    @post_load()
    def make_resource(self, data, **kwargs):
        """Generate resource."""
        return Cookbook(**data)

    @validates("extra_chef_attributes")
    def validate_extra_chef_attributes(self, value):
        """Validate json."""
        # TODO: double check the allowed pattern for extra chef attribute
        if value and not validate_json_format(value):
            raise ValidationError(message=f"'{value}' is invalid")
def _test_more_target_sections(base_conf, target_conf):
    """Check the patch produced when the target config has an ebs section the base config lacks."""
    # Remove an ebs section from the base conf
    assert_that(base_conf.get_section("ebs", "ebs-1")).is_not_none()
    base_conf.remove_section("ebs", "ebs-1")
    assert_that(base_conf.get_section("ebs", "ebs-1")).is_none()

    # The patch must show 3 differences: one for ebs_settings and two for the
    # params of the ebs section missing from the base conf.
    _check_patch(
        base_conf,
        target_conf,
        [
            Change(
                "cluster",
                "default",
                "ebs_settings",
                "ebs-2",
                "ebs-1,ebs-2",
                UpdatePolicy(
                    UpdatePolicy.UNSUPPORTED,
                    fail_reason="EBS sections cannot be added or removed during a 'pcluster update' operation",
                ),
            ),
            Change("ebs", "ebs-1", "shared_dir", "-", "vol1", UpdatePolicy(UpdatePolicy.SUPPORTED)),
            Change("ebs", "ebs-1", "volume_size", "-", 20, UpdatePolicy(UpdatePolicy.SUPPORTED)),
        ],
        UpdatePolicy.UNSUPPORTED,
    )
def _test_different_labels(base_conf, target_conf):
    """Check the patch produced when ebs sections are relabeled in the base config."""
    # First make sure sections are present with original labels
    base_ebs_1_section = base_conf.get_section("ebs", "ebs-1")
    base_ebs_2_section = base_conf.get_section("ebs", "ebs-2")
    assert_that(base_ebs_1_section).is_not_none()
    assert_that(base_ebs_2_section).is_not_none()

    # Now update section labels and make sure they're no more present with original labels
    base_ebs_1_section.label = "ebs-1_updated"
    base_ebs_2_section.label = "ebs-2_updated"
    assert_that(base_conf.get_section("ebs", "ebs-1_updated")).is_not_none()
    assert_that(base_conf.get_section("ebs", "ebs-2_updated")).is_not_none()
    assert_that(base_conf.get_section("ebs", "ebs-1")).is_none()
    assert_that(base_conf.get_section("ebs", "ebs-2")).is_none()

    # The patch should contain 9 differences:
    # - shared_dir and volume_size for each of the 2 volumes in target conf not matched in base conf (4)
    # - shared_dir and volume_size for each of the 2 volumes in base conf not matched in target conf (4)
    # - 1 ebs_settings changed in cluster section
    _check_patch(
        base_conf,
        target_conf,
        [
            Change("ebs", "ebs-1", "shared_dir", "-", "vol1", UpdatePolicy(UpdatePolicy.SUPPORTED)),
            Change("ebs", "ebs-1_updated", "shared_dir", "vol1", "-", UpdatePolicy(UpdatePolicy.SUPPORTED)),
            Change("ebs", "ebs-2", "shared_dir", "-", "vol2", UpdatePolicy(UpdatePolicy.SUPPORTED)),
            Change("ebs", "ebs-2_updated", "shared_dir", "vol2", "-", UpdatePolicy(UpdatePolicy.SUPPORTED)),
            Change("ebs", "ebs-1", "volume_size", "-", 20, UpdatePolicy(UpdatePolicy.SUPPORTED)),
            Change("ebs", "ebs-1_updated", "volume_size", 20, "-", UpdatePolicy(UpdatePolicy.SUPPORTED)),
            Change("ebs", "ebs-2", "volume_size", "-", 20, UpdatePolicy(UpdatePolicy.SUPPORTED)),
            Change("ebs", "ebs-2_updated", "volume_size", 20, "-", UpdatePolicy(UpdatePolicy.SUPPORTED)),
            Change(
                "cluster",
                "default",
                "ebs_settings",
                "ebs-1_updated,ebs-2_updated",
                "ebs-1,ebs-2",
                UpdatePolicy(
                    UpdatePolicy.UNSUPPORTED,
                    fail_reason="EBS sections cannot be added or removed during a 'pcluster update' operation",
                ),
            ),
        ],
        UpdatePolicy.UNSUPPORTED,
    )
class HeadNodeRootVolumeSchema(BaseSchema):
    """Represent the RootVolume schema for the Head node."""

    volume_type = fields.Str(
        validate=get_field_validator("volume_type"),
        metadata={
            "update_policy": UpdatePolicy(
                UpdatePolicy.UNSUPPORTED,
                action_needed=UpdatePolicy.ACTIONS_NEEDED["ebs_volume_update"],
            )
        },
    )
    iops = fields.Int(metadata={"update_policy": UpdatePolicy.SUPPORTED})
    # Resizing the root volume in place is not supported.
    size = fields.Int(
        metadata={
            "update_policy": UpdatePolicy(
                UpdatePolicy.UNSUPPORTED,
                fail_reason=UpdatePolicy.FAIL_REASONS["ebs_volume_resize"],
                action_needed=UpdatePolicy.ACTIONS_NEEDED["ebs_volume_update"],
            )
        }
    )
    kms_key_id = fields.Str(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
    throughput = fields.Int(metadata={"update_policy": UpdatePolicy.SUPPORTED})
    encrypted = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
    delete_on_termination = fields.Bool(metadata={"update_policy": UpdatePolicy.SUPPORTED})

    @post_load
    def make_resource(self, data, **kwargs):
        """Generate resource."""
        return RootVolume(**data)

    @validates("size")
    def validate_size(self, value):
        """Validate the size of root volume."""
        if value < EBS_VOLUME_SIZE_DEFAULT:
            raise ValidationError(
                f"Root volume size {value} is invalid. It must be at least {EBS_VOLUME_SIZE_DEFAULT}."
            )
def _test_incompatible_ebs_sections(base_conf, target_conf):
    """Check that an ebs shared_dir change is reported as an unsupported patch."""
    # Change shared_dir param value in target conf
    target_conf.get_section("ebs", "ebs-1").get_param("shared_dir").value = "new_value"

    # The patch must show the updated shared_dir for ebs-1 section
    expected_changes = [
        Change("ebs", "ebs-1", "shared_dir", "vol1", "new_value", UpdatePolicy(UpdatePolicy.UNSUPPORTED))
    ]
    _check_patch(base_conf, target_conf, expected_changes, UpdatePolicy.UNSUPPORTED)
def _test_more_target_sections(base_conf, target_conf):
    """Check the patch produced when the target config has a SharedStorage entry the base lacks."""
    # Remove an ebs section from the base conf
    assert_that(_get_storage_by_name(base_conf, "ebs1")).is_not_none()
    _remove_storage_by_name(base_conf, "ebs1")
    assert_that(_get_storage_by_name(base_conf, "ebs1")).is_none()

    # Update some values in the target config for the remaining ebs
    remaining_storage = _get_storage_by_name(target_conf, "ebs2")
    remaining_storage["MountDir"] = "vol1"
    remaining_storage["EbsSettings"]["Iops"] = 20
    remaining_storage["EbsSettings"]["VolumeType"] = "gp2"

    # The patch must show multiple differences: changes for EBS settings and one for missing ebs section in base conf
    expected_changes = [
        Change(
            [],
            "SharedStorage",
            None,
            _get_storage_by_name(target_conf, "ebs1"),
            UpdatePolicy(
                UpdatePolicy.UNSUPPORTED,
                fail_reason=(
                    "Shared Storage cannot be added or removed during a 'pcluster update-cluster' operation"
                ),
            ),
            is_list=True,
        ),
        Change(["SharedStorage[ebs2]"], "MountDir", "vol2", "vol1", UpdatePolicy.UNSUPPORTED, is_list=False),
        Change(["SharedStorage[ebs2]", "EbsSettings"], "Iops", None, 20, UpdatePolicy.SUPPORTED, is_list=False),
        Change(
            ["SharedStorage[ebs2]", "EbsSettings"],
            "VolumeType",
            "gp3",
            "gp2",
            UpdatePolicy.UNSUPPORTED,
            is_list=False,
        ),
    ]
    _check_patch(base_conf, target_conf, expected_changes, UpdatePolicy.UNSUPPORTED)
class EbsSettingsSchema(BaseSchema):
    """Represent the schema of EBS."""

    volume_type = fields.Str(
        validate=get_field_validator("volume_type"),
        metadata={
            "update_policy": UpdatePolicy(
                UpdatePolicy.UNSUPPORTED,
                action_needed=UpdatePolicy.ACTIONS_NEEDED["ebs_volume_update"],
            )
        },
    )
    iops = fields.Int(metadata={"update_policy": UpdatePolicy.SUPPORTED})
    # In-place resize is not supported; the volume must be replaced instead.
    size = fields.Int(
        metadata={
            "update_policy": UpdatePolicy(
                UpdatePolicy.UNSUPPORTED,
                fail_reason=UpdatePolicy.FAIL_REASONS["ebs_volume_resize"],
                action_needed=UpdatePolicy.ACTIONS_NEEDED["ebs_volume_update"],
            )
        }
    )
    kms_key_id = fields.Str(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
    throughput = fields.Int(metadata={"update_policy": UpdatePolicy.SUPPORTED})
    encrypted = fields.Bool(metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
    # Accepts both 8- and 17-character AWS resource id formats.
    snapshot_id = fields.Str(
        validate=validate.Regexp(r"^snap-[0-9a-z]{8}$|^snap-[0-9a-z]{17}$"),
        metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
    )
    volume_id = fields.Str(
        validate=validate.Regexp(r"^vol-[0-9a-z]{8}$|^vol-[0-9a-z]{17}$"),
        metadata={"update_policy": UpdatePolicy.UNSUPPORTED},
    )
    raid = fields.Nested(RaidSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
    deletion_policy = fields.Str(
        validate=validate.OneOf(DELETION_POLICIES_WITH_SNAPSHOT),
        metadata={"update_policy": UpdatePolicy.SUPPORTED},
    )
def _test_incompatible_ebs_sections(base_conf, target_conf):
    """Check that a MountDir change on a SharedStorage entry is reported as unsupported."""
    # Change MountDir param value in target conf
    _get_storage_by_name(target_conf, "ebs1")["MountDir"] = "new_value"

    # The patch must show the updated MountDir for ebs1 section
    expected_changes = [
        Change(
            ["SharedStorage[ebs1]"],
            "MountDir",
            "vol1",
            "new_value",
            UpdatePolicy(UpdatePolicy.UNSUPPORTED),
            False,
        )
    ]
    _check_patch(base_conf, target_conf, expected_changes, UpdatePolicy.UNSUPPORTED)
def _test_different_names(base_conf, target_conf):
    """Check the patch produced when SharedStorage entries are renamed in the target config."""
    # First make sure sections are present with original names
    base_ebs_1_section = _get_storage_by_name(base_conf, "ebs1")
    base_ebs_2_section = _get_storage_by_name(base_conf, "ebs2")
    assert_that(base_ebs_1_section).is_not_none()
    assert_that(base_ebs_2_section).is_not_none()

    target_ebs_1_section = _get_storage_by_name(target_conf, "ebs1")
    target_ebs_2_section = _get_storage_by_name(target_conf, "ebs2")
    assert_that(target_ebs_1_section).is_not_none()
    assert_that(target_ebs_2_section).is_not_none()

    # Now update section names and make sure they're no more present with original names
    target_ebs_1_section["Name"] = "ebs1_updated"
    target_ebs_2_section["Name"] = "ebs2_updated"
    assert_that(_get_storage_by_name(target_conf, "ebs1_updated")).is_not_none()
    assert_that(_get_storage_by_name(target_conf, "ebs2_updated")).is_not_none()
    assert_that(_get_storage_by_name(target_conf, "ebs1")).is_none()
    # Fixed: was "ebs-", which trivially passed and never verified ebs2 was renamed.
    assert_that(_get_storage_by_name(target_conf, "ebs2")).is_none()

    unsupported_update_policy = UpdatePolicy(
        UpdatePolicy.UNSUPPORTED,
        fail_reason="Shared Storage cannot be added or removed during a 'pcluster update-cluster' operation",
    )

    # The patch should contain 4 differences:
    # - 2 volumes in target conf not matched in base conf
    # - 2 volumes in base conf not matched in target conf
    _check_patch(
        base_conf,
        target_conf,
        [
            Change([], "SharedStorage", None, target_ebs_1_section, unsupported_update_policy, is_list=True),
            Change([], "SharedStorage", None, target_ebs_2_section, unsupported_update_policy, is_list=True),
            Change([], "SharedStorage", base_ebs_1_section, None, unsupported_update_policy, is_list=True),
            Change([], "SharedStorage", base_ebs_2_section, None, unsupported_update_policy, is_list=True),
        ],
        UpdatePolicy.UNSUPPORTED,
    )
class ClusterSchema(BaseSchema):
    """Represent the schema of the Cluster."""

    image = fields.Nested(ImageSchema, required=True, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
    head_node = fields.Nested(HeadNodeSchema, required=True, metadata={"update_policy": UpdatePolicy.SUPPORTED})
    scheduling = fields.Nested(SchedulingSchema, required=True, metadata={"update_policy": UpdatePolicy.UNSUPPORTED})
    shared_storage = fields.Nested(
        SharedStorageSchema,
        many=True,
        metadata={
            "update_policy": UpdatePolicy(
                UpdatePolicy.UNSUPPORTED,
                fail_reason=UpdatePolicy.FAIL_REASONS["shared_storage_change"],
            ),
            "update_key": "Name",
        },
    )
    monitoring = fields.Nested(MonitoringSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
    additional_packages = fields.Nested(
        AdditionalPackagesSchema, metadata={"update_policy": UpdatePolicy.UNSUPPORTED}
    )
    tags = fields.Nested(
        TagSchema, many=True, metadata={"update_policy": UpdatePolicy.SUPPORTED, "update_key": "Key"}
    )
    iam = fields.Nested(ClusterIamSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})
    custom_s3_bucket = fields.Str(metadata={"update_policy": UpdatePolicy.READ_ONLY_RESOURCE_BUCKET})
    additional_resources = fields.Str(metadata={"update_policy": UpdatePolicy.SUPPORTED})
    dev_settings = fields.Nested(ClusterDevSettingsSchema, metadata={"update_policy": UpdatePolicy.SUPPORTED})

    def __init__(self, cluster_name: str):
        super().__init__()
        self.cluster_name = cluster_name

    @validates("tags")
    def validate_tags(self, tags):
        """Validate tags."""
        validate_no_reserved_tag(tags)

    @validates_schema
    def no_intel_select_solutions_for_batch(self, data, **kwargs):
        """Ensure IntelSelectSolutions section is not included when AWS Batch is the scheduler."""
        scheduling = data.get("scheduling")
        additional_packages = data.get("additional_packages")
        is_batch = scheduling and scheduling.scheduler == "awsbatch"
        wants_intel = additional_packages and additional_packages.intel_select_solutions.install_intel_software
        if is_batch and wants_intel:
            raise ValidationError(
                "The use of the IntelSelectSolutions package is not supported when using awsbatch as the scheduler."
            )

    @post_load(pass_original=True)
    def make_resource(self, data, original_data, **kwargs):
        """Generate cluster according to the scheduler. Save original configuration."""
        scheduler = data.get("scheduling").scheduler
        # Dispatch on scheduler; any other value falls back to the base config.
        config_class = {
            "slurm": SlurmClusterConfig,
            "awsbatch": AwsBatchClusterConfig,
        }.get(scheduler, BaseClusterConfig)  # FIXME "custom" scheduler must map to ByosCluster
        cluster = config_class(cluster_name=self.cluster_name, **data)
        cluster.source_config = original_data
        return cluster
}), ("ebs_snapshot_id", { "allowed_values": ALLOWED_VALUES["snapshot_id"], "cfn_param_mapping": "EBSSnapshotId", "update_policy": UpdatePolicy.UNSUPPORTED }), ("volume_type", { "default": "gp2", "allowed_values": ALLOWED_VALUES["volume_types"], "cfn_param_mapping": "VolumeType", "update_policy": UpdatePolicy(UpdatePolicy.UNSUPPORTED, action_needed=UpdatePolicy. ACTIONS_NEEDED["ebs_volume_update"]) }), ("volume_size", { "type": VolumeSizeParam, "cfn_param_mapping": "VolumeSize", "update_policy": UpdatePolicy( UpdatePolicy.UNSUPPORTED, fail_reason=UpdatePolicy.FAIL_REASONS["ebs_volume_resize"], action_needed=UpdatePolicy. ACTIONS_NEEDED["ebs_volume_update"]) }), ("volume_iops", {