class SageMakerWorkteamSpec(SageMakerComponentSpec[SageMakerWorkteamInputs, SageMakerWorkteamOutputs]):
    INPUTS: SageMakerWorkteamInputs = SageMakerWorkteamInputs(
        team_name=InputValidator(input_type=str, required=True, description="The name of your work team."),
        description=InputValidator(input_type=str, required=True, description="A description of the work team."),
        user_pool=InputValidator(input_type=str, required=False, description="An identifier for a user pool. The user pool must be in the same region as the service that you are calling."),
        user_groups=InputValidator(input_type=str, required=False, description="A list of identifiers for user groups separated by commas.", default=""),
        client_id=InputValidator(input_type=str, required=False, description="An identifier for an application client. You must create the app client ID using Amazon Cognito."),
        sns_topic=InputValidator(input_type=str, required=False, description="The ARN for the SNS topic to which notifications should be published.", default=""),
        **vars(COMMON_INPUTS),
    )

    OUTPUTS = SageMakerWorkteamOutputs(
        workteam_arn=OutputValidator(description="The ARN of the workteam."),
    )

    def __init__(self, arguments: List[str]):
        super().__init__(arguments, SageMakerWorkteamInputs, SageMakerWorkteamOutputs)

    @property
    def inputs(self) -> SageMakerWorkteamInputs:
        return self._inputs

    @property
    def outputs(self) -> SageMakerWorkteamOutputs:
        return self._outputs

    @property
    def output_paths(self) -> SageMakerWorkteamOutputs:
        return self._output_paths
class DummySpec(SageMakerComponentSpec[DummyInputs, DummyOutputs]):
    INPUTS: DummyInputs = DummyInputs(
        input1=InputValidator(input_type=str, description="The first input.", default="input1-default"),
        input2=InputValidator(input_type=int, required=True, description="The second input."),
    )

    OUTPUTS = DummyOutputs(
        output1=OutputValidator(description="The first output."),
        output2=OutputValidator(description="The second output."),
    )

    def __init__(self, arguments: List[str]):
        super().__init__(arguments, DummyInputs, DummyOutputs)
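# Illustrative only: a minimal sketch of how the validators a spec declares can be
# inspected. It assumes this module's imports are available and that InputValidator /
# OutputValidator store their constructor keyword arguments (required, description)
# as attributes, which the vars(COMMON_INPUTS) usage in these specs suggests.
def summarize_spec(spec_class) -> None:
    """Print the inputs and outputs that a component spec class declares."""
    for name, validator in vars(spec_class.INPUTS).items():
        flag = "required" if getattr(validator, "required", False) else "optional"
        print(f"input  {name} ({flag}): {validator.description}")
    for name, validator in vars(spec_class.OUTPUTS).items():
        print(f"output {name}: {validator.description}")

# Hypothetical usage:
# summarize_spec(DummySpec)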
class RoboMakerDeleteSimulationAppSpec(
    SageMakerComponentSpec[RoboMakerDeleteSimulationAppInputs, RoboMakerDeleteSimulationAppOutputs]
):
    INPUTS: RoboMakerDeleteSimulationAppInputs = RoboMakerDeleteSimulationAppInputs(
        arn=InputValidator(input_type=str, required=True, description="The Amazon Resource Name (ARN) of the simulation application.", default=""),
        version=InputValidator(input_type=str, required=False, description="The version of the simulation application.", default=None),
        **vars(COMMON_INPUTS),
    )

    OUTPUTS = RoboMakerDeleteSimulationAppOutputs(
        arn=OutputValidator(description="The Amazon Resource Name (ARN) of the simulation application."),
    )

    def __init__(self, arguments: List[str]):
        super().__init__(
            arguments,
            RoboMakerDeleteSimulationAppInputs,
            RoboMakerDeleteSimulationAppOutputs,
        )

    @property
    def inputs(self) -> RoboMakerDeleteSimulationAppInputs:
        return self._inputs

    @property
    def outputs(self) -> RoboMakerDeleteSimulationAppOutputs:
        return self._outputs

    @property
    def output_paths(self) -> RoboMakerDeleteSimulationAppOutputs:
        return self._output_paths
class RoboMakerSimulationJobBatchSpec(
    SageMakerComponentSpec[RoboMakerSimulationJobBatchInputs, RoboMakerSimulationJobBatchOutputs]
):
    INPUTS: RoboMakerSimulationJobBatchInputs = RoboMakerSimulationJobBatchInputs(
        role=InputValidator(input_type=str, required=True, description="The Amazon Resource Name (ARN) that Amazon RoboMaker assumes to perform tasks on your behalf."),
        timeout_in_secs=InputValidator(input_type=int, required=False, description="The amount of time, in seconds, to wait for the batch to complete.", default=0),
        max_concurrency=InputValidator(input_type=int, required=False, description="The number of simulation jobs created as part of the batch that can be in an active state at the same time.", default=0),
        simulation_job_requests=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=True, description="A list of simulation job requests to create in the batch.", default=[]),
        sim_app_arn=InputValidator(input_type=str, required=False, description="The application ARN for the simulation application.", default=""),
        **vars(COMMON_INPUTS),
    )

    OUTPUTS = RoboMakerSimulationJobBatchOutputs(
        arn=OutputValidator(description="The Amazon Resource Name (ARN) of the simulation job."),
        batch_job_id=OutputValidator(description="The simulation job batch id."),
    )

    def __init__(self, arguments: List[str]):
        super().__init__(
            arguments,
            RoboMakerSimulationJobBatchInputs,
            RoboMakerSimulationJobBatchOutputs,
        )

    @property
    def inputs(self) -> RoboMakerSimulationJobBatchInputs:
        return self._inputs

    @property
    def outputs(self) -> RoboMakerSimulationJobBatchOutputs:
        return self._outputs

    @property
    def output_paths(self) -> RoboMakerSimulationJobBatchOutputs:
        return self._output_paths
class SageMakerTuningSpec(SageMakerComponentSpec[SageMakerTuningInputs, SageMakerTuningOutputs]):
    INPUTS: SageMakerTuningInputs = SageMakerTuningInputs(
        job_name=InputValidator(input_type=str, required=False, description="The name of the tuning job. Must be unique within the same AWS account and AWS region."),
        role=InputValidator(input_type=str, required=True, description="The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf."),
        image=InputValidator(input_type=str, required=False, description="The registry path of the Docker image that contains the training algorithm.", default=""),
        algorithm_name=InputValidator(input_type=str, required=False, description="The name of the resource algorithm to use for the hyperparameter tuning job.", default=""),
        training_input_mode=InputValidator(choices=["File", "Pipe"], input_type=str, required=False, description="The input mode that the algorithm supports. File or Pipe.", default="File"),
        metric_definitions=InputValidator(input_type=SpecInputParsers.yaml_or_json_dict, required=False, description="The dictionary of name-regex pairs that specify the metrics that the algorithm emits.", default={}),
        strategy=InputValidator(choices=["Bayesian", "Random"], input_type=str, required=False, description="How hyperparameter tuning chooses the combinations of hyperparameter values to use for the training jobs it launches.", default="Bayesian"),
        metric_name=InputValidator(input_type=str, required=True, description="The name of the metric to use for the objective metric."),
        metric_type=InputValidator(choices=["Maximize", "Minimize"], input_type=str, required=True, description="Whether to minimize or maximize the objective metric."),
        early_stopping_type=InputValidator(choices=["Off", "Auto"], input_type=str, required=False, description="Whether to use early stopping for the training jobs launched by the tuning job. Off or Auto.", default="Off"),
        static_parameters=InputValidator(input_type=SpecInputParsers.yaml_or_json_dict, required=False, description="The values of hyperparameters that do not change for the tuning job.", default={}),
        integer_parameters=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="The array of IntegerParameterRange objects that specify ranges of integer hyperparameters that you want to search.", default=[]),
        continuous_parameters=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="The array of ContinuousParameterRange objects that specify ranges of continuous hyperparameters that you want to search.", default=[]),
        categorical_parameters=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="The array of CategoricalParameterRange objects that specify ranges of categorical hyperparameters that you want to search.", default=[]),
        channels=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=True, description="A list of dicts specifying the input channels. Must have at least one."),
        output_location=InputValidator(input_type=str, required=True, description="The Amazon S3 path where you want Amazon SageMaker to store the results of the training jobs."),
        output_encryption_key=InputValidator(input_type=str, required=False, description="The AWS KMS key that Amazon SageMaker uses to encrypt the model artifacts.", default=""),
        instance_type=InputValidator(input_type=str, required=False, description="The ML compute instance type.", default="ml.m4.xlarge"),
        instance_count=InputValidator(input_type=int, required=False, description="The number of ML compute instances to use in each training job.", default=1),
        volume_size=InputValidator(input_type=int, required=False, description="The size of the ML storage volume that you want to provision.", default=30),
        max_num_jobs=InputValidator(input_type=int, required=True, description="The maximum number of training jobs that a hyperparameter tuning job can launch."),
        max_parallel_jobs=InputValidator(input_type=int, required=True, description="The maximum number of concurrent training jobs that a hyperparameter tuning job can launch."),
        resource_encryption_key=InputValidator(input_type=str, required=False, description="The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).", default=""),
        vpc_security_group_ids=InputValidator(input_type=str, required=False, description="The VPC security group IDs, in the form sg-xxxxxxxx."),
        vpc_subnets=InputValidator(input_type=str, required=False, description="The IDs of the subnets in the VPC to which you want to connect your hpo job."),
        network_isolation=InputValidator(input_type=SpecInputParsers.str_to_bool, required=False, description="Isolates the training container.", default=True),
        traffic_encryption=InputValidator(input_type=SpecInputParsers.str_to_bool, required=False, description="Encrypts all communications between ML compute instances in distributed training.", default=False),
        warm_start_type=InputValidator(choices=["IdenticalDataAndAlgorithm", "TransferLearning", ""], input_type=str, required=False, description='Specifies either "IdenticalDataAndAlgorithm" or "TransferLearning".'),
        parent_hpo_jobs=InputValidator(input_type=str, required=False, description="List of previously completed or stopped hyperparameter tuning jobs to be used as a starting point.", default=""),
        **vars(COMMON_INPUTS),
        **vars(SPOT_INSTANCE_INPUTS),
    )

    OUTPUTS = SageMakerTuningOutputs(
        hpo_job_name=OutputValidator(description="The name of the hyperparameter tuning job."),
        model_artifact_url=OutputValidator(description="The output model artifacts S3 URL."),
        best_job_name=OutputValidator(description="The best training job in the hyperparameter tuning job."),
        best_hyperparameters=OutputValidator(description="The resulting tuned hyperparameters."),
        training_image=OutputValidator(description="The registry path of the Docker image that contains the training algorithm."),
    )

    def __init__(self, arguments: List[str]):
        super().__init__(arguments, SageMakerTuningInputs, SageMakerTuningOutputs)

    @property
    def inputs(self) -> SageMakerTuningInputs:
        return self._inputs

    @property
    def outputs(self) -> SageMakerTuningOutputs:
        return self._outputs

    @property
    def output_paths(self) -> SageMakerTuningOutputs:
        return self._output_paths
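# Illustrative only: `channels` is parsed with SpecInputParsers.yaml_or_json_list,
# so callers typically pass a JSON or YAML string. The dict shape below mirrors the
# SageMaker training channel structure; the bucket name and values are placeholders.
import json

example_channels = json.dumps(
    [
        {
            "ChannelName": "train",
            "DataSource": {
                "S3DataSource": {
                    "S3Uri": "s3://my-example-bucket/train",  # placeholder URI
                    "S3DataType": "S3Prefix",
                    "S3DataDistributionType": "FullyReplicated",
                }
            },
            "ContentType": "text/csv",
            "CompressionType": "None",
        }
    ]
)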
class SageMakerGroundTruthSpec(
    SageMakerComponentSpec[SageMakerGroundTruthInputs, SageMakerGroundTruthOutputs]
):
    INPUTS: SageMakerGroundTruthInputs = SageMakerGroundTruthInputs(
        role=InputValidator(input_type=str, required=True, description="The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf."),
        job_name=InputValidator(input_type=str, description="The name of the labeling job."),
        label_attribute_name=InputValidator(input_type=str, required=False, description="The attribute name to use for the label in the output manifest file. Default is the job name.", default=""),
        manifest_location=InputValidator(input_type=str, required=True, description="The Amazon S3 location of the manifest file that describes the input data objects."),
        output_location=InputValidator(input_type=str, required=True, description="The Amazon S3 location to write output data."),
        output_encryption_key=InputValidator(input_type=str, required=False, description="The AWS KMS key that Amazon SageMaker uses to encrypt the model artifacts.", default=""),
        task_type=InputValidator(input_type=str, required=True, description="Built-in image classification, bounding box, text classification, or semantic segmentation, or custom. If custom, please provide pre- and post-labeling task lambda functions."),
        worker_type=InputValidator(input_type=str, required=True, description="The workteam for data labeling: either public, private, or vendor."),
        workteam_arn=InputValidator(input_type=str, required=False, description="The ARN of the work team assigned to complete the tasks."),
        no_adult_content=InputValidator(input_type=SpecInputParsers.str_to_bool, required=False, description="If true, your data is free of adult content.", default=False),
        no_ppi=InputValidator(input_type=SpecInputParsers.str_to_bool, required=False, description="If true, your data is free of personally identifiable information.", default=False),
        label_category_config=InputValidator(input_type=str, required=False, description="The S3 URL of the JSON structured file that defines the categories used to label the data objects.", default=""),
        max_human_labeled_objects=InputValidator(input_type=int, required=False, description="The maximum number of objects that can be labeled by human workers.", default=0),
        max_percent_objects=InputValidator(input_type=int, required=False, description="The maximum percentage of input data objects that should be labeled.", default=0),
        enable_auto_labeling=InputValidator(input_type=SpecInputParsers.str_to_bool, required=False, description="Enables auto-labeling; only for bounding box, text classification, and image classification.", default=False),
        initial_model_arn=InputValidator(input_type=str, required=False, description="The ARN of the final model used for a previous auto-labeling job.", default=""),
        resource_encryption_key=InputValidator(input_type=str, required=False, description="The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).", default=""),
        ui_template=InputValidator(input_type=str, required=True, description="The Amazon S3 bucket location of the UI template."),
        pre_human_task_function=InputValidator(input_type=str, required=False, description="The ARN of a Lambda function that is run before a data object is sent to a human worker.", default=""),
        post_human_task_function=InputValidator(input_type=str, required=False, description="The ARN of a Lambda function that implements the logic for annotation consolidation.", default=""),
        task_keywords=InputValidator(input_type=str, required=False, description="Keywords used to describe the task so that workers on Amazon Mechanical Turk can discover the task.", default=""),
        title=InputValidator(input_type=str, required=True, description="A title for the task for your human workers."),
        description=InputValidator(input_type=str, required=True, description="A description of the task for your human workers."),
        num_workers_per_object=InputValidator(input_type=int, required=True, description="The number of human workers that will label an object."),
        time_limit=InputValidator(input_type=int, required=True, description="The amount of time, in seconds, that a worker has to complete a task."),
        task_availibility=InputValidator(input_type=int, required=False, description="The length of time that a task remains available for labeling by human workers.", default=0),
        max_concurrent_tasks=InputValidator(input_type=int, required=False, description="The maximum number of data objects that can be labeled by human workers at the same time.", default=0),
        workforce_task_price=InputValidator(input_type=float, required=False, description="The price that you pay for each task performed by a public worker in USD. Specify to the tenth fraction of a cent. Format as '0.000'.", default=0.000),
        **vars(COMMON_INPUTS),
    )

    OUTPUTS = SageMakerGroundTruthOutputs(
        output_manifest_location=OutputValidator(description="The Amazon S3 bucket location of the manifest file for labeled data."),
        active_learning_model_arn=OutputValidator(description="The ARN for the most recent Amazon SageMaker model trained as part of automated data labeling."),
    )

    def __init__(self, arguments: List[str]):
        super().__init__(arguments, SageMakerGroundTruthInputs, SageMakerGroundTruthOutputs)

    @property
    def inputs(self) -> SageMakerGroundTruthInputs:
        return self._inputs

    @property
    def outputs(self) -> SageMakerGroundTruthOutputs:
        return self._outputs

    @property
    def output_paths(self) -> SageMakerGroundTruthOutputs:
        return self._output_paths
class SageMakerDeploySpec(SageMakerComponentSpec[SageMakerDeployInputs, SageMakerDeployOutputs]):
    INPUTS: SageMakerDeployInputs = SageMakerDeployInputs(
        endpoint_config_name=InputValidator(input_type=str, required=False, description="The name of the endpoint configuration. If an existing endpoint is being updated, a suffix is automatically added if this config name exists.", default=""),
        update_endpoint=InputValidator(input_type=SpecInputParsers.str_to_bool, required=False, description="Update endpoint if it exists.", default=False),
        variant_name_1=InputValidator(input_type=str, required=False, description="The name of the production variant.", default="variant-name-1"),
        model_name_1=InputValidator(input_type=str, required=True, description="The model name used for endpoint deployment."),
        initial_instance_count_1=InputValidator(input_type=int, required=False, description="Number of instances to launch initially.", default=1),
        instance_type_1=InputValidator(input_type=str, required=False, description="The ML compute instance type.", default="ml.m4.xlarge"),
        initial_variant_weight_1=InputValidator(input_type=float, required=False, description="Determines initial traffic distribution among all of the models that you specify in the endpoint configuration.", default=1.0),
        accelerator_type_1=InputValidator(choices=["ml.eia1.medium", "ml.eia1.large", "ml.eia1.xlarge", ""], input_type=str, required=False, description="The size of the Elastic Inference (EI) instance to use for the production variant.", default=""),
        variant_name_2=InputValidator(input_type=str, required=False, description="The name of the production variant.", default="variant-name-2"),
        model_name_2=InputValidator(input_type=str, required=False, description="The model name used for endpoint deployment.", default=""),
        initial_instance_count_2=InputValidator(input_type=int, required=False, description="Number of instances to launch initially.", default=1),
        instance_type_2=InputValidator(input_type=str, required=False, description="The ML compute instance type.", default="ml.m4.xlarge"),
        initial_variant_weight_2=InputValidator(input_type=float, required=False, description="Determines initial traffic distribution among all of the models that you specify in the endpoint configuration.", default=1.0),
        accelerator_type_2=InputValidator(choices=["ml.eia1.medium", "ml.eia1.large", "ml.eia1.xlarge", ""], input_type=str, required=False, description="The size of the Elastic Inference (EI) instance to use for the production variant.", default=""),
        variant_name_3=InputValidator(input_type=str, required=False, description="The name of the production variant.", default="variant-name-3"),
        model_name_3=InputValidator(input_type=str, required=False, description="The model name used for endpoint deployment.", default=""),
        initial_instance_count_3=InputValidator(input_type=int, required=False, description="Number of instances to launch initially.", default=1),
        instance_type_3=InputValidator(input_type=str, required=False, description="The ML compute instance type.", default="ml.m4.xlarge"),
        initial_variant_weight_3=InputValidator(input_type=float, required=False, description="Determines initial traffic distribution among all of the models that you specify in the endpoint configuration.", default=1.0),
        accelerator_type_3=InputValidator(choices=["ml.eia1.medium", "ml.eia1.large", "ml.eia1.xlarge", ""], input_type=str, required=False, description="The size of the Elastic Inference (EI) instance to use for the production variant.", default=""),
        resource_encryption_key=InputValidator(input_type=str, required=False, description="The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).", default=""),
        endpoint_config_tags=InputValidator(input_type=SpecInputParsers.yaml_or_json_dict, required=False, description="An array of key-value pairs to categorize AWS resources.", default={}),
        endpoint_name=InputValidator(input_type=str, required=False, description="The name of the endpoint.", default=""),
        **vars(COMMON_INPUTS),
    )

    OUTPUTS = SageMakerDeployOutputs(
        endpoint_name=OutputValidator(description="The created endpoint name."),
    )

    def __init__(self, arguments: List[str]):
        super().__init__(arguments, SageMakerDeployInputs, SageMakerDeployOutputs)

    @property
    def inputs(self) -> SageMakerDeployInputs:
        return self._inputs

    @property
    def outputs(self) -> SageMakerDeployOutputs:
        return self._outputs

    @property
    def output_paths(self) -> SageMakerDeployOutputs:
        return self._output_paths
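# Illustrative only: `endpoint_config_tags` is parsed with
# SpecInputParsers.yaml_or_json_dict, so a JSON or YAML string of key-value pairs
# is expected. The keys and values below are placeholders.
import json

example_endpoint_config_tags = json.dumps({"team": "ml-platform", "stage": "dev"})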
class SageMakerRLEstimatorSpec(
    SageMakerComponentSpec[SageMakerRLEstimatorInputs, SageMakerRLEstimatorOutputs]
):
    INPUTS: SageMakerRLEstimatorInputs = SageMakerRLEstimatorInputs(
        job_name=InputValidator(input_type=str, required=False, description="Training job name.", default=""),
        role=InputValidator(input_type=str, required=True, description="The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf."),
        image=InputValidator(input_type=str, required=False, description="An ECR URL. If specified, the estimator will use this image for training and hosting.", default=None),
        entry_point=InputValidator(input_type=str, required=True, description="Path (absolute or relative) to the Python source file which should be executed as the entry point to training.", default=""),
        source_dir=InputValidator(input_type=str, required=False, description="Path (S3 URI) to a directory with any other training source code dependencies aside from the entry point file.", default=""),
        toolkit=InputValidator(input_type=str, choices=["coach", "ray", ""], required=False, description="RL toolkit you want to use for executing your model training code.", default=""),
        toolkit_version=InputValidator(input_type=str, required=False, description="RL toolkit version you want to use for executing your model training code.", default=None),
        framework=InputValidator(input_type=str, choices=["tensorflow", "mxnet", "pytorch", ""], required=False, description="Framework (MXNet, TensorFlow or PyTorch) you want to be used as a toolkit backend for reinforcement learning training.", default=""),
        metric_definitions=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="The list of name-regex pairs that specify the metrics that the algorithm emits.", default=[]),
        training_input_mode=InputValidator(choices=["File", "Pipe"], input_type=str, description="The input mode that the algorithm supports. File or Pipe.", default="File"),
        hyperparameters=InputValidator(input_type=SpecInputParsers.yaml_or_json_dict, required=False, description="Hyperparameters that will be used for training.", default={}),
        instance_type=InputValidator(input_type=str, required=False, description="The ML compute instance type.", default="ml.m4.xlarge"),
        instance_count=InputValidator(input_type=int, required=False, description="The number of ML compute instances to use in the training job.", default=1),
        volume_size=InputValidator(input_type=int, required=True, description="The size of the ML storage volume that you want to provision.", default=30),
        max_run=InputValidator(input_type=int, required=False, description="Timeout in seconds for training (default: 24 * 60 * 60).", default=24 * 60 * 60),
        model_artifact_path=InputValidator(input_type=str, required=True, description="Identifies the S3 path where you want Amazon SageMaker to store the model artifacts."),
        vpc_security_group_ids=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="The VPC security group IDs, in the form sg-xxxxxxxx.", default=[]),
        vpc_subnets=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="The IDs of the subnets in the VPC to which you want to connect your training job.", default=[]),
        network_isolation=InputValidator(input_type=SpecInputParsers.str_to_bool, description="Isolates the training container.", default=False),
        traffic_encryption=InputValidator(input_type=SpecInputParsers.str_to_bool, description="Encrypts all communications between ML compute instances in distributed training.", default=False),
        debug_hook_config=InputValidator(input_type=SpecInputParsers.yaml_or_json_dict, required=False, description="Configuration information for the debug hook parameters, collection configuration, and storage paths.", default={}),
        debug_rule_config=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="Configuration information for debugging rules.", default=[]),
        **vars(COMMON_INPUTS),
        **vars(SPOT_INSTANCE_INPUTS),
    )

    OUTPUTS = SageMakerRLEstimatorOutputs(
        model_artifact_url=OutputValidator(description="The model artifacts URL."),
        job_name=OutputValidator(description="The training job name."),
        training_image=OutputValidator(description="The registry path of the Docker image that contains the training algorithm."),
    )

    def __init__(self, arguments: List[str]):
        super().__init__(arguments, SageMakerRLEstimatorInputs, SageMakerRLEstimatorOutputs)

    @property
    def inputs(self) -> SageMakerRLEstimatorInputs:
        return self._inputs

    @property
    def outputs(self) -> SageMakerRLEstimatorOutputs:
        return self._outputs

    @property
    def output_paths(self) -> SageMakerRLEstimatorOutputs:
        return self._output_paths
class SageMakerTrainingSpec(
    SageMakerComponentSpec[SageMakerTrainingInputs, SageMakerTrainingOutputs]
):
    INPUTS: SageMakerTrainingInputs = SageMakerTrainingInputs(
        job_name=InputValidator(input_type=str, description="The name of the training job.", default=""),
        role=InputValidator(input_type=str, required=True, description="The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf."),
        image=InputValidator(input_type=str, description="The registry path of the Docker image that contains the training algorithm.", default=""),
        algorithm_name=InputValidator(input_type=str, description="The name of the resource algorithm to use for the training job. Do not specify a value for this if using training image.", default=""),
        metric_definitions=InputValidator(input_type=SpecInputParsers.yaml_or_json_dict, description="The dictionary of name-regex pairs that specify the metrics that the algorithm emits.", default={}),
        training_input_mode=InputValidator(choices=["File", "Pipe"], input_type=str, description="The input mode that the algorithm supports. File or Pipe.", default="File"),
        hyperparameters=InputValidator(input_type=SpecInputParsers.yaml_or_json_dict, description="Dictionary of hyperparameters for the algorithm.", default={}),
        channels=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=True, description="A list of dicts specifying the input channels. Must have at least one."),
        instance_type=InputValidator(input_type=str, description="The ML compute instance type.", default="ml.m4.xlarge"),
        instance_count=InputValidator(required=True, input_type=int, description="The number of ML compute instances to use in the training job.", default=1),
        volume_size=InputValidator(input_type=int, required=True, description="The size of the ML storage volume that you want to provision.", default=30),
        resource_encryption_key=InputValidator(input_type=str, description="The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).", default=""),
        model_artifact_path=InputValidator(input_type=str, required=True, description="Identifies the S3 path where you want Amazon SageMaker to store the model artifacts."),
        output_encryption_key=InputValidator(input_type=str, description="The AWS KMS key that Amazon SageMaker uses to encrypt the model artifacts.", default=""),
        vpc_security_group_ids=InputValidator(input_type=str, description="The VPC security group IDs, in the form sg-xxxxxxxx."),
        vpc_subnets=InputValidator(input_type=str, description="The IDs of the subnets in the VPC to which you want to connect your training job."),
        network_isolation=InputValidator(input_type=SpecInputParsers.str_to_bool, description="Isolates the training container.", default=True),
        traffic_encryption=InputValidator(input_type=SpecInputParsers.str_to_bool, description="Encrypts all communications between ML compute instances in distributed training.", default=False),
        debug_hook_config=InputValidator(input_type=SpecInputParsers.yaml_or_json_dict, description="Configuration information for the debug hook parameters, collection configuration, and storage paths.", default={}),
        debug_rule_config=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, description="Configuration information for debugging rules.", default=[]),
        **vars(COMMON_INPUTS),
        **vars(SPOT_INSTANCE_INPUTS),
    )

    OUTPUTS = SageMakerTrainingOutputs(
        model_artifact_url=OutputValidator(description="The model artifacts URL."),
        job_name=OutputValidator(description="The training job name."),
        training_image=OutputValidator(description="The registry path of the Docker image that contains the training algorithm."),
    )

    def __init__(self, arguments: List[str]):
        super().__init__(arguments, SageMakerTrainingInputs, SageMakerTrainingOutputs)

    @property
    def inputs(self) -> SageMakerTrainingInputs:
        return self._inputs

    @property
    def outputs(self) -> SageMakerTrainingOutputs:
        return self._outputs

    @property
    def output_paths(self) -> SageMakerTrainingOutputs:
        return self._output_paths
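# Illustrative only: `metric_definitions` and `hyperparameters` are parsed with
# SpecInputParsers.yaml_or_json_dict, so they are typically supplied as JSON or
# YAML strings. The metric names, regexes, and hyperparameter values below are
# placeholders, not values required by the component.
import json

example_metric_definitions = json.dumps(
    {
        "train:error": "Train_error=(.*?);",       # placeholder regex
        "validation:error": "Valid_error=(.*?);",  # placeholder regex
    }
)
example_hyperparameters = json.dumps({"max_depth": "5", "eta": "0.2"})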
class SageMakerProcessSpec(SageMakerComponentSpec[SageMakerProcessInputs, SageMakerProcessOutputs]):
    INPUTS: SageMakerProcessInputs = SageMakerProcessInputs(
        job_name=InputValidator(input_type=str, required=False, description="The name of the processing job.", default=""),
        role=InputValidator(input_type=str, required=True, description="The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf."),
        image=InputValidator(input_type=str, required=True, description="The registry path of the Docker image that contains the processing container.", default=""),
        instance_type=InputValidator(required=True, input_type=str, description="The ML compute instance type.", default="ml.m4.xlarge"),
        instance_count=InputValidator(required=True, input_type=int, description="The number of ML compute instances to use in each processing job.", default=1),
        volume_size=InputValidator(input_type=int, required=False, description="The size of the ML storage volume that you want to provision.", default=30),
        resource_encryption_key=InputValidator(input_type=str, required=False, description="The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).", default=""),
        output_encryption_key=InputValidator(input_type=str, required=False, description="The AWS KMS key that Amazon SageMaker uses to encrypt the processing artifacts.", default=""),
        max_run_time=InputValidator(input_type=int, required=False, description="The maximum run time in seconds for the processing job.", default=86400),
        environment=InputValidator(input_type=SpecInputParsers.yaml_or_json_dict, required=False, description="The dictionary of the environment variables to set in the Docker container. Up to 16 key-value entries in the map.", default={}),
        container_entrypoint=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="The entrypoint for the processing job. This is in the form of a list of strings that make a command.", default=[]),
        container_arguments=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="A list of string arguments to be passed to a processing job.", default=[]),
        input_config=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="Parameters that specify Amazon S3 inputs for a processing job.", default=[]),
        output_config=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=True, description="Parameters that specify Amazon S3 outputs for a processing job.", default=[]),
        vpc_security_group_ids=InputValidator(input_type=str, required=False, description="The VPC security group IDs, in the form sg-xxxxxxxx."),
        vpc_subnets=InputValidator(input_type=str, required=False, description="The IDs of the subnets in the VPC to which you want to connect your processing job."),
        network_isolation=InputValidator(input_type=SpecInputParsers.str_to_bool, required=False, description="Isolates the processing container.", default=True),
        traffic_encryption=InputValidator(input_type=SpecInputParsers.str_to_bool, required=False, description="Encrypts all communications between ML compute instances in distributed processing.", default=False),
        **vars(COMMON_INPUTS),
    )

    OUTPUTS = SageMakerProcessOutputs(
        job_name=OutputValidator(description="Processing job name."),
        output_artifacts=OutputValidator(description="A dictionary containing the output S3 artifacts."),
    )

    def __init__(self, arguments: List[str]):
        super().__init__(arguments, SageMakerProcessInputs, SageMakerProcessOutputs)

    @property
    def inputs(self) -> SageMakerProcessInputs:
        return self._inputs

    @property
    def outputs(self) -> SageMakerProcessOutputs:
        return self._outputs

    @property
    def output_paths(self) -> SageMakerProcessOutputs:
        return self._output_paths
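# Illustrative only: `input_config` and `output_config` are parsed with
# SpecInputParsers.yaml_or_json_list. The dict shapes below mirror the SageMaker
# ProcessingInput/ProcessingOutput structures; the URIs, names, and local paths
# are placeholders.
import json

example_input_config = json.dumps(
    [
        {
            "InputName": "raw-data",
            "S3Input": {
                "S3Uri": "s3://my-example-bucket/raw",  # placeholder URI
                "LocalPath": "/opt/ml/processing/input",
                "S3DataType": "S3Prefix",
                "S3InputMode": "File",
            },
        }
    ]
)
example_output_config = json.dumps(
    [
        {
            "OutputName": "clean-data",
            "S3Output": {
                "S3Uri": "s3://my-example-bucket/clean",  # placeholder URI
                "LocalPath": "/opt/ml/processing/output",
                "S3UploadMode": "EndOfJob",
            },
        }
    ]
)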
class SageMakerCreateModelSpec(
    SageMakerComponentSpec[SageMakerCreateModelInputs, SageMakerCreateModelOutputs]
):
    INPUTS: SageMakerCreateModelInputs = SageMakerCreateModelInputs(
        model_name=InputValidator(input_type=str, required=True, description="The name of the new model."),
        role=InputValidator(input_type=str, required=True, description="The Amazon Resource Name (ARN) that Amazon SageMaker assumes to perform tasks on your behalf."),
        container_host_name=InputValidator(input_type=str, required=False, description="When a ContainerDefinition is part of an inference pipeline, this value uniquely identifies the container for the purposes of logging and metrics.", default=""),
        image=InputValidator(input_type=str, required=False, description="The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored.", default=""),
        model_artifact_url=InputValidator(input_type=str, required=False, description="The S3 path where the model artifacts are stored.", default=""),
        environment=InputValidator(input_type=SpecInputParsers.yaml_or_json_dict, required=False, description="The dictionary of the environment variables to set in the Docker container. Up to 16 key-value entries in the map.", default={}),
        model_package=InputValidator(input_type=str, required=False, description="The name or Amazon Resource Name (ARN) of the model package to use to create the model.", default=""),
        secondary_containers=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="A list of dicts that specifies the additional containers in the inference pipeline.", default=[]),
        vpc_security_group_ids=InputValidator(input_type=str, required=False, description="The VPC security group IDs, in the form sg-xxxxxxxx.", default=""),
        vpc_subnets=InputValidator(input_type=str, required=False, description="The IDs of the subnets in the VPC to which you want to connect your model.", default=""),
        network_isolation=InputValidator(input_type=SpecInputParsers.str_to_bool, required=False, description="Isolates the model container.", default=True),
        **vars(COMMON_INPUTS),
    )

    OUTPUTS = SageMakerCreateModelOutputs(
        model_name=OutputValidator(description="The name of the model created by SageMaker."),
    )

    def __init__(self, arguments: List[str]):
        super().__init__(arguments, SageMakerCreateModelInputs, SageMakerCreateModelOutputs)

    @property
    def inputs(self) -> SageMakerCreateModelInputs:
        return self._inputs

    @property
    def outputs(self) -> SageMakerCreateModelOutputs:
        return self._outputs

    @property
    def output_paths(self) -> SageMakerCreateModelOutputs:
        return self._output_paths
class RoboMakerSimulationJobSpec(
    SageMakerComponentSpec[RoboMakerSimulationJobInputs, RoboMakerSimulationJobOutputs]
):
    INPUTS: RoboMakerSimulationJobInputs = RoboMakerSimulationJobInputs(
        role=InputValidator(input_type=str, required=True, description="The Amazon Resource Name (ARN) that Amazon RoboMaker assumes to perform tasks on your behalf."),
        output_bucket=InputValidator(input_type=str, required=True, description="The bucket to place outputs from the simulation job.", default=""),
        output_path=InputValidator(input_type=str, required=True, description="The S3 key where outputs from the simulation job are placed.", default=""),
        max_run=InputValidator(input_type=int, required=True, description="Timeout in seconds for the simulation job (default: 8 * 60 * 60).", default=8 * 60 * 60),
        failure_behavior=InputValidator(input_type=str, required=False, description="The failure behavior of the simulation job (Continue|Fail).", default="Fail"),
        sim_app_arn=InputValidator(input_type=str, required=False, description="The application ARN for the simulation application.", default=""),
        sim_app_version=InputValidator(input_type=str, required=False, description="The application version for the simulation application.", default=""),
        sim_app_launch_config=InputValidator(input_type=SpecInputParsers.yaml_or_json_dict, required=False, description="The launch configuration for the simulation application.", default={}),
        sim_app_world_config=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="A list of world configurations.", default=[]),
        robot_app_arn=InputValidator(input_type=str, required=False, description="The application ARN for the robot application.", default=""),
        robot_app_version=InputValidator(input_type=str, required=False, description="The application version for the robot application.", default=""),
        robot_app_launch_config=InputValidator(input_type=SpecInputParsers.yaml_or_json_dict, required=False, description="The launch configuration for the robot application.", default={}),
        data_sources=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="Specify data sources to mount read-only files from S3 into your simulation.", default=[]),
        vpc_security_group_ids=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="The VPC security group IDs, in the form sg-xxxxxxxx.", default=[]),
        vpc_subnets=InputValidator(input_type=SpecInputParsers.yaml_or_json_list, required=False, description="The IDs of the subnets in the VPC to which you want to connect your simulation job.", default=[]),
        use_public_ip=InputValidator(input_type=bool, description="A boolean indicating whether to assign a public IP address.", default=False),
        sim_unit_limit=InputValidator(input_type=int, required=False, description="The simulation unit limit.", default=15),
        record_ros_topics=InputValidator(input_type=bool, description="A boolean indicating whether to record all ROS topics. Used for logging.", default=False),
        **vars(COMMON_INPUTS),
    )

    OUTPUTS = RoboMakerSimulationJobOutputs(
        arn=OutputValidator(description="The Amazon Resource Name (ARN) of the simulation job."),
        output_artifacts=OutputValidator(description="The simulation job artifacts URL."),
        job_id=OutputValidator(description="The simulation job id."),
    )

    def __init__(self, arguments: List[str]):
        super().__init__(
            arguments,
            RoboMakerSimulationJobInputs,
            RoboMakerSimulationJobOutputs,
        )

    @property
    def inputs(self) -> RoboMakerSimulationJobInputs:
        return self._inputs

    @property
    def outputs(self) -> RoboMakerSimulationJobOutputs:
        return self._outputs

    @property
    def output_paths(self) -> RoboMakerSimulationJobOutputs:
        return self._output_paths
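# Illustrative only: `sim_app_launch_config` and `robot_app_launch_config` are
# parsed with SpecInputParsers.yaml_or_json_dict. The keys below mirror the
# RoboMaker launch configuration structure; the package name, launch file, and
# environment variable are placeholders.
import json

example_sim_app_launch_config = json.dumps(
    {
        "packageName": "my_simulation_pkg",  # placeholder package
        "launchFile": "my_world.launch",     # placeholder launch file
        "environmentVariables": {"EXAMPLE_VAR": "value"},
    }
)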