Example No. 1
def test_py2_xgboost_error(sagemaker_session, xgboost_framework_version):
    with pytest.raises(ValueError) as error1:
        XGBoost(
            entry_point=SCRIPT_PATH,
            role=ROLE,
            framework_version=xgboost_framework_version,
            sagemaker_session=sagemaker_session,
            instance_type=INSTANCE_TYPE,
            instance_count=1,
            py_version="py2",
        )

    with pytest.raises(ValueError) as error2:
        model = XGBoostModel(
            model_data=DATA_DIR,
            role=ROLE,
            sagemaker_session=sagemaker_session,
            entry_point=SCRIPT_PATH,
            framework_version=xgboost_framework_version,
            py_version="py2",
        )
        model.serving_image_uri(REGION, INSTANCE_TYPE)

    error_message = "Unsupported Python version: py2."
    assert error_message in str(error1.value)
    assert error_message in str(error2.value)
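These tests lean on module-level constants (SCRIPT_PATH, ROLE, DATA_DIR, REGION, INSTANCE_TYPE, CPU, XGBOOST_LATEST_VERSION) and pytest fixtures that the listing omits. A minimal sketch with assumed values so the snippets can be read in isolation; the real test module defines its own:

import os

# Assumed stand-ins for the omitted module-level test constants.
DATA_DIR = os.path.join(os.path.dirname(__file__), "..", "data")
SCRIPT_PATH = os.path.join(DATA_DIR, "dummy_script.py")
ROLE = "Dummy"                     # placeholder IAM role used with a mocked session
REGION = "us-west-2"
INSTANCE_TYPE = "ml.c4.4xlarge"
CPU = "ml.c4.xlarge"
XGBOOST_LATEST_VERSION = "1.2-1"   # pin to whatever version the tests target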
Example No. 2
def test_model(sagemaker_session):
    model = XGBoostModel(
        "s3://some/data.tar.gz",
        role=ROLE,
        framework_version=XGBOOST_LATEST_VERSION,
        entry_point=SCRIPT_PATH,
        sagemaker_session=sagemaker_session,
    )
    predictor = model.deploy(1, CPU)
    assert isinstance(predictor, XGBoostPredictor)
Example No. 3
def main():
    print('Starting model training.')
    print('Note: if launching for the first time in local mode, container image download might take a few minutes to complete.')

    hyperparameters = {
        "max_depth": "5",
        "eta": "0.2",
        "gamma": "4",
        "min_child_weight": "6",
        "subsample": "0.7",
        "objective": "reg:squarederror",
        "num_round": "50",
        "verbosity": "2",
    }

    xgb_script_mode_estimator = XGBoost(
        entry_point="./code/abalone.py",
        hyperparameters=hyperparameters,
        role=DUMMY_IAM_ROLE,
        instance_count=1,
        instance_type='local',
        framework_version="1.2-1"
    )

    train_input = TrainingInput("file://./data/train/abalone", content_type="text/libsvm")

    xgb_script_mode_estimator.fit({"train": train_input, "validation": train_input})

    print('Completed model training')

    model_data = xgb_script_mode_estimator.model_data
    print(model_data)

    xgb_inference_model = XGBoostModel(
        model_data=model_data,
        role=DUMMY_IAM_ROLE,
        entry_point="./code/inference.py",
        framework_version="1.2-1",
    )

    print('Deploying endpoint in local mode')
    predictor = xgb_inference_model.deploy(
        initial_instance_count=1,
        instance_type="local",
    )

    a_young_abalone = "6 1:3 2:0.37 3:0.29 4:0.095 5:0.249 6:0.1045 7:0.058 8:0.067"
    do_inference_on_local_endpoint(predictor, a_young_abalone)

    an_old_abalone = "15 1:1 2:0.655 3:0.53 4:0.175 5:1.2635 6:0.486 7:0.2635 8:0.415"
    do_inference_on_local_endpoint(predictor, an_old_abalone)

    print('About to delete the endpoint to stop paying (if in cloud mode).')
    predictor.delete_endpoint()
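main() calls do_inference_on_local_endpoint, which the listing omits. A minimal sketch, assuming the endpoint accepts LibSVM-formatted text like the sample records above (the helper's real body may differ):

def do_inference_on_local_endpoint(predictor, libsvm_record):
    # Hypothetical reconstruction: strip the leading label, send the features
    # to the local endpoint, and print both for comparison.
    label, _, features = libsvm_record.partition(" ")
    prediction = predictor.predict(features)
    print(f"Label: {label}, prediction: {prediction}")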
Example No. 4
def test_create_model(sagemaker_session):
    source_dir = "s3://mybucket/source"

    xgboost_model = XGBoostModel(
        model_data=source_dir,
        role=ROLE,
        sagemaker_session=sagemaker_session,
        entry_point=SCRIPT_PATH,
        framework_version=XGBOOST_LATEST_VERSION,
    )
    default_image_uri = _get_full_cpu_image_uri(XGBOOST_LATEST_VERSION)
    model_values = xgboost_model.prepare_container_def(CPU)
    assert model_values["Image"] == default_image_uri
Example No. 5
def test_py2_xgboost_attribute_error(sagemaker_session):
    with pytest.raises(AttributeError) as error1:
        XGBoost(
            entry_point=SCRIPT_PATH,
            role=ROLE,
            framework_version=XGBOOST_LATEST_VERSION,
            sagemaker_session=sagemaker_session,
            train_instance_type=INSTANCE_TYPE,
            train_instance_count=1,
            py_version="py2",
        )

    with pytest.raises(AttributeError) as error2:
        XGBoostModel(
            model_data=DATA_DIR,
            role=ROLE,
            sagemaker_session=sagemaker_session,
            entry_point=SCRIPT_PATH,
            framework_version=XGBOOST_LATEST_VERSION,
            py_version="py2",
        )

    error_message = "XGBoost container does not support Python 2, please use Python 3"
    assert error_message in str(error1.value)
    assert error_message in str(error2.value)
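This test exercises the SDK v1 parameter names train_instance_type and train_instance_count, where Python 2 is rejected with an AttributeError. In sagemaker>=2.0 those arguments were renamed, and the same misuse raises a ValueError instead, as Example No. 1 shows. The v2 spelling of the first call, for comparison:

XGBoost(
    entry_point=SCRIPT_PATH,
    role=ROLE,
    framework_version=XGBOOST_LATEST_VERSION,
    sagemaker_session=sagemaker_session,
    instance_type=INSTANCE_TYPE,   # renamed from train_instance_type in v2
    instance_count=1,              # renamed from train_instance_count in v2
    py_version="py2",              # rejected with ValueError in v2
)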
Example No. 6
def test_model_custom_serialization(sagemaker_session,
                                    xgboost_framework_version):
    model = XGBoostModel(
        "s3://some/data.tar.gz",
        role=ROLE,
        framework_version=xgboost_framework_version,
        entry_point=SCRIPT_PATH,
        sagemaker_session=sagemaker_session,
    )
    custom_serializer = Mock()
    custom_deserializer = Mock()
    predictor = model.deploy(
        1,
        CPU,
        serializer=custom_serializer,
        deserializer=custom_deserializer,
    )
    assert isinstance(predictor, XGBoostPredictor)
    assert predictor.serializer is custom_serializer
    assert predictor.deserializer is custom_deserializer
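Here the serializer and deserializer are bare Mock objects, which is enough to assert they are passed through to the predictor. Outside a unit test you would pass real implementations from sagemaker.serializers and sagemaker.deserializers, for example:

from sagemaker.serializers import CSVSerializer
from sagemaker.deserializers import JSONDeserializer

# Send CSV request bodies and parse JSON responses; the instance type is a
# placeholder, pick one appropriate for the model.
predictor = model.deploy(
    initial_instance_count=1,
    instance_type="ml.m5.xlarge",
    serializer=CSVSerializer(),
    deserializer=JSONDeserializer(),
)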
Example No. 7
def test_create_model_with_network_isolation(upload, sagemaker_session,
                                             xgboost_framework_version):
    source_dir = "s3://mybucket/source"
    repacked_model_data = "s3://mybucket/prefix/model.tar.gz"

    xgboost_model = XGBoostModel(
        model_data=source_dir,
        role=ROLE,
        sagemaker_session=sagemaker_session,
        entry_point=SCRIPT_PATH,
        framework_version=xgboost_framework_version,
        enable_network_isolation=True,
    )
    xgboost_model.uploaded_code = UploadedCode(s3_prefix=repacked_model_data,
                                               script_name="script")
    xgboost_model.repacked_model_data = repacked_model_data
    model_values = xgboost_model.prepare_container_def(CPU)
    assert model_values["Environment"][
        "SAGEMAKER_SUBMIT_DIRECTORY"] == "/opt/ml/model/code"
    assert model_values["ModelDataUrl"] == repacked_model_data
Example No. 8
def test_xgboost_serverless_inference(
    xgboost_training_job,
    sagemaker_session,
    xgboost_latest_version,
):
    endpoint_name = unique_name_from_base(
        "test-xgboost-deploy-model-serverless")
    with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):
        desc = sagemaker_session.sagemaker_client.describe_training_job(
            TrainingJobName=xgboost_training_job)
        model_data = desc["ModelArtifacts"]["S3ModelArtifacts"]

        xgboost = XGBoostModel(
            sagemaker_session=sagemaker_session,
            model_data=model_data,
            role=ROLE,
            entry_point=os.path.join(DATA_DIR, "xgboost_abalone",
                                     "abalone.py"),
            framework_version=xgboost_latest_version,
        )

        xgboost.deploy(serverless_inference_config=ServerlessInferenceConfig(),
                       endpoint_name=endpoint_name)
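ServerlessInferenceConfig() above takes the defaults. It exposes two settings, memory_size_in_mb and max_concurrency, which you can set explicitly:

from sagemaker.serverless import ServerlessInferenceConfig

# 2 GB of memory and at most 5 concurrent invocations (the current defaults).
serverless_config = ServerlessInferenceConfig(
    memory_size_in_mb=2048,
    max_concurrency=5,
)
xgboost.deploy(
    serverless_inference_config=serverless_config,
    endpoint_name=endpoint_name,
)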
Example No. 9
def test_sklearn_xgboost_sip_model_registration(sagemaker_session, role,
                                                pipeline_name, region_name):
    prefix = "sip"
    bucket_name = sagemaker_session.default_bucket()
    instance_count = ParameterInteger(name="InstanceCount", default_value=1)
    instance_type = ParameterString(name="InstanceType",
                                    default_value="ml.m5.xlarge")

    sklearn_processor = SKLearnProcessor(
        role=role,
        instance_type=instance_type,
        instance_count=instance_count,
        framework_version="0.20.0",
        sagemaker_session=sagemaker_session,
    )

    # The path to the raw data.
    raw_data_path = "s3://{0}/{1}/data/raw/".format(bucket_name, prefix)
    raw_data_path_param = ParameterString(name="raw_data_path",
                                          default_value=raw_data_path)

    # The output path to the training data.
    train_data_path = "s3://{0}/{1}/data/preprocessed/train/".format(
        bucket_name, prefix)
    train_data_path_param = ParameterString(name="train_data_path",
                                            default_value=train_data_path)

    # The output path to the validation data.
    val_data_path = "s3://{0}/{1}/data/preprocessed/val/".format(
        bucket_name, prefix)
    val_data_path_param = ParameterString(name="val_data_path",
                                          default_value=val_data_path)

    # The training output path for the model.
    output_path = "s3://{0}/{1}/output/".format(bucket_name, prefix)
    output_path_param = ParameterString(name="output_path",
                                        default_value=output_path)

    # The output path to the featurizer model.
    model_path = "s3://{0}/{1}/output/sklearn/".format(bucket_name, prefix)
    model_path_param = ParameterString(name="model_path",
                                       default_value=model_path)

    inputs = [
        ProcessingInput(
            input_name="raw_data",
            source=raw_data_path_param,
            destination="/opt/ml/processing/input",
        )
    ]

    outputs = [
        ProcessingOutput(
            output_name="train_data",
            source="/opt/ml/processing/train",
            destination=train_data_path_param,
        ),
        ProcessingOutput(
            output_name="val_data",
            source="/opt/ml/processing/val",
            destination=val_data_path_param,
        ),
        ProcessingOutput(
            output_name="model",
            source="/opt/ml/processing/model",
            destination=model_path_param,
        ),
    ]

    base_dir = os.path.join(DATA_DIR, "sip")
    code_path = os.path.join(base_dir, "preprocessor.py")

    processing_step = ProcessingStep(
        name="Processing",
        code=code_path,
        processor=sklearn_processor,
        inputs=inputs,
        outputs=outputs,
        job_arguments=["--train-test-split-ratio", "0.2"],
    )

    entry_point = "training.py"
    source_dir = base_dir
    code_location = "s3://{0}/{1}/code".format(bucket_name, prefix)

    estimator = XGBoost(
        entry_point=entry_point,
        source_dir=source_dir,
        output_path=output_path_param,
        code_location=code_location,
        instance_type=instance_type,
        instance_count=instance_count,
        framework_version="0.90-2",
        sagemaker_session=sagemaker_session,
        py_version="py3",
        role=role,
    )

    training_step = TrainingStep(
        name="Training",
        estimator=estimator,
        inputs={
            "train":
            TrainingInput(
                s3_data=processing_step.properties.ProcessingOutputConfig.
                Outputs["train_data"].S3Output.S3Uri,
                content_type="text/csv",
            ),
            "validation":
            TrainingInput(
                s3_data=processing_step.properties.ProcessingOutputConfig.
                Outputs["val_data"].S3Output.S3Uri,
                content_type="text/csv",
            ),
        },
    )

    code_location = "s3://{0}/{1}/code".format(bucket_name, prefix)
    source_dir = os.path.join(base_dir, "sklearn_source_dir")

    sklearn_model = SKLearnModel(
        name="sklearn-model",
        model_data=processing_step.properties.ProcessingOutputConfig.
        Outputs["model"].S3Output.S3Uri,
        entry_point="inference.py",
        source_dir=source_dir,
        code_location=code_location,
        role=role,
        sagemaker_session=sagemaker_session,
        framework_version="0.20.0",
        py_version="py3",
    )

    code_location = "s3://{0}/{1}/code".format(bucket_name, prefix)
    source_dir = os.path.join(base_dir, "xgboost_source_dir")

    xgboost_model = XGBoostModel(
        name="xgboost-model",
        model_data=training_step.properties.ModelArtifacts.S3ModelArtifacts,
        entry_point="inference.py",
        source_dir=source_dir,
        code_location=code_location,
        framework_version="0.90-2",
        py_version="py3",
        role=role,
        sagemaker_session=sagemaker_session,
    )

    pipeline_model = PipelineModel([xgboost_model, sklearn_model],
                                   role,
                                   sagemaker_session=sagemaker_session)

    step_register = RegisterModel(
        name="AbaloneRegisterModel",
        model=pipeline_model,
        content_types=["application/json"],
        response_types=["application/json"],
        inference_instances=["ml.t2.medium", "ml.m5.xlarge"],
        transform_instances=["ml.m5.xlarge"],
        model_package_group_name="windturbine",
    )

    pipeline = Pipeline(
        name=pipeline_name,
        parameters=[
            raw_data_path_param,
            train_data_path_param,
            val_data_path_param,
            model_path_param,
            instance_type,
            instance_count,
            output_path_param,
        ],
        steps=[processing_step, training_step, step_register],
        sagemaker_session=sagemaker_session,
    )

    try:
        response = pipeline.upsert(role_arn=role)
        create_arn = response["PipelineArn"]
        assert re.match(
            rf"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}",
            create_arn,
        )

        execution = pipeline.start(parameters={})
        assert re.match(
            rf"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}/execution/",
            execution.arn,
        )

        execution = pipeline.start()
        assert re.match(
            rf"arn:aws:sagemaker:{region_name}:\d{{12}}:pipeline/{pipeline_name}/execution/",
            execution.arn,
        )
    finally:
        try:
            pipeline.delete()
        except Exception:
            pass
Example No. 10
def get_pipeline(
        region,
        sagemaker_project_arn=None,
        role=None,
        default_bucket='',
        pipeline_name='end-to-end-ml-sagemaker-pipeline',
        model_package_group_name='end-to-end-ml-sm-model-package-group',
        base_job_prefix='endtoendmlsm') -> Pipeline:
    """
    Gets the SM Pipeline.

    :param role: The execution role.
    :param bucket_name: The bucket where pipeline artifacts are stored.
    :param prefix: The prefix where pipeline artifacts are stored.
    :return: A Pipeline instance.
    """

    bucket_name = default_bucket
    prefix = base_job_prefix
    sagemaker_session = get_session(region, bucket_name)

    # ---------------------
    # Processing parameters
    # ---------------------
    # The path to the raw data.
    raw_data_path = 's3://gianpo-public/endtoendml/data/raw/predmain_raw_data_header.csv'
    raw_data_path_param = ParameterString(name="raw_data_path",
                                          default_value=raw_data_path)
    # The output path to the training data.
    train_data_path = 's3://{0}/{1}/data/preprocessed/train/'.format(
        bucket_name, prefix)
    train_data_path_param = ParameterString(name="train_data_path",
                                            default_value=train_data_path)
    # The output path to the validation data.
    val_data_path = 's3://{0}/{1}/data/preprocessed/val/'.format(
        bucket_name, prefix)
    val_data_path_param = ParameterString(name="val_data_path",
                                          default_value=val_data_path)
    # The output path to the featurizer model.
    model_path = 's3://{0}/{1}/output/sklearn/'.format(bucket_name, prefix)
    model_path_param = ParameterString(name="model_path",
                                       default_value=model_path)
    # The instance type for the processing job.
    processing_instance_type_param = ParameterString(
        name="processing_instance_type", default_value='ml.m5.large')
    # The instance count for the processing job.
    processing_instance_count_param = ParameterInteger(
        name="processing_instance_count", default_value=1)
    # The train/test split ratio parameter.
    train_test_split_ratio_param = ParameterString(
        name="train_test_split_ratio", default_value='0.2')
    # -------------------
    # Training parameters
    # -------------------
    # XGB hyperparameters.
    max_depth_param = ParameterString(name="max_depth", default_value='3')
    eta_param = ParameterString(name="eta", default_value='0.1')
    gamma_param = ParameterString(name="gamma", default_value='0')
    min_child_weight_param = ParameterString(name="min_child_weight",
                                             default_value='1')
    objective_param = ParameterString(name="objective",
                                      default_value='binary:logistic')
    num_round_param = ParameterString(name="num_round", default_value='10')
    eval_metric_param = ParameterString(name="eval_metric",
                                        default_value='auc')
    # The instance type for the training job.
    training_instance_type_param = ParameterString(
        name="training_instance_type", default_value='ml.m5.xlarge')
    # The instance count for the training job.
    training_instance_count_param = ParameterInteger(
        name="training_instance_count", default_value=1)
    # The training output path for the model.
    output_path = 's3://{0}/{1}/output/'.format(bucket_name, prefix)
    output_path_param = ParameterString(name="output_path",
                                        default_value=output_path)
    # --------------------------
    # Register model parameters
    # --------------------------
    # The default instance type for deployment.
    deploy_instance_type_param = ParameterString(name="deploy_instance_type",
                                                 default_value='ml.m5.2xlarge')
    # The approval status for models added to the registry.
    model_approval_status_param = ParameterString(
        name="model_approval_status", default_value='PendingManualApproval')
    # --------------------------
    # Processing Step
    # --------------------------
    sklearn_processor = SKLearnProcessor(
        role=role,
        instance_type=processing_instance_type_param,
        instance_count=processing_instance_count_param,
        framework_version='0.20.0')
    inputs = [
        ProcessingInput(input_name='raw_data',
                        source=raw_data_path_param,
                        destination='/opt/ml/processing/input')
    ]
    outputs = [
        ProcessingOutput(output_name='train_data',
                         source='/opt/ml/processing/train',
                         destination=train_data_path_param),
        ProcessingOutput(output_name='val_data',
                         source='/opt/ml/processing/val',
                         destination=val_data_path_param),
        ProcessingOutput(output_name='model',
                         source='/opt/ml/processing/model',
                         destination=model_path_param)
    ]
    code_path = os.path.join(BASE_DIR, 'dataprep/preprocess.py')
    processing_step = ProcessingStep(name='Processing',
                                     code=code_path,
                                     processor=sklearn_processor,
                                     inputs=inputs,
                                     outputs=outputs,
                                     job_arguments=[
                                         '--train-test-split-ratio',
                                         train_test_split_ratio_param
                                     ])
    # --------------------------
    # Training Step
    # --------------------------
    hyperparameters = {
        "max_depth": max_depth_param,
        "eta": eta_param,
        "gamma": gamma_param,
        "min_child_weight": min_child_weight_param,
        "silent": 0,
        "objective": objective_param,
        "num_round": num_round_param,
        "eval_metric": eval_metric_param
    }
    entry_point = 'train.py'
    source_dir = os.path.join(BASE_DIR, 'train/')
    code_location = 's3://{0}/{1}/code'.format(bucket_name, prefix)
    estimator = XGBoost(entry_point=entry_point,
                        source_dir=source_dir,
                        output_path=output_path_param,
                        code_location=code_location,
                        hyperparameters=hyperparameters,
                        instance_type=training_instance_type_param,
                        instance_count=training_instance_count_param,
                        framework_version="0.90-2",
                        py_version="py3",
                        role=role)
    training_step = TrainingStep(
        name='Training',
        estimator=estimator,
        inputs={
            'train':
            TrainingInput(
                s3_data=processing_step.properties.ProcessingOutputConfig.
                Outputs['train_data'].S3Output.S3Uri,
                content_type='text/csv'),
            'validation':
            TrainingInput(
                s3_data=processing_step.properties.ProcessingOutputConfig.
                Outputs['val_data'].S3Output.S3Uri,
                content_type='text/csv')
        })
    # --------------------------
    # Register Model Step
    # --------------------------
    code_location = 's3://{0}/{1}/code'.format(bucket_name, prefix)
    sklearn_model = SKLearnModel(
        name='end-to-end-ml-sm-skl-model-{0}'.format(str(int(time.time()))),
        model_data=processing_step.properties.ProcessingOutputConfig.
        Outputs['model'].S3Output.S3Uri,
        entry_point='inference.py',
        source_dir=os.path.join(BASE_DIR, 'deploy/sklearn/'),
        code_location=code_location,
        role=role,
        sagemaker_session=sagemaker_session,
        framework_version='0.20.0',
        py_version='py3')
    code_location = 's3://{0}/{1}/code'.format(bucket_name, prefix)
    xgboost_model = XGBoostModel(
        name='end-to-end-ml-sm-xgb-model-{0}'.format(str(int(time.time()))),
        model_data=training_step.properties.ModelArtifacts.S3ModelArtifacts,
        entry_point='inference.py',
        source_dir=os.path.join(BASE_DIR, 'deploy/xgboost/'),
        code_location=code_location,
        framework_version='0.90-2',
        py_version='py3',
        role=role,
        sagemaker_session=sagemaker_session)
    pipeline_model_name = 'end-to-end-ml-sm-xgb-skl-pipeline-{0}'.format(
        str(int(time.time())))
    pipeline_model = PipelineModel(name=pipeline_model_name,
                                   role=role,
                                   models=[sklearn_model, xgboost_model],
                                   sagemaker_session=sagemaker_session)

    register_model_step = RegisterModel(
        name='RegisterModel',
        content_types=['text/csv'],
        response_types=['application/json', 'text/csv'],
        inference_instances=[deploy_instance_type_param, 'ml.m5.large'],
        transform_instances=['ml.c5.4xlarge'],
        model_package_group_name=model_package_group_name,
        approval_status=model_approval_status_param,
        model=pipeline_model)
    # --------------------------
    # Pipeline
    # --------------------------

    pipeline = Pipeline(
        name=pipeline_name,
        parameters=[
            raw_data_path_param, train_data_path_param, val_data_path_param,
            model_path_param, processing_instance_type_param,
            processing_instance_count_param, train_test_split_ratio_param,
            max_depth_param, eta_param, gamma_param, min_child_weight_param,
            objective_param, num_round_param, eval_metric_param,
            training_instance_type_param, training_instance_count_param,
            output_path_param, deploy_instance_type_param,
            model_approval_status_param
        ],
        steps=[processing_step, training_step, register_model_step],
        sagemaker_session=sagemaker_session,
    )
    response = pipeline.upsert(role_arn=role)
    print(response["PipelineArn"])
    return pipeline
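A typical invocation of get_pipeline, with placeholder role and bucket values:

# Hypothetical driver: build or update the pipeline, then run it once.
pipeline = get_pipeline(
    region="eu-west-1",
    role="arn:aws:iam::123456789012:role/SageMakerExecutionRole",
    default_bucket="my-sagemaker-artifacts-bucket",
)
execution = pipeline.start()
execution.wait()  # block until the execution completes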