def test_save_load():
    """Round-trip a sklearn BayesianRidge model through the generic save/load
    path and verify that the reloaded model reproduces the original predictions,
    then dump the input frame as a DataFrameDirectory fixture."""
    regressor = BayesianRidge(compute_score=True)
    X, y = get_traininig_data()
    regressor.fit(X, y)
    expected = regressor.predict(X)

    wrapped = MyCustomModel(regressor)
    conda_env = {
        "name": "test",
        "channels": ["defaults"],
        "dependencies": [{"pip": ["scipy", "sklearn"]}],
    }

    base_dir = dirname(abspath(__file__))
    save_dir = os.path.join(base_dir, "AzureMLModel")

    save_generic_model(wrapped,
                       path=save_dir,
                       conda=conda_env,
                       local_dependencies=[base_dir])

    # Predict expects string column names on the input frame.
    frame = pd.DataFrame(data=X)
    frame.columns = frame.columns.astype(str)

    reloaded = load_generic_model(save_dir)
    predictions = reloaded.predict(frame)
    assert (predictions.to_numpy() == expected.reshape(-1, 1)).all()

    # Write the frame out in DataFrameDirectory layout (parquet + _meta.yaml).
    dfd_dir = os.path.join(base_dir, "dfd")
    os.makedirs(dfd_dir, exist_ok=True)
    frame.to_parquet(os.path.join(dfd_dir, "data.dataset.parquet"), engine="pyarrow")
    with open(os.path.join(dfd_dir, "_meta.yaml"), "w") as fp:
        fp.write("type: DataFrameDirectory\nextension: {}\nformat: Parquet\ndata: data.dataset.parquet")
# --- Esempio n. 2 (Example no. 2) — scraped-snippet separator ---
def test_save_load():
    """Save a DenseNet classifier as a PyTorch state-dict model, reload it
    generically, and run prediction over a mock image-directory iterator."""
    config = {
        "model_type": "densenet201",
        "pretrained": True,
        "memory_efficient": False,
        "num_classes": 3,
    }
    net = DenseNet(**config)

    root_dir = dirname(dirname(abspath(__file__)))
    save_dir = os.path.join(root_dir, "AzureMLModel")
    # Also support list and csv_file
    label_map = {0: "056.dog", 1: "060.duc", 2: "080.frog"}

    save_pytorch_state_dict_model(
        net,
        init_params=config,
        path=save_dir,
        task_type=TaskType.MultiClassification,
        label_map=label_map,
        local_dependencies=[root_dir],
    )

    reloaded = load_generic_model(save_dir)
    images = mock_image_directory_iterator(os.path.join(root_dir, "images"))
    predict_result = reloaded.predict(images)
    print(f"predict_result =\n{predict_result}")

    # The raw model exposed by the generic wrapper must still be a torch module.
    assert isinstance(reloaded.raw_model, torch.nn.Module)
def test_save_load():
    """Train a tiny PyTorch linear-regression model, round-trip it through the
    state-dict save/load path, run prediction on a DataFrame, and emit the
    frame as a DataFrameDirectory fixture."""
    config = {"inputSize": 1, "outputSize": 1}
    net = LinearRegression(**config).to(device)
    x_train, y_train = get_training_data()
    train(net, x_train, y_train)

    root_dir = dirname(dirname(abspath(__file__)))
    save_dir = os.path.join(root_dir, "AzureMLModel")

    save_pytorch_state_dict_model(
        net,
        init_params=config,
        path=save_dir,
        local_dependencies=[root_dir],
    )

    reloaded = load_generic_model(save_dir)
    frame = pd.DataFrame({"x": [[10.0], [11.0], [12.0]]})
    predictions = reloaded.predict(frame)
    # One prediction row per input row.
    assert predictions.shape[0] == frame.shape[0]

    assert isinstance(reloaded.raw_model, torch.nn.Module)

    # Write the frame out in DataFrameDirectory layout (parquet + _meta.yaml).
    dfd_dir = os.path.join(root_dir, "dfd")
    os.makedirs(dfd_dir, exist_ok=True)
    frame.to_parquet(os.path.join(dfd_dir, "data.dataset.parquet"), engine="pyarrow")
    with open(os.path.join(dfd_dir, "_meta.yaml"), "w") as fp:
        fp.write(
            "type: DataFrameDirectory\nextension: {}\nformat: Parquet\ndata: data.dataset.parquet"
        )
# --- Esempio n. 4 (Example no. 4) — scraped-snippet separator ---
    def __init__(self, model_path, params=None):
        """Load a generic model for scoring.

        :param model_path: directory containing the saved AzureML model.
        :param params: optional mapping of module parameters; only
            ``constants.APPEND_SCORE_COLUMNS_TO_OUTPUT_KEY`` is consulted.
            Defaults to an empty mapping.
        """
        # Fix: the original signature used a mutable default (``params={}``),
        # which is shared across calls; normalize a ``None`` default instead.
        params = {} if params is None else params
        logger.info(f"BuiltinScoreModule({model_path}, {params})")
        append_value = params.get(
            constants.APPEND_SCORE_COLUMNS_TO_OUTPUT_KEY, None)
        # The flag is honored only when supplied as the literal string "true"
        # (case-insensitive); any other type or value disables appending.
        self.append_score_column_to_output = (
            isinstance(append_value, str) and append_value.lower() == "true"
        )
        logger.info(
            f"self.append_score_column_to_output = {self.append_score_column_to_output}"
        )

        # install_dependencies=True lets the loader pip-install the model's
        # declared conda/pip requirements before deserializing it.
        self.model = load_generic_model(model_path, install_dependencies=True)
        logger.info("Generic model loaded")
def main():
    """Train a linear model, persist it via cloudpickle, and validate that the
    reloaded generic model predicts row-for-row on a DataFrame."""
    net = LinearRegression(1, 1).to(device)
    x_train, y_train = get_training_data()
    train(net, x_train, y_train)

    save_dir = "./AzureMLModel"
    save_pytorch_cloudpickle_model(net,
                                   path=save_dir,
                                   local_dependencies=["."])

    reloaded = load_generic_model(save_dir)
    frame = pd.DataFrame({"x": [[10.0], [11.0], [12.0]]})
    predictions = reloaded.predict(frame)
    # One prediction row per input row.
    assert predictions.shape[0] == frame.shape[0]

    assert isinstance(reloaded.raw_model, torch.nn.Module)
def main():
    """Train a linear model, save it as a PyTorch state-dict model, reload it
    generically, and sanity-check prediction shape and model type."""
    config = {"inputSize": 1, "outputSize": 1}
    net = LinearRegression(**config).to(device)
    x_train, y_train = get_training_data()
    train(net, x_train, y_train)

    save_dir = "./AzureMLModel"
    save_pytorch_state_dict_model(net,
                                  init_params=config,
                                  path=save_dir,
                                  local_dependencies=["."])

    reloaded = load_generic_model(save_dir)
    frame = pd.DataFrame({"x": [[10.0], [11.0], [12.0]]})
    predictions = reloaded.predict(frame)
    # One prediction row per input row.
    assert predictions.shape[0] == frame.shape[0]

    assert isinstance(reloaded.raw_model, torch.nn.Module)
# --- Esempio n. 7 (Example no. 7) — scraped-snippet separator ---
def main():
    """Fit a BayesianRidge regressor, round-trip it through generic save/load
    (without installing dependencies), and verify identical predictions."""
    X, y = get_training_data()
    regressor = BayesianRidge(compute_score=True)
    regressor.fit(X, y)
    expected = regressor.predict(X)

    wrapped = MyCustomModel(regressor)
    # Conda spec attached directly on the model object rather than passed
    # to save_generic_model.
    wrapped.conda = {
        "name": "test",
        "channels": ["defaults"],
        "dependencies": [{"pip": ["scipy", "sklearn"]}],
    }

    save_generic_model(wrapped, path="./AzureMLModel", local_dependencies=["."])
    reloaded = load_generic_model(path="./AzureMLModel",
                                  install_dependencies=False)

    frame = pd.DataFrame(data=X)
    result_df = reloaded.predict(frame)
    print(f"result_df = {result_df}")
    assert (result_df.to_numpy() == expected.reshape(-1, 1)).all()
# --- Esempio n. 8 (Example no. 8) — scraped-snippet separator ---
from azureml.designer.model.io import load_generic_model
from azureml.studio.core.io.image_directory import ImageDirectory
import torch
import torch.nn as nn
from torchvision import transforms
# from .densenet import DenseNet
from .utils import logger

if __name__ == '__main__':
    # Smoke-test inference: reload a locally saved model and open the
    # transformed image directory it will be fed from.
    print("Testing inference.")
    loaded_generic_model = load_generic_model(
        path='/mnt/chjinche/projects/saved_model')
    model = loaded_generic_model.raw_model
    loader_dir = ImageDirectory.load('/mnt/chjinche/data/out_transform_test/')
    # NOTE(review): the original kept a commented-out manual DenseNet
    # reconstruction here (state_dict reload, optional cuda/DataParallel,
    # eval()); removed as dead code.
# --- Esempio n. 9 (Example no. 9) — scraped-snippet separator ---
from azureml.designer.model.io import load_generic_model
from azureml.studio.core.io.image_directory import ImageDirectory
import torch
import torch.nn as nn
from torchvision import transforms
# from .densenet import DenseNet
# from .utils import logger

if __name__ == '__main__':
    # Smoke-test inference: reload a saved classification model and open the
    # transformed image directory it will be fed from.
    print("Testing inference.")
    loaded_generic_model = load_generic_model(
        path='/mnt/chjinche/test_data/saved_model_classification')
    loader_dir = ImageDirectory.load('/mnt/chjinche/test_data/transform_test/')
    # NOTE(review): the original kept a commented-out manual DenseNet
    # reconstruction here (raw_model access, state_dict reload, optional
    # cuda/DataParallel, eval()); removed as dead code.