Example #1
def init():
    global model
    # note here "sklearn_regression_model.pkl" is the name of the model registered under
    # this is a different behavior than before when the code is run locally, even though the code is the same.
    model_path = Model.get_model_path('LOS_RF_model2.pkl')
    # deserialize the model file back into a sklearn model
    model = pickle.load(open(model_path, 'rb'))
    global inputs_dc, prediction_dc
    # this setup will help us save our inputs under the "inputs" path in our Azure Blob
    inputs_dc = ModelDataCollector(
        model_name="LOS_RF_model2.pkl",
        identifier="inputs",
        feature_names=[
            'rcount', 'gender', 'dialysisrenalendstage', 'asthma', 'irondef',
            'pneum', 'substancedependence', 'psychologicaldisordermajor',
            'depress', 'psychother', 'fibrosisandother', 'malnutrition',
            'hemo', 'hematocrit', 'neutrophils', 'sodium', 'glucose',
            'bloodureanitro', 'creatinine', 'bmi', 'pulse', 'respiration',
            'secondarydiagnosisnonicd9', 'fid', 'Capacity', 'Name',
            'daysofweek_admit'
        ])
    # this setup will help us save our predictions under the "predictions" path in our Azure Blob
    prediction_dc = ModelDataCollector("LOS_RF_model2.pkl",
                                       identifier="predictions",
                                       feature_names=["prediction1"])
Example #2
def init():
    global model
    global inputs_dc, prediction_dc
    global tokenizer, max_len, max_words

    try:
        model_name = 'MODEL-NAME'  # Placeholder model name
        print('Looking for model path for model: ', model_name)
        model_path = Model.get_model_path(model_name=model_name)
        print('Loading model from: ', model_path)
        # Load the ONNX model
        model = onnxruntime.InferenceSession(model_path)
        print('Model loaded...')

        inputs_dc = ModelDataCollector("model_telemetry", designation="inputs")
        prediction_dc = ModelDataCollector("model_telemetry",
                                           designation="predictions",
                                           feature_names=["prediction"])

        car_components_descriptions = pd.read_csv(
            'dataset/training_data.csv')['text'].tolist()
        print('Training dataset loaded...')

        max_len = 100
        max_words = 10000
        tokenizer = Tokenizer(num_words=max_words)
        tokenizer.fit_on_texts(car_components_descriptions)
        print('Tokenizer fitted...')

    except Exception as e:
        print(e)
Example #3
def init():
    global model, scaler, input_name, label_name, inputs_dc, prediction_dc

    scaler_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'),
                               'model-scaler/1/model-scaler.pkl')
    # deserialize the scaler file back into a scikit-learn object
    scaler = joblib.load(scaler_path)

    model_onnx = os.path.join(os.getenv('AZUREML_MODEL_DIR'),
                              'support-vector-classifier/2/svc.onnx')
    # print(os.listdir(model_onnx))
    model = onnxruntime.InferenceSession(model_onnx, None)
    input_name = model.get_inputs()[0].name
    label_name = model.get_outputs()[0].name

    # variables to monitor model input and output data
    inputs_dc = ModelDataCollector("Support vector classifier model",
                                   designation="inputs",
                                   feature_names=[
                                       "feat1", "feat2", "feat3", "feat4",
                                       "feat5", "feat6", "feat7"
                                   ])
    prediction_dc = ModelDataCollector("Support vector classifier model",
                                       designation="predictions",
                                       feature_names=["weatherprediction"])
def init():
    try:
        # One-time initialization of PySpark and predictive model

        global trainedModel
        global spark

        global inputs_dc, prediction_dc
        model_name = "{model_name}"  # interpolated
        model_path = Model.get_model_path(model_name)

        # workaround so that model_path works with a zipped model: the image can only be built from a zip file,
        # because an unzipped model would add one Docker layer per file and exceed Docker's maximum layer depth (125).
        mdl, ext = model_path.rsplit(".", 1)
        os.mkdir(mdl + ".tmp")
        shutil.unpack_archive(model_path, mdl + ".tmp")
        os.remove(model_path)
        os.rename(mdl + ".tmp", model_path)

        inputs_dc = ModelDataCollector(model_name,
                                       designation="inputs",
                                       feature_names=["json_input_data"])
        prediction_dc = ModelDataCollector(model_name,
                                           designation="predictions",
                                           feature_names=["predictions"])

        spark = pyspark.sql.SparkSession.builder.appName(
            "AML Production Model").getOrCreate()
        trainedModel = PipelineModel.load(model_path)
    except Exception as e:
        trainedModel = e
Example #5
def init():
    global model
    global inputs_dc, prediction_dc
    global tokenizer, max_len, max_words
    
    try:
        model_name = 'compliance-classifier'
        print('Looking for model path for model: ', model_name)
        model_path = Model.get_model_path(model_name = model_name)
        print('Loading model from: ', model_path)
        # Load the ONNX model
        model = onnxruntime.InferenceSession(model_path)
        print('Model loaded...')

        inputs_dc = ModelDataCollector("model_telemetry", designation="inputs")
        prediction_dc = ModelDataCollector("model_telemetry", designation="predictions", feature_names=["prediction"])

        cardata_url = ('https://quickstartsws9073123377.blob.core.windows.net/'
                        'azureml-blobstore-0d1c4218-a5f9-418b-bf55-902b65277b85/'
                        'quickstarts/connected-car-data/connected-car_components.csv')

        car_components_descriptions = pd.read_csv(cardata_url)['text'].tolist()
        print('Training dataset loaded...')

        max_len = 100
        max_words = 10000
        tokenizer = Tokenizer(num_words = max_words)
        tokenizer.fit_on_texts(car_components_descriptions)
        print('Tokenizer fitted...')

    except Exception as e:
        print(e)
Example #6
def init():
    global model
    global inputs_dc, prediction_dc
    # The AZUREML_MODEL_DIR environment variable indicates a directory containing the model file you registered.
    model_file_name = "model.pkl"
    model_path = os.path.join(os.environ.get("AZUREML_MODEL_DIR"), model_file_name)
    model = joblib.load(model_path)
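    # set up collectors that log the scoring inputs and the model's predictions for monitoring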
    inputs_dc = ModelDataCollector("sample-model", designation="inputs", feature_names=["feat1", "feat2", "feat3", "feat4"])
    prediction_dc = ModelDataCollector("sample-model", designation="predictions", feature_names=["prediction"])
Example #7
def init():
    # Load model
    global model
    model_dir = os.getenv('AZUREML_MODEL_DIR')
    model_path = os.path.join(model_dir, 'credit-prediction.pkl')
    model = joblib.load(model_path)

    # Setup Data Collection
    global inputs_dc
    global predictions_dc
    inputs_dc = ModelDataCollector("best_model", designation="inputs", feature_names=["Age", "Sex", "Job", "Housing", "Saving accounts", "Checking account", "Credit amount", "Duration", "Purpose"])
    predictions_dc = ModelDataCollector("best_model", designation="predictions", feature_names=["good", "bad"])
Example #8
def init():
    global model
    print ("model initialized" + time.strftime("%H:%M:%S"))
    # note here "sklearn_regression_model.pkl" is the name of the model registered under the workspace
    # this call should return the path to the model.pkl file on the local disk.
    model_path = Model.get_model_path(model_name = 'sklearn_regression_model')
    # deserialize the model file back into a sklearn model
    model = joblib.load(model_path)
    
    global inputs_dc, prediction_dc
    # this setup will help us save our inputs under the "inputs" path in our Azure Blob
    inputs_dc = ModelDataCollector(model_name="sklearn_regression_model", identifier="inputs", feature_names=["feat1", "feat2"]) 
    # this setup will help us save our predictions under the "predictions" path in our Azure Blob
    prediction_dc = ModelDataCollector("sklearn_regression_model", identifier="predictions", feature_names=["prediction1", "prediction2"])
def init():
    global model
    global inputs_dc, prediction_dc

    inputs_dc = ModelDataCollector("torchcnn", identifier="inputs")
    prediction_dc = ModelDataCollector("torchcnn", identifier="predictions")

    model = CNN()
    # The line below loads the model from the AML Service
    model_path = Model.get_model_path(model_name="torchcnn")
    # It is also possible to load a local model file
    # model_path = '/temp/torchcnn.pth'
    model.load_state_dict(torch.load(model_path))
    model.eval()
Example #10
def init():
    global model
    global inputs_dc, prediction_dc
    inputs_dc = ModelDataCollector("model",
                                   identifier="inputs",
                                   feature_names=["feat1", "feat2", "feat3"])
    prediction_dc = ModelDataCollector("model",
                                       identifier="predictions",
                                       feature_names=["prediction1"])
    # note here "best_model" is the name of the model registered under the workspace
    # this call should return the path to the model.pkl file on the local disk.
    model_path = Model.get_model_path(model_name='model')

    # deserialize the model file back into a sklearn model
    model = joblib.load(model_path)
Example #11
def init():
    global model
    print("Model Initialized: " + time.strftime("%H:%M:%S"))
    # load the model from file into a global object
    model_path = Model.get_model_path(model_name="mymodel")
    model = joblib.load(model_path)
    print("Initialize Data Collectors")
    global inputs_dc, prediction_dc
    inputs_dc = ModelDataCollector(model_name="sklearn_regression_model",
                                   feature_names=[
                                       "AGE", "SEX", "BMI", "BP", "S1", "S2",
                                       "S3", "S4", "S5", "S6"
                                   ])
    prediction_dc = ModelDataCollector(model_name="sklearn_regression_model",
                                       feature_names=["Y"])
Example #12
def init():
    global model
    global inputs_dc, prediction_dc

    model_name = 'MODEL-NAME'  # Placeholder model name
    print('Looking for model path for model: ', model_name)
    model_path = Model.get_model_path(model_name=model_name)
    print('Loading model from: ', model_path)

    with open(model_path, 'rb') as f:
        model = pickle.load(f)

    inputs_dc = ModelDataCollector("model_telemetry", designation="inputs")
    prediction_dc = ModelDataCollector("model_telemetry",
                                       designation="predictions",
                                       feature_names=["prediction"])
Example #13
def init():
    global logger
    global model
    global pred_collector
    init_logger()
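    # collect each detection under the "imgpred" identifier (MODEL_NAME, init_logger and load_model are defined elsewhere in the scoring script)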
    pred_collector = ModelDataCollector(MODEL_NAME,
                                        identifier="imgpred",
                                        feature_names=["detection"])
    model = load_model()
Example #14
def init():
    global model
    global inputs_dc, prediction_dc
    model_path = Model.get_model_path(consts.model_name)
    model = joblib.load(model_path)
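    # collect the trip input features and the predicted duration for later analysis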
    inputs_dc = ModelDataCollector(consts.model_name,
                                   designation="inputs",
                                   feature_names=[
                                       'vendorID', 'passengerCount',
                                       'tripDistance', 'pickupLongitude',
                                       'pickupLatitude', 'dropoffLongitude',
                                       'dropoffLatitude', 'totalAmount',
                                       'month_num', 'day_of_month',
                                       'day_of_week', 'hour_of_day'
                                   ])
    prediction_dc = ModelDataCollector(consts.model_name,
                                       designation="predictions",
                                       feature_names=["duration"])
Example #15
def init():
    global model, scaler, input_name, label_name, inputs_dc, prediction_dc, tc

    # Add your telemetry key
    tc = TelemetryClient('xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxx')
    scaler_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'),
                               'scaler/1/scaler.pkl')
    # deserialize the scaler file back into a scikit-learn object

    try:
        scaler = joblib.load(scaler_path)
    except Exception as e:
        tc.track_event('FileNotFoundException', {'error_message': str(e)},
                       {'FileNotFoundError': 101})
        tc.flush()

    model_onnx = os.path.join(os.getenv('AZUREML_MODEL_DIR'),
                              'support-vector-classifier/1/svc.onnx')

    try:
        model = onnxruntime.InferenceSession(model_onnx, None)
    except Exception as e:
        tc.track_event('FileNotFoundException', {'error_message': str(e)},
                       {'FileNotFoundError': 101})
        tc.flush()

    input_name = model.get_inputs()[0].name
    label_name = model.get_outputs()[0].name

    # variables to monitor model input and output data
    inputs_dc = ModelDataCollector("Support vector classifier model",
                                   designation="inputs",
                                   feature_names=[
                                       "Temperature_C", "Humidity",
                                       "Wind_speed_kmph",
                                       "Wind_bearing_degrees", "Visibility_km",
                                       "Pressure_millibars",
                                       "Current_weather_condition"
                                   ])
    prediction_dc = ModelDataCollector(
        "Support vector classifier model",
        designation="predictions",
        feature_names=["Future_weather_condition"])
def init():
    global model
    global inputs_dc, prediction_dc

    try:
        model_name = 'MODEL-NAME'  # Placeholder model name
        print('Looking for model path for model: ', model_name)
        model_path = Model.get_model_path(model_name=model_name)
        print('Loading model from: ', model_path)
        model = load_model(model_path)
        print("Model loaded from disk.")
        print(model.summary())

        inputs_dc = ModelDataCollector("model_telemetry", designation="inputs")
        prediction_dc = ModelDataCollector("model_telemetry",
                                           designation="predictions",
                                           feature_names=["prediction"])
    except Exception as e:
        print(e)
Example #17
def init():
    # Load model
    global model
    model_dir = os.getenv('AZUREML_MODEL_DIR')
    model_path = os.path.join(model_dir, 'aml-demo-model.pkl')
    model = joblib.load(model_path)

    # Setup Data Collection
    global data_collector
    data_collector = ModelDataCollector("prod_model", designation="inputs")
def init():
    global learn
    
    model_file = Model.get_model_path("cats_vs_dogs")
    model_path = os.path.dirname(model_file)
    print(model_path)
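    # load_learner is given the folder that contains the exported learner file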
    learn = load_learner(model_path)
    
    global prediction_dc
    prediction_dc = ModelDataCollector("best_model", identifier="predictions", feature_names=["prediction"])
Example #19
def init():
    '''
    Initialize required models:
        Get the IRIS Model from Model Registry and load
    '''
    global prediction_dc
    global model
    prediction_dc = ModelDataCollector("IRIS", designation="predictions", feature_names=["SepalLengthCm","SepalWidthCm", "PetalLengthCm","PetalWidthCm","Predicted_Species"])

    model_path = Model.get_model_path('IRIS')
    model = joblib.load(os.path.join(model_path, "iris_model.pkl"))
    print('IRIS model loaded...')
Example #20
def init():
    global model, inputs_dc, prediction_dc, feature_names, categorical_features

    print("Model is initialized" + time.strftime("%H:%M:%S"))
    model_path = Model.get_model_path(model_name="driftmodel")
    model = joblib.load(model_path)

    feature_names = [
        "usaf", "wban", "latitude", "longitude", "station_name", "p_k",
        "sine_weekofyear", "cosine_weekofyear", "sine_hourofday",
        "cosine_hourofday", "temperature-7"
    ]

    categorical_features = ["usaf", "wban", "p_k", "station_name"]

    inputs_dc = ModelDataCollector(model_name="driftmodel",
                                   identifier="inputs",
                                   feature_names=feature_names)

    prediction_dc = ModelDataCollector("driftmodel",
                                       identifier="predictions",
                                       feature_names=["temperature"])
def init():
    try:
        # One-time initialization of PySpark and predictive model

        global trainedModel
        global spark

        global inputs_dc, prediction_dc
        model_name = "{model_name}"  # interpolated
        inputs_dc = ModelDataCollector(model_name,
                                       identifier="inputs",
                                       feature_names=["json_input_data"])
        prediction_dc = ModelDataCollector(model_name,
                                           identifier="predictions",
                                           feature_names=["predictions"])

        spark = pyspark.sql.SparkSession.builder.appName(
            "AML Production Model").getOrCreate()
        model_path = Model.get_model_path(model_name)
        trainedModel = PipelineModel.load(model_path)
    except Exception as e:
        trainedModel = e
Example #22
def init():
    global logger
    global model
    global pred_collector
    global blob_service
    init_logger()
    pred_collector = ModelDataCollector(MODEL_NAME,
                                        identifier="imgpred",
                                        feature_names=["detection"])
    model = load_model()
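    # connect to the image storage account and make sure the target container exists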
    blob_service = BlockBlobService(IMAGE_STORAGE_ACCOUNT_NAME,
                                    IMAGE_STORAGE_ACCOUNT_KEY)
    blob_service.create_container(
        IMAGE_STORAGE_CONTAINER_NAME)  #fail_on_exist=False by default
Example #23
def init():
    global model
    global inputs_dc, prediction_dc
    global tokenizer
    global maxlen
    try:

        data_url = ('https://quickstartsws9073123377.blob.core.windows.net/'
                    'azureml-blobstore-0d1c4218-a5f9-418b-bf55-902b65277b85/'
                    'quickstarts/connected-car-data/connected-car_components.csv')

        # Load the car components labeled data
        car_components_df = pd.read_csv(data_url)
        components = car_components_df["text"].tolist()
        labels = car_components_df["label"].tolist()

        maxlen = 100                                               
        max_words = 10000      

        tokenizer = Tokenizer(num_words=max_words)
        tokenizer.fit_on_texts(components)

        model_name = 'MODEL-NAME' # Placeholder model name
        print('Looking for model path for model: ', model_name)
        model_path = Model.get_model_path(model_name = model_name)
        print('Loading model from: ', model_path)
        # Load the ONNX model
        model = onnxruntime.InferenceSession(model_path)
        print('Model loaded...')

        inputs_dc = ModelDataCollector("model_telemetry", designation="inputs")
        prediction_dc = ModelDataCollector("model_telemetry", designation="predictions", feature_names=["prediction"])


    except Exception as e:
        print(e)
Example #24
def init():
    from azureml.monitoring import ModelDataCollector

    global model
    global inputs_dc, prediction_dc

    # Retrieve the path to the registered model file
    model_path = os.path.join(os.getenv("AZUREML_MODEL_DIR"), "model.pkl")

    # Deserialize the model file back into a sklearn model
    model = joblib.load(model_path)

    # Initialize data collectors
    inputs_dc = ModelDataCollector(
        model_name="cardiovascular_disease_model",
        designation="inputs",
        feature_names=[
            "age",
            "gender",
            "systolic",
            "diastolic",
            "height",
            "weight",
            "cholesterol",
            "glucose",
            "smoker",
            "alcoholic",
            "active",
            "datetime",
        ],
    )
    prediction_dc = ModelDataCollector(
        model_name="cardiovascular_disease_model",
        designation="predictions",
        feature_names=["cardiovascular_disease"],
    )
Example #25
def init():
    global model
    global inputs_dc
    inputs_dc = ModelDataCollector('elevation-regression-model.pkl',
                                   designation='inputs',
                                   feature_names=[
                                       'latitude', 'longitude', 'temperature',
                                       'windAngle', 'windSpeed'
                                   ])
    # note here "elevation-regression-model.pkl" is the name of the model registered under
    # this is a different behavior than before when the code is run locally, even though the code is the same.
    # AZUREML_MODEL_DIR is an environment variable created during deployment.
    # It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
    # For multiple models, it points to the folder containing all deployed models (./azureml-models)
    model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'),
                              'elevation-regression-model.pkl')
    model = joblib.load(model_path)
Example #26
def init():

    global heart_disease_model
    global deploy_parameters
    global explainer_model
    global inputs_dc

    # load deployment parameters (registered model names and dataset columns) packaged with the scoring script
    deploy_parameters = joblib.load("dependencies/deploy_parameters.pkl")
    model_path = Model.get_model_path(
        model_name=deploy_parameters.get("model_name"))
    heart_disease_model = joblib.load(model_path)
    explainer_model_path = Model.get_model_path(
        model_name=deploy_parameters.get("explainer_model_name"))
    explainer_model = joblib.load(explainer_model_path)

    inputs_dc = ModelDataCollector(deploy_parameters.get("model_name"),
                                   designation='inputs',
                                   feature_names=deploy_parameters.get("dataset_columns"))
Example #27
def init():
    global model
    global prediction_dc
    global storage_location

    storage_location = "/tmp/output"

    if not os.path.exists(storage_location):
        os.makedirs(storage_location)

    # next, we delete previous output files
    files = glob.glob(os.path.join(storage_location,'*'))
    
    for f in files:
        os.remove(f)

    # model_name is expected to be defined elsewhere in the scoring script (e.g. filled in from a deployment template)
    model_path = Model.get_model_path(model_name=model_name)
    # deserialize the model file back into a sklearn model
    model = joblib.load(model_path)
    prediction_dc = ModelDataCollector("automl_model", identifier="predictions", feature_names=["prediction"])
Example #28
def init():
    global model
    global prediction_dc
    global storage_location

    storage_location = "/tmp/output"

    if not os.path.exists(storage_location):
        os.makedirs(storage_location)

    # next, we delete previous output files
    files = glob.glob(os.path.join(storage_location, '*'))

    for f in files:
        os.remove(f)

    model_name = "model.pkl"

    model_path = Model.get_model_path(model_name=model_name)
    # deserialize the model file back into a sklearn model
    model = joblib.load(model_path)
    prediction_dc = ModelDataCollector("automl_model", identifier="predictions", feature_names=["prediction"])

def run(rawdata, window=14 * 24):
    """
    Run anomaly detection over the incoming data.

    :param rawdata: raw input payload to score
    :param window: number of most recent observations to evaluate (14 * 24 by default)
    :return: the anomaly detection results
    """

    # set some parameters for the AD algorithm
    alpha = 0.1
    max_anoms = 0.05
    only_last = None  # alternatively, we can set this to 'hr' or 'day'