Example #1
def evaluate_model():
    all_runs = exp.get_runs(
        properties={"release_id": release_id, "run_type": "train"},
        include_children=True)

    # all_runs is a generator over the runs that match the property values in
    # the query. Runs are returned in descending order, so the first item is
    # the most recent run.
    new_model_run = next(all_runs)
    new_model_run_id = new_model_run.id
    print(f'New Run found with Run ID of: {new_model_run_id}')

    new_model_run = Run(exp, run_id=new_model_run_id)
    new_model_acc = new_model_run.get_metrics().get("final-accuracy")

    try:
        # Get the most recently registered model; we assume that is the model
        # in production. Download it and compare it with the newly trained
        # model by running tests with the same data set.
        model_list = Model.list(ws)
        production_model = next(
            filter(
                lambda x: x.created_time == max(model.created_time
                                                for model in model_list),
                model_list,
            ))
        production_model_run_id = production_model.tags.get("run_id")
        run_list = exp.get_runs()

        # Get the run history for both production model and
        # newly trained model and compare final-accuracy
        production_model_run = Run(exp, run_id=production_model_run_id)

        production_model_acc = production_model_run.get_metrics().get(
            "final-accuracy")

        print(
            "Current Production model accuracy: {}, New trained model accuracy: {}"
            .format(production_model_acc, new_model_acc))

        promote_new_model = False
        if new_model_acc > production_model_acc:
            promote_new_model = True
            print(
                "New trained model performs better, thus it will be registered"
            )
    except Exception:
        promote_new_model = True
        print("This is the first model to be trained, \
            thus nothing to evaluate for now")

    return promote_new_model, new_model_run, new_model_acc
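
# A minimal sketch of how evaluate_model()'s return values might be consumed,
# assuming the trained model was uploaded to the run under a hypothetical path
# outputs/model.pkl and a hypothetical registered name my_model. Tagging the
# registration with run_id mirrors what the other examples later read back via
# tags.get("run_id").
promote_new_model, new_model_run, new_model_acc = evaluate_model()
if promote_new_model:
    registered_model = new_model_run.register_model(
        model_name="my_model",           # hypothetical model name
        model_path="outputs/model.pkl",  # hypothetical artifact path in the run
        tags={"run_id": new_model_run.id,
              "final-accuracy": str(new_model_acc)})
    print("Registered {} v{}".format(registered_model.name,
                                     registered_model.version))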
Example #2
def test_registered_model_metric(get_ws_config):
    try:
        with open("aml_config/run_id.json") as f:
            config = json.load(f)
            new_model_run_id = config["run_id"]
            if new_model_run_id != "":
                experiment_name = config["experiment_name"]
                exp = Experiment(workspace=ws, name=experiment_name)
                model_list = Model.list(
                    ws, tags={"area": "predictive maintenance"})
                production_model = model_list[0]
                run_list = exp.get_runs()
                new_model_run = Run(exp, run_id=new_model_run_id)
                new_model_metric = new_model_run.get_metrics().get('accuracy')
                assert new_model_metric > 0.85, "Model accuracy is not above 85%"
    except FileNotFoundError:
        print("No new model registered to test")
Example #3
try:
    # Get the most recently registered model; we assume that is the model in
    # production. Download it and compare it with the newly trained model by
    # running tests with the same data set.
    model_list = Model.list(ws)
    production_model = next(
        filter(
            lambda x: x.created_time == max(model.created_time
                                            for model in model_list),
            model_list))
    production_model_run_id = production_model.tags.get('run_id')
    run_list = exp.get_runs()
    # Get the run history for both production model and newly trained model and compare mse
    production_model_run = Run(exp, run_id=production_model_run_id)
    new_model_run = Run(exp, run_id=new_model_run_id)

    production_model_metric = production_model_run.get_metrics().get(
        'accuracy')
    new_model_metric = new_model_run.get_metrics().get('accuracy')
    print(
        'Current Production model accuracy: {}, New trained model accuracy: {}'
        .format(production_model_metric, new_model_metric))

    promote_new_model = False
    if new_model_metric > production_model_metric:
        promote_new_model = True
        print('New trained model performs better, thus it will be registered')
except Exception:
    promote_new_model = True
    print(
        'This is the first model to be trained, thus nothing to evaluate for now'
    )
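
The next(filter(...)) scan above recomputes max(model.created_time) while filtering; a behavior-equivalent and simpler way to pick the most recently registered model is max() with a key (a sketch, not the original code). Note that max() raises ValueError on an empty registry, which the surrounding try/except already absorbs.

production_model = max(model_list, key=lambda m: m.created_time)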
Example #4
    model_list = Model.list(ws)
    production_model = next(
        filter(
            lambda x: x.created_time == max(model.created_time
                                            for model in model_list),
            model_list,
        ))
    production_model_run_id = production_model.tags.get("run_id")
    run_list = exp.get_runs()
    # production_model_run = next(filter(lambda x: x.id == production_model_run_id, run_list))

    # Get the run history for both production model and newly trained model and compare mse
    production_model_run = Run(exp, run_id=production_model_run_id)
    new_model_run = Run(exp, run_id=new_model_run_id)

    production_model_mse = production_model_run.get_metrics().get("mse")
    new_model_mse = new_model_run.get_metrics().get("mse")
    print("Current Production model mse: {}, New trained model mse: {}".format(
        production_model_mse, new_model_mse))

    promote_new_model = False
    if new_model_mse < production_model_mse:
        promote_new_model = True
        print("New trained model performs better, thus it will be registered")
except Exception:
    promote_new_model = True
    print(
        "This is the first model to be trained, thus nothing to evaluate for now"
    )

run_id = {}
Example #5
latest_model_name = latest_model.name
latest_model_version = latest_model.version
latest_model_path = latest_model.get_model_path(latest_model_name,
                                                _workspace=ws)

print('Latest model id: ', latest_model_id)
print('Latest model name: ', latest_model_name)
print('Latest model version: ', latest_model_version)
print('Latest model path: ', latest_model_path)

latest_model_run_id = latest_model.tags.get("run_id")
print('Latest model run id: ', latest_model_run_id)

latest_model_run = Run(run.experiment, run_id=latest_model_run_id)

latest_model_accuracy = latest_model_run.get_metrics().get("acc")
print('Latest model accuracy: ', latest_model_accuracy)

ws_list = Webservice.list(ws, model_name=latest_model_name)
print('webservice list')
print(ws_list)

deploy_model = False
current_model = None

if len(ws_list) > 0:
    webservice = ws_list[0]
    try:
        image_id = webservice.tags['image_id']
        image = Image(ws, id=image_id)
        current_model = image.models[0]
Example #6
    tag_name = 'experiment_name'

    model = get_model_by_tag(model_name, tag_name, exp.name, ws)

    if model is not None:

        production_model_run_id = model.run_id

        # Get the run history for both production model and
        # newly trained model and compare mse
        production_model_run = Run(exp, run_id=production_model_run_id)
        new_model_run = run.parent
        print("Production model run is", production_model_run)

        production_model_mse = \
            production_model_run.get_metrics().get(metric_eval)
        new_model_mse = new_model_run.get_metrics().get(metric_eval)
        if production_model_mse is None or new_model_mse is None:
            print("Unable to find", metric_eval, "metrics, "
                  "exiting evaluation")
            run.parent.cancel()
        else:
            print("Current Production model mse: {}, "
                  "New trained model mse: {}".format(production_model_mse,
                                                     new_model_mse))

        if new_model_mse < production_model_mse:
            print("New trained model performs better, "
                  "thus it should be registered")
        else:
            print("New trained model metric is less than or equal to "
Example #7
# Only register model, if it performs better than the production model
print("Register model only if it performs better.")
try:
    # Loading run of production model
    print("Loading Run of Production Model to evaluate new model")
    production_model = Model(workspace=ws, name=deployment_settings["model"]["name"])
    production_model_run_id = production_model.tags.get(["run_id"])
    production_model_run = Run(experiment=experiment, run_id=production_model_run_id)

    # Comparing models
    print("Comparing Metrics of production and newly trained model")
    promote_new_model = True
    for metric in deployment_settings["model"]["evaluation_parameters"]["larger_is_better"]:
        if not promote_new_model:
            break
        new_model_parameter = run.get_metrics().get(metric)
        production_model_parameter = production_model_run.get_metrics().get(metric)
        if new_model_parameter < production_model_parameter:
            promote_new_model = False
    for metric in deployment_settings["model"]["evaluation_parameters"]["smaller_is_better"]:
        if not promote_new_model:
            break
        new_model_parameter = run.get_metrics().get(metric)
        production_model_parameter = production_model_run.get_metrics().get(metric)
        if new_model_parameter > production_model_parameter:
            promote_new_model = False
except Exception:
    promote_new_model = True
    print("This is the first model to be trained, thus nothing to evaluate for now")

# TODO: Remove
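
The two metric loops assume a deployment_settings dictionary with nested evaluation_parameters lists. A hypothetical example of that structure (names and values are placeholders, not from the source):

deployment_settings = {
    "model": {
        "name": "my-model",  # hypothetical registered model name
        "evaluation_parameters": {
            "larger_is_better": ["accuracy", "auc"],  # higher value wins
            "smaller_is_better": ["mse"],             # lower value wins
        },
    },
}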
Example #8
    print('Currently trained model name: ', currentlyTrainedModelName)

    currentlyTrainedModelVersion = currentlyTrainedModelInRegistry.version
    print('Currently trained model version: ', currentlyTrainedModelVersion)

    currentlyTrainedModelPath = currentlyTrainedModelInRegistry.get_model_path(
        currentlyTrainedModelName, _workspace=amlWs)
    print('Currently trained model path: ', currentlyTrainedModelPath)

    currentlyTrainedModelRunId = currentlyTrainedModelInRegistry.tags.get(
        "run_id")
    print('Currently trained model run id: ', currentlyTrainedModelRunId)

    currentlyTrainedModelRunRef = Run(run.experiment,
                                      run_id=currentlyTrainedModelRunId)
    currentlyTrainedModelAccuracy = currentlyTrainedModelRunRef.get_metrics(
    ).get("acc")
    print('Currently trained model accuracy: ', currentlyTrainedModelAccuracy)

    containerizeStepLogInfo["model_name"] = currentlyTrainedModelName
    containerizeStepLogInfo["model_version"] = currentlyTrainedModelVersion
    containerizeStepLogInfo["model_path"] = currentlyTrainedModelPath
    containerizeStepLogInfo["model_acc"] = currentlyTrainedModelAccuracy
    containerizeStepLogInfo["deploy_model_bool"] = deployModelBool
    containerizeStepLogInfo["image_name"] = args.image_name
    containerizeStepLogInfo["image_id"] = ""

else:
    print('..No freshly trained model found! This should not have happened!')

print('..3. completed')
print('')
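
The excerpt populates containerizeStepLogInfo but ends before showing where it goes. One common pattern in multi-step pipelines is to persist such step metadata as JSON for a downstream step to read; a sketch with an assumed file name (not from the source):

import json

with open('containerize_step_log.json', 'w') as f:  # assumed output file
    json.dump(containerizeStepLogInfo, f, indent=2)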
Example #9
    model_list = Model.list(ws)
    production_model = next(
        filter(
            lambda x: x.created_time == max(model.created_time
                                            for model in model_list),
            model_list,
        ))
    production_model_run_id = production_model.tags.get("run_id")
    run_list = exp.get_runs()
    # production_model_run = next(filter(lambda x: x.id == production_model_run_id, run_list))

    # Get the run history for both production model and newly trained model and compare accuracy
    production_model_run = Run(exp, run_id=production_model_run_id)
    new_model_run = Run(exp, run_id=new_model_run_id)

    production_model_accuracy = production_model_run.get_metrics().get(
        "accuracy")
    new_model_accuracy = new_model_run.get_metrics().get("accuracy")
    print("Current Production model mse: {}, New trained model mse: {}".format(
        production_model_accuracy, new_model_accuracy))

    promote_new_model = False
    if new_model_accuracy > production_model_accuracy:
        promote_new_model = True
        print("New trained model performs better, thus it will be registered")
except Exception:
    promote_new_model = True
    print(
        "This is the first model to be trained, thus nothing to evaluate for now"
    )

run_id = {}
Example #10
    model_list = Model.list(ws)

    # Get the model currently in production
    production_model = next(
        filter(
            lambda x: x.created_time == max(model.created_time
                                            for model in model_list),
            model_list))
    production_model_run_id = production_model.tags.get('run_id')

    # Fetch the metrics logged when the production model was trained
    production_model_run = Run(exp, run_id=production_model_run_id)
    new_model_run = Run(exp, run_id=new_model_run_id)

    # MSE of the production model
    production_model_mse = production_model_run.get_metrics().get('mse')
    # MSE of the new model
    new_model_mse = new_model_run.get_metrics().get('mse')
    print('Current Model MSE: {}, New Model MSE: {}'.format(
        production_model_mse, new_model_mse))

    promote_new_model = False
    if new_model_mse < production_model_mse:
        promote_new_model = True
        print('New Model is Better')
except Exception:
    promote_new_model = True
    print('This is the first model to be registered')

run_id = {}
run_id['run_id'] = ''
Example #11
    model_list = Model.list(ws)
    production_model = next(
        filter(
            lambda x: x.created_time == max(model.created_time
                                            for model in model_list),
            model_list,
        ))
    production_model_run_id = production_model.tags.get("run_id")
    run_list = exp.get_runs()
    # production_model_run = next(filter(lambda x: x.id == production_model_run_id, run_list))

    # Get the run history for both production model and newly trained model and compare R2
    production_model_run = Run(exp, run_id=production_model_run_id)
    new_model_run = Run(exp, run_id=new_model_run_id)

    production_model_r2 = production_model_run.get_metrics().get("r2")
    new_model_r2 = new_model_run.get_metrics().get("r2")
    print("Current Production model r2: {}, New trained model r2: {}".format(
        production_model_r2, new_model_r2))

    promote_new_model = False
    if new_model_r2 > production_model_r2:
        promote_new_model = True
        print("New trained model performs better, thus it will be registered")
except Exception:
    promote_new_model = True
    print(
        "This is the first model to be trained, thus nothing to evaluate for now"
    )

run_id = {}
Example #12
    run.log("acc", accuracy)

    model_name = "model_n_estimators_" + str(n) + ".pkl"
    filename = "outputs/" + model_name

    joblib.dump(value=model, filename=filename)
    run.upload_file(name=model_name, path_or_stream=filename)
    run.complete()

    ###############

maximum_acc_runid = None
maximum_acc = None

for run in experiment.get_runs():
    run_metrics = run.get_metrics()
    run_details = run.get_details()
    # each logged metric becomes a key in this returned dict
    run_acc = run_metrics["acc"]
    run_id = run_details["runId"]

    if maximum_acc is None or run_acc > maximum_acc:
        maximum_acc = run_acc
        maximum_acc_runid = run_id

print("Best run_id: " + maximum_acc_runid)
print("Best run_id acc: " + str(maximum_acc))
Example #13
    model_list = Model.list(ws)
    production_model = next(
        filter(
            lambda x: x.created_time == max(model.created_time
                                            for model in model_list),
            model_list,
        ))
    production_model_run_id = production_model.tags.get("run_id")
    run_list = exp.get_runs()
    # production_model_run = next(filter(lambda x: x.id == production_model_run_id, run_list))

    # Get the run history for both production model and newly trained model and compare validation accuracy
    production_model_run = Run(exp, run_id=production_model_run_id)
    new_model_run = Run(exp, run_id=new_model_run_id)

    production_model_acc = production_model_run.get_metrics().get(
        "validation_acc")
    new_model_acc = new_model_run.get_metrics().get("validation_acc")
    print(
        "Current Production model accuracy: {}, New trained model accuracy: {}"
        .format(production_model_acc, new_model_acc))

    promote_new_model = False
    if new_model_acc > production_model_acc:
        promote_new_model = True
        print("New trained model performs better, thus it will be registered")
except Exception:
    promote_new_model = True
    print(
        "This is the first model to be trained, thus nothing to evaluate for now"
    )
Example #14
latest_model_name = latest_model.name
latest_model_version = latest_model.version
latest_model_path = latest_model.get_model_path(latest_model_name,
                                                _workspace=ws)

print('Latest model id: ', latest_model_id)
print('Latest model name: ', latest_model_name)
print('Latest model version: ', latest_model_version)
print('Latest model path: ', latest_model_path)

latest_model_run_id = latest_model.tags.get("run_id")
print('Latest model run id: ', latest_model_run_id)

latest_model_run = Run(run.experiment, run_id=latest_model_run_id)

latest_model_accuracy = latest_model_run.get_metrics().get("Accuracy")
print('Latest model accuracy: ', latest_model_accuracy)

ws_list = Webservice.list(ws, model_name=latest_model_name)
print('webservice list')
print(ws_list)

deploy_model = False
current_model = None

if len(ws_list) > 0:
    webservice = ws_list[0]
    try:
        image_id = webservice.tags['image_id']
        image = Image(ws, id=image_id)
        current_model = image.models[0]