def init():
    """
    Initializer called once per node that runs the scoring job. Parse command
    line arguments and get the right model to use for scoring.
    """
    try:
        print("Initializing batch scoring script...")

        model_filter = parse_args()
        amlmodel = get_latest_model(
            model_filter[0], model_filter[1], model_filter[2]
        )

        global model
        modelpath = amlmodel.get_model_path(model_name=model_filter[0])
        model = joblib.load(modelpath)
        print("Loaded model {}".format(model_filter[0]))
    except Exception as ex:
        print("Error: {}".format(ex))
        # Re-raise so the node fails fast instead of scoring without a model
        raise
Example #2
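# This second fragment (apparently from a model-evaluation step) assumes
# some setup earlier in its file: an argparse parser plus the Azure ML run,
# experiment, and workspace handles. A rough sketch of that setup is below;
# the defaults, the initial run_id value, and the helper import note are
# assumptions rather than the original script's exact code.
import argparse

from azureml.core import Run

# get_latest_model(model_name, tag_name, exp_name, ws) is assumed to be
# imported from a project helper module not shown in this fragment.

run = Run.get_context()
exp = run.experiment
ws = exp.workspace

parser = argparse.ArgumentParser("evaluate")
parser.add_argument("--run_id", type=str, default=None)
parser.add_argument("--model_name", type=str, default="model.pkl")
parser.add_argument("--allow_run_cancel", type=str, default="true")

run_id = 'amlcompute'  # assumed placeholder, overridden below if --run_id is set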
args = parser.parse_args()
if args.run_id is not None:
    run_id = args.run_id
if run_id == 'amlcompute':
    run_id = run.parent.id
model_name = args.model_name
metric_eval = "mse"

allow_run_cancel = args.allow_run_cancel
# Parameterize the metrics on which the models should be compared
# Add a golden dataset on which all model performance can be evaluated
try:
    firstRegistration = False
    tag_name = 'experiment_name'

    model = get_latest_model(
        model_name, tag_name, exp.name, ws)

    if model is not None:
        production_model_mse = 10000
        if metric_eval in model.tags:
            production_model_mse = float(model.tags[metric_eval])
        # Fetch the new model's metric before converting, so a missing value
        # is handled below instead of raising on float(None)
        new_model_mse = run.parent.get_metrics().get(metric_eval)
        if production_model_mse is None or new_model_mse is None:
            print("Unable to find", metric_eval, "metrics, "
                  "exiting evaluation")
            if allow_run_cancel.lower() == 'true':
                run.parent.cancel()
        else:
            new_model_mse = float(new_model_mse)
            print(
                "Current Production model mse: {}, "
                "New trained model mse: {}".format(