Example #1
from azureml.core import Experiment, Run
from azureml.core.authentication import AzureCliAuthentication


def register_model(model_name, run_id):
    """Register the model to the AML Workspace"""
    cli_auth = AzureCliAuthentication()

    experiment = Experiment.from_directory(".", auth=cli_auth)
    run = Run(experiment, run_id)

    run.register_model(model_name,
                       model_path='outputs/final_model.hdf5',
                       model_framework='TfKeras',
                       model_framework_version='1.13')
Example #2
def register_aml_model(model_name, exp, run_id, build_id: str = 'none'):
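    # Fetch the run by id and register the model stored under ./outputs/<model_name>,
    # tagging it with build metadata when a build_id is supplied.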
    try:
        if (build_id != 'none'):
            model_already_registered(model_name, exp, run_id)
            run = Run(experiment=exp, run_id=run_id)
            tagsValue = {"area": "diabetes", "type": "regression",
                         "BuildId": build_id, "run_id": run_id,
                         "experiment_name": exp.name}
        else:
            run = Run(experiment=exp, run_id=run_id)
            if (run is not None):
                tagsValue = {"area": "diabetes", "type": "regression",
                             "run_id": run_id, "experiment_name": exp.name}
            else:
                print("A model run for experiment", exp.name,
                      "matching properties run_id =", run_id,
                      "was not found. Skipping model registration.")
                sys.exit(0)

        model = run.register_model(model_name=model_name,
                                   model_path="./outputs/" + model_name,
                                   tags=tagsValue)
        os.chdir("..")
        print(
            "Model registered: {} \nModel Description: {} "
            "\nModel Version: {}".format(
                model.name, model.description, model.version
            )
        )
    except Exception:
        traceback.print_exc(limit=None, file=None, chain=True)
        print("Model registration failed")
        raise
Example #3
def main():
    args = getRuntimeArgs()
    run = Run.get_context()
    run_id = run.parent.id
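    # Resolve the parent run so the best model is registered against it rather than this child step.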
    parent_run = Run(experiment=run.experiment, run_id=run_id)
    # register the best model with the input dataset
    model = parent_run.register_model(model_name=args.model_name,
                                      model_path=os.path.join(
                                          'outputs', 'model.pkl'))
Example #4
def register_model(run_id, experiment):
    best_run = Run(experiment=experiment, run_id=run_id)
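    # List the run's logged files and pick the first artifact under outputs/ as the model file.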
    files = best_run.get_file_names()
    r = re.compile('outputs.*')
    model_path = [l for l in files if r.match(l)]
    path, model = os.path.split(model_path[0])

    model = best_run.register_model(model_name=model,
                                    model_path='outputs/model.pkl')
    return path
Example #5
deployment_settings = settings["deployment"]

# Get details from Run
print("Loading Run Details")
with open(os.path.join("code", "run_details.json")) as f:
    run_details = json.load(f)

# Get workspace
print("Loading Workspace")
cli_auth = AzureCliAuthentication()
config_file_path = os.environ.get("GITHUB_WORKSPACE", default="aml_service")
config_file_name = "aml_arm_config.json"
ws = Workspace.from_config(path=config_file_path,
                           auth=cli_auth,
                           _file_name=config_file_name)
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep='\n')

# Loading Run
print("Loading Run")
experiment = Experiment(workspace=ws, name=run_details["experiment_name"])
run = Run(experiment=experiment, run_id=run_details["run_id"])

# Register model
tags = deployment_settings["model"]["tags"]

model = run.register_model(
    model_name=deployment_settings["model"]["name"],
    model_path=deployment_settings["model"]["path"],
    tags=tags,
    description=deployment_settings["model"]["description"],
)
Example #6
    
    parser.add_argument(
        '--model_name', type=str, default='', help='Name you want to give to the model.'
    )

    parser.add_argument(
        '--model_assets_path', type=str, default='outputs', help='Location of trained model.'
    )

    args, unparsed = parser.parse_known_args()

    print('Model assets path is:', args.model_assets_path)
    print('Model name is:', args.model_name)
      
    run = Run.get_context()
   
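    # Attach to the root (pipeline) run and upload the trained model plus deployment assets into its outputs folder.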
    pipeline_run = Run(run.experiment, run._root_run_id)
    pipeline_run.upload_file("outputs/model/model.pth", os.path.join(args.model_assets_path, "model.pth"))
    pipeline_run.upload_file("outputs/model/labels.txt", os.path.join(args.model_assets_path, "labels.txt"))
    pipeline_run.upload_file("outputs/deployment/score.py", "deployment/score.py")
    pipeline_run.upload_file("outputs/deployment/myenv.yml", "deployment/myenv.yml")
    pipeline_run.upload_file("outputs/deployment/deploymentconfig.json", "deployment/deploymentconfig.json")
    pipeline_run.upload_file("outputs/deployment/inferenceconfig.json", "deployment/inferenceconfig.json")

    tags = {
        "Ignite": "Dubai"
    }

    model = pipeline_run.register_model(model_name='seer', model_path='outputs/', tags=tags)

    print('Model registered: {} \nModel Description: {} \nModel Version: {}'.format(model.name, model.description, model.version))
Example #7
# obtain run with max accuracy
max_accuracy = None
max_acc_runid = None

for run in exp.get_runs():
    run_metrics = run.get_metrics()
    run_details = run.get_details()
 
    # obtain metric and run id
    run_id = run_details['runId']
    run_acc = run_metrics['accuracy']

    if max_accuracy is None:
        max_accuracy = run_acc
        max_acc_runid = run_id
    else:
        if run_acc > max_accuracy:
            max_accuracy = run_acc
            max_acc_runid = run_id

# obtain best run
print("Best run_id: " + max_acc_runid)
print("Best run_id rmse: " + str(max_accuracy))
best_run = Run(experiment=exp, run_id=max_acc_runid)

# register model for deployment
model = best_run.register_model(
    model_name='sklearn_classification',
    model_path='outputs/sklearn_classification_model.pkl')
print(model.name, model.id, model.version, sep='\t')
Example #8
        print("WARNING: Could get val_los for run_id", run)
        pass

print("best run", best_run_id, best_loss)


# start an Azure ML run
run = Run.get_context()
run_details = run.get_details()

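# The experiment name is embedded in the run's environment name (second whitespace-separated token).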
environment_definition = run_details['runDefinition']['environment']
experiment_name = environment_definition['name'].split()[1]

exp = Experiment(ws, name=experiment_name)
best_run = Run(exp, best_run_id)

output_dir = './'  # 'outputs'

# register the model
if best_run_id:
    tags = {}
    tags['run_id'] = best_run_id
    tags['val_loss'] = metrics[best_run_id]['val_loss'][-1]
    model = best_run.register_model(model_name=experiment_name,
                                    model_path=output_dir,
                                    tags=tags)
    model.download(target_dir=args.prednet_path)
else:
    raise Exception("Couldn't not find a model to register."
                    "Probably because no run completed")
Example #9
except Exception:
    print("No new model to register as production model perform better")
    sys.exit(0)

run_id = config["run_id"]
experiment_name = config["experiment_name"]
# exp = Experiment(workspace=ws, name=experiment_name)

run = Run(experiment=exp, run_id=run_id)
print(run.get_file_names())
print("Run ID for last run: {}".format(run_id))

model = run.register_model(model_name=model_name,
                           model_path="./outputs/" + model_name,
                           tags={
                               "area": "diabetes",
                               "type": "regression"
                           })
os.chdir("..")
print(
    "Model registered: {} \nModel Description: {} \nModel Version: {}".format(
        model.name, model.description, model.version))

# Writing the registered model details to /aml_config/model.json
model_json = {}
model_json["model_name"] = model.name
model_json["model_version"] = model.version
model_json["run_id"] = run_id
filename = "model_{}.json".format(args.config_suffix)
output_path = os.path.join(args.json_config, filename)
with open(output_path, "w") as outfile:
    json.dump(model_json, outfile)
Example #10
# comment out the below line to only register the model if the new accuracy score is better
acc_to_beat = 0

print("accuracy to beat", acc_to_beat)
if model_accuracy > acc_to_beat:
    print("model is better, registering")

    # Registering the model to the parent run (the pipeline). The entire pipeline encapsulates the training process.
    model = parentrun.register_model(
        model_name=model_name,  # Name of the registered model in your workspace.
        model_path=model_path,  # Local file to upload and register as a model.
        model_framework=Model.Framework.SCIKITLEARN,  # Framework used to create the model.
        model_framework_version=sklearn.__version__,  # Version of scikit-learn used to create the model.
        sample_input_dataset=dataset,
        resource_configuration=ResourceConfiguration(cpu=1, memory_in_gb=0.5),
        description='basic iris classification',
        tags={
            'quality': 'good',
            'type': 'classification'
        })
    model.add_properties({
        "accuracy": model_accuracy,
        "model_type": model_type
    })
    model.add_tags({"accuracy": model_accuracy, "model_type": model_type})

    print(model)
    # Azure ML UI doesn't list the datasets so the print statement below does indeed show the dataset was included.
    print(model.sample_input_dataset)
Example #11
        ))
    production_model_run_id = production_model.tags.get("run_id")
    run_list = exp.get_runs()

    # Get the run history for both production model and
    # newly trained model and compare mse
    production_model_run = Run(exp, run_id=production_model_run_id)
    new_model_run = Run(exp, run_id=new_model_run_id)

    production_model_mse = production_model_run.get_metrics().get("mse")
    new_model_mse = new_model_run.get_metrics().get("mse")
    print("Current Production model mse: {}, New trained model mse: {}".format(
        production_model_mse, new_model_mse))

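    # Promote the newly trained model only if its mse improves on the current production model.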
    promote_new_model = False
    if new_model_mse < production_model_mse:
        promote_new_model = True
        print("New trained model performs better, thus it will be registered")
except Exception:
    promote_new_model = True
    print("This is the first model to be trained, \
          thus nothing to evaluate for now")

# Writing the run id to /aml_config/run_id.json
if promote_new_model:
    model_path = os.path.join('outputs', model_name)
    new_model_run.register_model(model_name=model_name,
                                 model_path=model_path,
                                 properties={"release_id": release_id})
    print("Registered new model!")
Example #12
        new_model_parameter = run.get_metrics().get(metric)
        production_model_parameter = production_model_run.get_metrics().get(metric)
        if new_model_parameter > production_model_parameter:
            promote_new_model = False
except Exception:
    promote_new_model = True
    print("This is the first model to be trained, thus nothing to evaluate for now")

# TODO: Remove
if promote_new_model:
    print("New model performs better, thus it will be registered")
else:
    print("New model does not perform better.")
print("Promote all models for now")
promote_new_model = True

# Registering new Model
if promote_new_model:
    print("Registering new Model, because it performs better")
    tags = deployment_settings["model"]["tags"]
    tags["run_id"] = run.id
    model = run.register_model(model_name=deployment_settings["model"]["name"],
                               model_path=deployment_settings["model"]["path"],
                               tags=tags,
                               properties=deployment_settings["model"]["properties"],
                               model_framework=deployment_settings["model"]["model_framework"],
                               model_framework_version=deployment_settings["model"]["model_framework_version"],
                               description=deployment_settings["model"]["description"],
                               datasets=deployment_settings["model"]["datasets"])
else:
    raise Exception("No new model to register as production model perform better")
Example #13
import argparse
import json
import os

from azureml.core import Run
from azureml.core.model import Model
from azureml.pipeline.steps import HyperDriveStep, HyperDriveStepRun
######################################################################################################
parser = argparse.ArgumentParser()
parser.add_argument('--ModelData', dest='ModelData', required=True)
args = parser.parse_args()

Model_OutputPath = args.ModelData

######################################################################################################

run = Run.get_context()
run_id = run.parent.id

print("register final model")

parent_run = Run(experiment=run.experiment, run_id=run_id)

model = parent_run.register_model(
    model_name='MLOps_Model',
    model_path='outputs/weights.best.dense_generator_callback.hdf5')

######################################################################################################

os.makedirs(Model_OutputPath, exist_ok=True)

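# Persist the registered model's name and version so downstream pipeline steps can pick them up.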
model_json = {}
model_json["model_name"] = model.name
model_json["model_version"] = model.version
with open(os.path.join(Model_OutputPath, 'model.json'), "w") as outfile:
    json.dump(model_json, outfile)
Example #14
parser.add_argument('--run_id',
                    help='the experiment run_id (not the run number) in Azure ML that the model is based on',
                    required=True)
FLAGS = parser.parse_args()

# input parameters
proj_root=FLAGS.proj_root
run_id = FLAGS.run_id

# env variables
subscription_id = os.getenv("SUBSCRIPTION_ID")
resource_group = os.getenv("RESOURCE_GROUP")
workspace_name = os.getenv("WORKSPACE_NAME")

# constants
MODEL_FILE='outputs/model/frozen_inference_graph.pb'

# set up Azure ML environment
ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)
model_name = proj_root if proj_root.isalnum() else ''.join(ch for ch in proj_root if ch.isalnum())
experiment_name = model_name
exp = Experiment(workspace=ws, name=experiment_name)

print("experiment:{}".format(experiment_name))

# register the model
run = Run(exp, run_id)
model = run.register_model(model_name=model_name, model_path=MODEL_FILE)
print('registered model {}, version: {}'.format(model.name, model.version))

model_nm = "bikeshare.mml"
model_output = '/mnt/azml/outputs/' + model_nm
model_dbfs = "/dbfs" + model_output
lrPipelineModel.write().overwrite().save(model_output)

# COMMAND ----------

model_name, model_ext = model_dbfs.split(".")

# COMMAND ----------

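# Zip the saved pipeline model directory, upload the archive to the run's outputs, then register it as a model.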
model_zip = model_name + ".zip"
shutil.make_archive(model_name, 'zip', model_dbfs)
azRun.upload_file("outputs/" + model_nm, model_zip)

# COMMAND ----------

azRun.register_model(model_name=model_nm, model_path="outputs/" + model_nm)

# COMMAND ----------

# now delete the serialized model from local folder since it is already uploaded to run history
shutil.rmtree(model_dbfs)
os.remove(model_zip)

# COMMAND ----------

mlflow.end_run()

# COMMAND ----------
Example #16
joblib.dump(value=model, filename='finalmodel2.pkl')
run.upload_file(name=model_name, path_or_stream=filename)
run.complete()


from azureml.core import Run

runid='062ae84f-1f94-43ed-bcf5-377a93b14006'
run = Run(experiment=experiment, run_id=runid)
print(run.get_file_names())


# Change names
run.download_file(name="finalmodel2.pkl")

model = run.register_model(model_name='finalmodel2.pkl',
                           model_path='finalmodel2.pkl')
print(model.name, model.id, model.version, sep='\t')
Example #17
#############################################################
if __name__ == "__main__":
    ws = Workspace(subscription_id, resource_group, workspace_name)
    if test_only:
        service = ws.webservices[service_name]
        test_service(service, container, blob)
        sys.exit()
    exp = Experiment(workspace=ws, name=experiment_name)
    try:
        run = Run(exp, run_id)
    except ServiceException:
        print("Run id not found, exiting...")
        sys.exit()
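    # Register the artifacts under PathsConfig.outputs_directory as the model, tagged with the originating run id.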
    model = run.register_model(
        model_name=model_name,
        model_path=PathsConfig.outputs_directory,
        tags={"run_id": run_id},
    )
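    # Build the conda/pip environment for deployment and save it as env.yml next to the scoring assets.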
    cd = CondaDependencies.create()
    for conda_package in conda_packages:
        cd.add_conda_package(conda_package)
    for pip_package in pip_packages:
        cd.add_pip_package(pip_package)
    cd.add_tensorflow_pip_package(core_type="gpu", version="2.2.0")
    cd.save_to_file(
        base_directory="src/classification/deployment",
        conda_file_path="env.yml",
    )
    if compute_name in ws.compute_targets:
        compute_target = ws.compute_targets[compute_name]
    else:
Example #18
except Exception:
    print("No new model to register as production model perform better")
    sys.exit(0)

run_id = config["run_id"]
experiment_name = config["experiment_name"]
# exp = Experiment(workspace=ws, name=experiment_name)

run = Run(experiment=exp, run_id=run_id)
print(run.get_file_names())
print("Run ID for last run: {}".format(run_id))

model = run.register_model(model_name=model_name,
                           model_path="./outputs/" + model_name,
                           tags={
                               "area": "HR",
                               "type": "attrition"
                           })
os.chdir("..")
print(
    "Model registered: {} \nModel Description: {} \nModel Version: {}".format(
        model.name, model.description, model.version))

# Writing the registered model details to /aml_config/model.json
model_json = {}
model_json["model_name"] = model.name
model_json["model_version"] = model.version
model_json["run_id"] = run_id
filename = "model_{}.json".format(args.config_suffix)
output_path = os.path.join(args.json_config, filename)
with open(output_path, "w") as outfile:
    json.dump(model_json, outfile)
Example #19
minimum_rmse_runid = None
minimum_rmse = None

for run in experiment.get_runs():
    run_metrics = run.get_metrics()
    run_details = run.get_details()
    # each logged metric becomes a key in this returned dict
    run_rmse = run_metrics["rmse"]
    run_id = run_details["runId"]

    if minimum_rmse is None:
        minimum_rmse = run_rmse
        minimum_rmse_runid = run_id
    else:
        if run_rmse < minimum_rmse:
            minimum_rmse = run_rmse
            minimum_rmse_runid = run_id

print("Best run_id: " + minimum_rmse_runid)
print("Best run_id rmse: " + str(minimum_rmse))

from azureml.core import Run
best_run = Run(experiment=experiment, run_id=minimum_rmse_runid)
print(best_run.get_file_names())

best_run.download_file(name="model_alpha_0.1.pkl")

model = best_run.register_model(model_name='diabetes',
                                model_path='model_alpha_0.1.pkl')
print(model.name, model.id, model.version, sep='\t')
Example #20
from azureml.core import Experiment, Run, Workspace
from config_deepensemble_1 import CONFIG

if __name__ == "__main__":

    workspace = Workspace.from_config()
    experiment = Experiment(workspace=workspace, name=CONFIG.EXPERIMENT_NAME)
    run = Run(experiment, CONFIG.RUN_ID)
    model = run.register_model(model_name=CONFIG.MODEL_NAME,
                               model_path='outputs')
    print('Model registered successfully')
Example #21
    
    parser.add_argument(
        '--model_name', type=str, default='', help='Name you want to give to the model.'
    )

    parser.add_argument(
        '--model_assets_path', type=str, default='outputs', help='Location of trained model.'
    )

    args, unparsed = parser.parse_known_args()

    print('Model assets path is:', args.model_assets_path)
    print('Model name is:', args.model_name)
      
    run = Run.get_context()
   
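    # Attach to the root (pipeline) run and upload the trained model plus deployment assets into its outputs folder.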
    pipeline_run = Run(run.experiment, run._root_run_id)
    pipeline_run.upload_file("outputs/model/model.pth", os.path.join(args.model_assets_path, "model.pth"))
    pipeline_run.upload_file("outputs/model/labels.txt", os.path.join(args.model_assets_path, "labels.txt"))
    pipeline_run.upload_file("outputs/deployment/score.py", "deployment/score.py")
    pipeline_run.upload_file("outputs/deployment/myenv.yml", "deployment/myenv.yml")
    pipeline_run.upload_file("outputs/deployment/deploymentconfig.json", "deployment/deploymentconfig.json")
    pipeline_run.upload_file("outputs/deployment/inferenceconfig.json", "deployment/inferenceconfig.json")

    tags = {
        "Conference": "Codecamp"
    }

    model = pipeline_run.register_model(model_name=args.model_name, model_path='outputs/', tags=tags)

    print('Model registered: {} \nModel Description: {} \nModel Version: {}'.format(model.name, model.description, model.version))
Example #22
import os
import sys
import argparse

from azureml.core import Run
from azureml.core.model import Model
from azureml.pipeline.steps import HyperDriveStep, HyperDriveStepRun

run = Run.get_context()
run_id = run.parent.id

parent_run = Run(experiment=run.experiment, run_id=run_id)

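# Register the final weights file from the pipeline's outputs against the parent run.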
model = parent_run.register_model(
    model_name='xrayml_pipeline1',
    model_path='outputs/weights.best.dense_generator_callback.hdf5')