import argparse
import os
import sys

from azureml.core import Experiment, Run, Workspace
# Env is a project helper that surfaces configuration (such as the model
# name) from environment variables; this import path is an assumption.
from ml_service.util.env_variables import Env


def main():

    run = Run.get_context()
    if run.id.startswith('OfflineRun'):
        from dotenv import load_dotenv
        sys.path.append(os.path.abspath("./code/util"))  # NOQA: E402
        from model_helper import get_model_by_tag
        # For local development, set values in this section
        load_dotenv()
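        # Hypothetical .env contents for local development (all values are
        # placeholders, not real resources):
        #   WORKSPACE_NAME=my-aml-workspace
        #   EXPERIMENT_NAME=my-experiment
        #   RESOURCE_GROUP=my-resource-group
        #   SUBSCRIPTION_ID=00000000-0000-0000-0000-000000000000
        #   BUILD_BUILDID=20200101.1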
        workspace_name = os.environ.get("WORKSPACE_NAME")
        experiment_name = os.environ.get("EXPERIMENT_NAME")
        resource_group = os.environ.get("RESOURCE_GROUP")
        subscription_id = os.environ.get("SUBSCRIPTION_ID")
        build_id = os.environ.get('BUILD_BUILDID')
        aml_workspace = Workspace.get(name=workspace_name,
                                      subscription_id=subscription_id,
                                      resource_group=resource_group)
        ws = aml_workspace
        exp = Experiment(ws, experiment_name)
    else:
        sys.path.append(os.path.abspath("./util"))  # NOQA: E402
        from model_helper import get_model_by_tag
        ws = run.experiment.workspace
        exp = run.experiment
        build_id = None  # set from --build_id below; avoids a NameError later

    e = Env()

    parser = argparse.ArgumentParser("register")
    parser.add_argument(
        "--build_id",
        type=str,
        help="The Build ID of the build triggering this pipeline run",
    )
    parser.add_argument("--output_model_version_file",
                        type=str,
                        default="model_version.txt",
                        help="Name of a file to write model version to")

    args = parser.parse_args()
    if args.build_id is not None:
        build_id = args.build_id
    model_name = e.model_name

    try:
        tag_name = 'BuildId'
        model = get_model_by_tag(model_name, tag_name, build_id, exp.workspace)
        if model is not None:
            print("Model was registered for this build.")
        else:
            print("Model was not registered for this run.")
            sys.exit(1)
    except Exception as ex:  # renamed from `e` to avoid shadowing the Env instance
        print(ex)
        print("Model was not registered for this run.")
        sys.exit(1)

    # Save the Model Version for other AzDO jobs after script is complete
    if args.output_model_version_file is not None:
        with open(args.output_model_version_file, "w") as out_file:
            out_file.write(str(model.version))
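
The model_helper.get_model_by_tag helper imported in both branches above is
not shown on this page. A minimal sketch of what it might look like,
assuming it simply filters registered models on a tag/value pair via
azureml.core's Model.list (the real helper may differ):

from azureml.core.model import Model


def get_model_by_tag(model_name, tag_name, tag_value, workspace):
    """Return the latest registered model tagged tag_name=tag_value,
    or None if no such model exists."""
    models = Model.list(
        workspace,
        name=model_name,
        tags=[[tag_name, tag_value]],  # exact tag key/value match
        latest=True,
    )
    return models[0] if models else None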
Example #2
import argparse
import os
import sys

from azureml.core import Experiment, Run, Workspace
from azureml.core.authentication import ServicePrincipalAuthentication

# register_aml_model is called below but defined elsewhere in the script;
# a hedged sketch of it follows this example.


def main():

    run = Run.get_context()
    if run.id.startswith('OfflineRun'):
        from dotenv import load_dotenv
        sys.path.append(os.path.abspath("./code/util"))  # NOQA: E402
        from model_helper import get_model_by_tag
        # For local development, set values in this section
        load_dotenv()
        workspace_name = os.environ.get("WORKSPACE_NAME")
        experiment_name = os.environ.get("EXPERIMENT_NAME")
        resource_group = os.environ.get("RESOURCE_GROUP")
        subscription_id = os.environ.get("SUBSCRIPTION_ID")
        tenant_id = os.environ.get("TENANT_ID")
        model_name = os.environ.get("MODEL_NAME")
        app_id = os.environ.get('SP_APP_ID')
        app_secret = os.environ.get('SP_APP_SECRET')
        build_id = os.environ.get('BUILD_BUILDID')
        # run_id useful to query previous runs
        run_id = "bd184a18-2ac8-4951-8e78-e290bef3b012"
        service_principal = ServicePrincipalAuthentication(
            tenant_id=tenant_id,
            service_principal_id=app_id,
            service_principal_password=app_secret)

        aml_workspace = Workspace.get(name=workspace_name,
                                      subscription_id=subscription_id,
                                      resource_group=resource_group,
                                      auth=service_principal)
        ws = aml_workspace
        exp = Experiment(ws, experiment_name)
    else:
        sys.path.append(os.path.abspath("./util"))  # NOQA: E402
        from model_helper import get_model_by_tag
        ws = run.experiment.workspace
        exp = run.experiment
        run_id = 'amlcompute'
        build_id = None  # set from --build_id below; avoids a NameError later

    parser = argparse.ArgumentParser("register")
    parser.add_argument(
        "--build_id",
        type=str,
        help="The Build ID of the build triggering this pipeline run",
    )
    parser.add_argument(
        "--run_id",
        type=str,
        help="Training run ID",
    )
    parser.add_argument(
        "--model_name",
        type=str,
        help="Name of the Model",
        default="sklearn_regression_model.pkl",
    )
    parser.add_argument(
        "--validate",
        type=str,
        help="Set to true to only validate if model is registered for run",
        default=False,
    )
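    # Example invocation (hypothetical script name; --validate arrives as a
    # string, which is why it is normalized to a bool after parsing):
    #   python register_model.py --build_id 20200101.1 --validate true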

    args = parser.parse_args()
    if args.build_id is not None:
        build_id = args.build_id
    if args.run_id is not None:
        run_id = args.run_id
    if run_id == 'amlcompute':
        run_id = run.parent.id
    # --validate is parsed as a string, so the value "false" would otherwise
    # be truthy; normalize it to a real bool before branching.
    validate = str(args.validate).lower() == 'true'
    model_name = args.model_name

    if validate:
        try:
            tag_name = 'BuildId'
            model = get_model_by_tag(model_name, tag_name, build_id,
                                     exp.workspace)
            if model is not None:
                print("Model was registered for this build.")
            else:
                print("Model was not registered for this run.")
                sys.exit(1)
        except Exception as e:
            print(e)
            print("Model was not registered for this run.")
            sys.exit(1)
    else:
        if build_id is None:
            register_aml_model(model_name, exp, run_id)
        else:
            run.tag("BuildId", value=build_id)
            register_aml_model(model_name, exp, run_id, build_id)
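
register_aml_model is called above but not defined in this snippet. A hedged
sketch of what such a helper might do, assuming the training run saved the
model file into its outputs folder (the path, tags, and print format are
assumptions):

def register_aml_model(model_name, exp, run_id, build_id=None):
    """Register the model produced by run_id, optionally tagging the build."""
    run = Run(experiment=exp, run_id=run_id)
    tags = {'BuildId': build_id} if build_id is not None else None
    model = run.register_model(
        model_name=model_name,
        model_path='outputs/' + model_name,  # assumed output location
        tags=tags,
    )
    print('Registered version {} of model {}'.format(model.version, model.name))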
Example #3
# Excerpt: the surrounding module is truncated; args, run, exp, ws and
# get_model_by_tag are set up as in Examples #1 and #2.
if args.build_id is not None:
    build_id = args.build_id
if args.run_id is not None:
    run_id = args.run_id
if run_id == 'amlcompute':
    run_id = run.parent.id
model_name = args.model_name
metric_eval = "mse"
run.tag("BuildId", value=build_id)

# Parameterize the metrics on which the models should be compared
# Add golden data set on which all the model performance can be evaluated
try:
    firstRegistration = False
    tag_name = 'experiment_name'

    model = get_model_by_tag(model_name, tag_name, exp.name, ws)

    if model is not None:

        production_model_run_id = model.run_id

        # Get the run history for both production model and
        # newly trained model and compare mse
        production_model_run = Run(exp, run_id=production_model_run_id)
        new_model_run = run.parent
        print("Production model run is", production_model_run)

        production_model_mse = \
            production_model_run.get_metrics().get(metric_eval)
        new_model_mse = new_model_run.get_metrics().get(metric_eval)
        if production_model_mse is None or new_model_mse is None:
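            # --- The source snippet is truncated at this point. A hedged
            # sketch of how this evaluation step typically concludes (an
            # assumption, not the author's code): stop when a metric is
            # missing, and cancel promotion when the new model is not better.
            print("Unable to find", metric_eval, "metrics; cancelling run")
            run.parent.cancel()
        else:
            if new_model_mse < production_model_mse:
                print("New trained model performs better; continuing")
            else:
                print("New trained model does not perform better; cancelling")
                run.parent.cancel()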