Example 1
from azureml.core import Run, Workspace

def get_child_runs(run_history_name, parent_run_id):
    ws = Workspace.from_config()
    run = Run(ws, run_history_name, parent_run_id)
    print('parent:', run.id)
    print('children:')
    for c in run.get_children():
        print(c.id)
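A minimal usage sketch, assuming a config.json for the workspace is present; the experiment name and parent run ID below are placeholders:

# Hypothetical values: replace with a real experiment name and parent run ID.
get_child_runs(run_history_name='my-experiment',
               parent_run_id='HD_00000000-0000-0000-0000-000000000000')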
Example 2
from azureml.core import Experiment, Run

def cancel_runs_in_experiment(ws, experiment):
    failed_experiment = Experiment(ws, experiment)
    all_runs = failed_experiment.get_runs()
    for idx, run in enumerate(all_runs):
        try:
            if run.status == 'Running':
                run = Run(failed_experiment, run.id)
                print('Canceling run: ', run)
                run.cancel()
        except Exception as e:
            print('Canceling run failed due to ', e)
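A possible invocation of this cancellation helper, again assuming a workspace config file; the experiment name is a placeholder:

from azureml.core import Workspace

ws = Workspace.from_config()
cancel_runs_in_experiment(ws, 'my-experiment')  # placeholder experiment name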
Example 3
def run_multiple_inferences(summary_df, train_experiment, test_experiment,
                            compute_target, script_folder, test_dataset,
                            lookback_dataset, max_horizon, target_column_name,
                            time_column_name, freq):
    for run_name, run_summary in summary_df.iterrows():
        print(run_name)
        print(run_summary)
        run_id = run_summary.run_id
        train_run = Run(train_experiment, run_id)

        test_run = run_inference(test_experiment, compute_target,
                                 script_folder, train_run, test_dataset,
                                 lookback_dataset, max_horizon,
                                 target_column_name, time_column_name, freq)

        print(test_run)
        summary_df.loc[summary_df.run_id == run_id,
                       'test_run_id'] = test_run.id

    return summary_df
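A minimal sketch of the summary_df this function expects: a pandas DataFrame indexed by run name with a run_id column, into which the test_run_id column is written back. The run names and IDs below are placeholders:

import pandas as pd

summary_df = pd.DataFrame(
    {'run_id': ['AutoML_000_0', 'AutoML_000_1']},  # placeholder training run IDs
    index=['run_a', 'run_b'])                      # placeholder run names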
Example 4
import logging

from azureml.core import Experiment, Run

logger = logging.getLogger(__name__)

def download_model(workspace, experiment_name, run_id, input_location,
                   output_location):
    """Download the pretrained model.

    Args:
         workspace: Workspace used to access the experiment
         experiment_name: Name of the experiment in which the model is saved
         run_id: ID of the run in which the model was pre-trained
         input_location: Path of the model artifact within that run
         output_location: Local location for saving the model
    """
    experiment = Experiment(workspace=workspace, name=experiment_name)
    # Download the model on which evaluation needs to be done
    run = Run(experiment, run_id=run_id)
    if input_location.endswith(".h5"):
        run.download_file(input_location, output_location)
    elif input_location.endswith(".ckpt"):
        run.download_files(prefix=input_location,
                           output_directory=output_location)
    else:
        raise NameError(f"{input_location}'s path extension not supported")
    logger.info("Successfully downloaded model")
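A hedged usage sketch for both branches; the experiment name, run ID and paths are placeholders:

from azureml.core import Workspace

ws = Workspace.from_config()
# Keras weights: a single .h5 file is downloaded to the given local path.
download_model(ws, 'my-experiment', 'my-run-id',
               input_location='outputs/model.h5',
               output_location='downloaded/model.h5')
# Checkpoint: every file under the prefix goes into a local directory.
download_model(ws, 'my-experiment', 'my-run-id',
               input_location='outputs/model.ckpt',
               output_location='downloaded/')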
Example 5
import os
import joblib
from azureml.core import Dataset, Experiment, Run
from azureml.train.automl.runtime.automl_explain_utilities import \
    automl_setup_model_explanations, automl_check_model_if_explainable
from azureml.explain.model.mimic.models.lightgbm_model import LGBMExplainableModel
from azureml.explain.model.mimic_wrapper import MimicWrapper
from automl.client.core.common.constants import MODEL_PATH
from azureml.explain.model.scoring.scoring_explainer import TreeScoringExplainer, save

OUTPUT_DIR = './outputs/'
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Get workspace from the run context
run = Run.get_context()
ws = run.experiment.workspace

# Get the AutoML run object from the experiment name and the workspace
experiment = Experiment(ws, '<<experiment_name>>')
automl_run = Run(experiment=experiment, run_id='<<run_id>>')

# Check if this AutoML model is explainable
if not automl_check_model_if_explainable(automl_run):
    raise Exception("Model explanations is currently not supported for " +
                    automl_run.get_properties().get('run_algorithm'))

# Download the best model from the artifact store
automl_run.download_file(name=MODEL_PATH, output_file_path='model.pkl')

# Load the AutoML model into memory
fitted_model = joblib.load('model.pkl')

# Get the train dataset from the workspace
train_dataset = Dataset.get_by_name(workspace=ws,
                                    name='<<train_dataset_name>>')
Example 6
from azureml.core import Experiment, Run, Workspace

def download_pretrained_model(workspace: Workspace, output_model_fpath: str):
    print(f"Downloading pretrained model from {CONFIG.PRETRAINED_RUN}")
    previous_experiment = Experiment(workspace=workspace, name=CONFIG.PRETRAINED_EXPERIMENT)
    previous_run = Run(previous_experiment, CONFIG.PRETRAINED_RUN)
    previous_run.download_file(f"outputs/{MODEL_H5_FILENAME}", output_model_fpath)
Example 7
import argparse

from azureml.core import Experiment, Run, Workspace
from azureml.tensorboard import Tensorboard


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--runids',
                        nargs='+',
                        default=None,
                        help='runids to create')

    return parser.parse_args()


args = parse_args()

print(args)

if args.runids:
    # get workspace
    ws = Workspace.from_config()

    # set the experiment
    experiment_name = 'test'
    exp = Experiment(workspace=ws, name=experiment_name)

    runs = []
    for idx in args.runids:
        run = Run(exp, idx)
        runs.append(run)
    tb = Tensorboard(runs)
    tb.start()

    # Wait for input to stop Tensorboard.
    print('Press Enter to stop Tensorboard')
    input()
    tb.stop()
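Usage note: this script is meant to be launched from a shell with one or more run IDs, for example python start_tensorboard.py --runids <run_id_1> <run_id_2> (the script name and run IDs are placeholders); Tensorboard then serves the logs of every listed run until Enter is pressed.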
Example 8
from azureml.core import Experiment, Run

def download_pretrained_model(workspace, output_model_fpath):
    print(f"Downloading pretrained model from {CONFIG.PRETRAINED_RUN}")
    previous_experiment = Experiment(workspace=workspace, name=CONFIG.PRETRAINED_EXPERIMENT)
    previous_run = Run(previous_experiment, CONFIG.PRETRAINED_RUN)
    previous_run.download_file("outputs/best_model.h5", output_model_fpath)
Example 9
import os
import joblib
from azureml.core import Dataset, Experiment, Run
from azureml.interpret.scoring.scoring_explainer import TreeScoringExplainer
from azureml.train.automl.runtime.automl_explain_utilities import (
    automl_setup_model_explanations,
    automl_check_model_if_explainable,
)

OUTPUT_DIR = "./outputs/"
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Get workspace from the run context
run = Run.get_context()
ws = run.experiment.workspace

# Get the AutoML run object from the experiment name and the workspace
experiment = Experiment(ws, "<<experiment_name>>")
automl_run = Run(experiment=experiment, run_id="<<run_id>>")

# Check if this AutoML model is explainable
if not automl_check_model_if_explainable(automl_run):
    raise Exception("Model explanations are currently not supported for " +
                    automl_run.get_properties().get("run_algorithm"))

# Download the best model from the artifact store
automl_run.download_file(name=MODEL_PATH, output_file_path="model.pkl")

# Load the AutoML model into memory
fitted_model = joblib.load("model.pkl")

# Get the train dataset from the workspace
train_dataset = Dataset.get_by_name(workspace=ws,
                                    name="<<train_dataset_name>>")
Example 10
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Cancel pipeline run
"""
from azureml.core.run import Run
from azureml.core import Workspace, Experiment
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--run_id", type=str, help="run id value", required=True)
parser.add_argument("--step_id", type=str, help="step id value", required=True)

args = parser.parse_args()

ws = Workspace.from_config()

experiment = Experiment(workspace=ws,
                        name="DEV-train-pipeline",
                        _id=args.run_id)
fetched_run = Run(experiment=experiment, run_id=args.step_id)
fetched_run.cancel()
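Usage note: the script is intended to be invoked as, for example, python cancel_run.py --run_id <pipeline_run_id> --step_id <step_run_id> (the script name is a placeholder); the pipeline run ID pins the experiment context and the step run ID identifies the run to cancel.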
Example 11
def get_run(run_id):
    ws = Workspace.from_config()
    print(ws.name, ws.resource_group)
    from azureml.core.run import Run
    run = Run(workspace=ws, run_history_name='e2e-07', run_id=run_id)
    return run

import os
import joblib
from azureml.core import Dataset, Experiment, Run
from azureml.train.automl.runtime.automl_explain_utilities import \
    automl_setup_model_explanations
from azureml.explain.model.mimic.models.lightgbm_model import LGBMExplainableModel
from azureml.explain.model.mimic_wrapper import MimicWrapper
from automl.client.core.common.constants import MODEL_PATH
from azureml.explain.model.scoring.scoring_explainer import TreeScoringExplainer, save

OUTPUT_DIR = './outputs/'
os.makedirs(OUTPUT_DIR, exist_ok=True)

# Get workspace from the run context
run = Run.get_context()
ws = run.experiment.workspace

# Get the AutoML run object from the experiment name and the workspace
experiment = Experiment(ws, '<<experiment_name>>')
automl_run = Run(experiment=experiment,
                 run_id='AutoML_176c6b1a-fbf8-4f52-a4c9-36ea019addc1_18')

# Download the best model from the artifact store
automl_run.download_file(name=MODEL_PATH, output_file_path='model.pkl')

# Load the AutoML model into memory
fitted_model = joblib.load('model.pkl')

# Get the train dataset from the workspace
train_dataset = Dataset.get_by_name(workspace=ws,
                                    name='machineData_train_dataset')
# Drop the labelled column to get the training features.
X_train = train_dataset.drop_columns(columns=['<<target_column_name>>'])
y_train = train_dataset.keep_columns(columns=['<<target_column_name>>'],
                                     validate=True)