Example #1
0
    def get_latest_model(self, experiment_name):
        """
        Find the experiment associated with the Data Labelling Project,
        locate the best-model run, and download its training artifacts.

        Note: at the time of writing no SDK support is available for data
        labelling projects, hence the manual tag inspection below.

        :param experiment_name: name of the experiment to search in the workspace
        :return: True if a trained model.pt was found and its artifacts
                 downloaded, False otherwise
        """
        success = False

        ws = self.get_workspace()

        logging.info(f"Connected to Workspace {ws.name}")
        experiment = Experiment(workspace=ws, name=experiment_name)
        list_runs = experiment.get_runs()
        for run in list_runs:
            logging.info(f"Getting last run {run.id}")
            tags = run.get_tags()
            # Not every run carries the 'model_explain_run' tag; using
            # .get() skips such runs instead of raising KeyError and
            # aborting the whole search.
            if tags.get('model_explain_run') == 'best_run':
                # Get the latest run
                logging.info(f"Getting last best child run {tags['automl_best_child_run_id']}")
                child_run = run.get(ws, tags['automl_best_child_run_id'])
                # NOTE(review): metrics are read from the parent run, not the
                # best child run — confirm this is intentional.
                metrics = run.get_metrics()
                logging.info(f"Accuracy (class) {metrics['accuracy']}")
                file_names = child_run.get_file_names()
                if "train_artifacts/model.pt" in file_names:
                    logging.info('Found a trained model.pt')
                    child_run.download_files(prefix='train_artifacts',
                                             output_directory='/usr/src/api/models')
                    success = True
                    break

        return success
Example #2
0
from azureml.widgets import RunDetails
from checknotebookoutput import checkNotebookOutput

if __name__ == "__main__":
    # Connect to an existing Azure ML workspace using the local workspace
    # configuration file.
    ws = Workspace.from_config()

    print(ws.resource_group)
    print(ws.subscription_id)

    # choose a name for the run history container in the workspace
    experiment_name = 'automl-remote-attach'
    # project folder
    project_folder = './sample_projects/automl-remote-attach'

    experiment = Experiment(ws, experiment_name)
    # Collect only AutoML-type runs from this experiment's run history.
    automl_runs = list(experiment.get_runs(type='automl'))

    # NOTE(review): sanity check that exactly one AutoML run exists;
    # `assert` is stripped under `python -O` — confirm this is test-only code.
    assert (len(automl_runs) == 1)

    compute_name = 'mydsvmb'

    # The DSVM compute target is looked up by name, so it must already be
    # attached to the workspace.
    dsvm_compute = ws.compute_targets[compute_name]

    # create a new RunConfig object
    conda_run_config = RunConfiguration(framework="python")

    # Set compute target to the Linux DSVM
    conda_run_config.target = dsvm_compute

    # Conda environment for the remote run: AutoML SDK via pip, numpy via conda.
    cd = CondaDependencies.create(pip_packages=['azureml-sdk[automl]'],
                                  conda_packages=['numpy'])
Example #3
0
# Step 4 - Query for all Experiments.
#####################################
# You can retrieve the list of all experiments in Workspace using the following:
all_experiments = ws.experiments

print(all_experiments)

# Query for the metrics of a particular experiment
# You can retrieve an existing experiment by constructing an Experiment object using the name of an existing experiment.
my_experiment = Experiment(ws, "Experiment-02-03")
print(my_experiment)

# Query an experiment for metrics
# With an experiment in hand, you retrieve any metrics collected for any of its child runs.
# Each tuple below is (experiment name, run id, metrics dict for that run).
my_experiment_runs = my_experiment.get_runs()
print([(run.experiment.name, run.id, run.get_metrics())
       for run in my_experiment_runs])

# Step 5 - Submit an experiment to Azure Batch AI and log metrics for multiple training runs
############################################################################################
experiment_name = "UsedCars_Batch_02"

from azureml.core import Experiment
exp = Experiment(workspace=ws, name=experiment_name)

from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
import os

# choose a name for your cluster