Example No. 1
import sys

from checknotebookoutput import checkNotebookOutput
from checkexperimentresult import checkExperimentResult

# download_run_files and check_experiment_model_explanation_of_best_run are
# assumed to come from the test suite's local helper modules.

# Download files for the remote run.
download_run_files(experiment_names=[
    'automl-bikeshareforecasting', 'automl-bikeshareforecasting_test'
],
                   download_all_runs=True)

checkExperimentResult(experiment_name='automl-bikeshareforecasting',
                      expected_num_iteration='1000',
                      expected_minimum_score=0.01,
                      expected_maximum_score=0.3,
                      metric_name='normalized_root_mean_squared_error',
                      absolute_minimum_score=0.0,
                      absolute_maximum_score=1.0)

check_experiment_model_explanation_of_best_run(
    experiment_name='automl-bikeshareforecasting')

# Check the output cells of the notebook.
checkNotebookOutput(
    "auto-ml-forecasting-bike-share.nbconvert.ipynb"
    if len(sys.argv) < 2 else sys.argv[1],
    "warning[except]retrying[except]UserWarning: Matplotlib is building the font cache"
    "[except]warning: a newer version of conda exists"
    "[except]UserWarning: Starting from version 2.2.1, "
    "the library file in distribution wheels for macOS is built by the Apple Clang"
    "[except]The following algorithms are not compatibile with lags and rolling windows"
    "[except]brew install libomp"
    "[except]If 'script' has been provided here"
    "[except]If 'arguments' has been provided here")
Example No. 2
    # `experiment`, `automl_config`, `time`, and checkNotebookOutput are assumed
    # to be defined/imported by the enclosing test script.
    remote_run = experiment.submit(automl_config)

    # Canceling runs
    #
    # You can cancel ongoing remote runs using the *cancel()* and *cancel_iteration()*
    # functions; a short cancel_iteration() sketch follows this example.

    print(remote_run.id)

    time.sleep(180)

    # Cancel the ongoing experiment and stop scheduling new iterations
    remote_run.cancel()

    print('run cancelled')

    # Wait for the run to complete.  It should complete soon because it has been canceled.
    remote_run.wait_for_completion()

    children = list(remote_run.get_children())

    print(len(children))

    if len(children) == 100:
        raise Exception(
            'Run was not cancelled properly; child run count is 100 but should have been less than 100'
        )

    # Check the output cells of the notebook.
    checkNotebookOutput('auto-ml-remote-attach.nbconvert.ipynb', 'warning',
                        'nan')
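
The comment in the example above also mentions cancel_iteration(); below is a minimal hedged sketch of cancelling a single child iteration instead of the whole run (the iteration index is illustrative, and `remote_run` is assumed to be the AutoMLRun returned by experiment.submit() in the snippet above):

# Cancel only the child run for iteration 1; the remaining iterations keep running.
remote_run.cancel_iteration(1)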
Example No. 3
# Test classification

from checknotebookoutput import checkNotebookOutput
from checkexperimentresult import checkExperimentResult

checkExperimentResult(experiment_name='automl-classification',
                      expected_num_iteration='1000',
                      expected_minimum_score=0.5,
                      metric_name='AUC_weighted')

# Check the output cells of the notebook.
checkNotebookOutput('auto-ml-classification.nbconvert.ipynb', 'warning',
                    'nan[except]\'missing\': nan')
Example No. 4
from checknotebookoutput import checkNotebookOutput
from checkexperimentresult import checkExperimentResult

checkExperimentResult(experiment_name='automl-classification-deployment',
                      expected_num_iteration='10',
                      expected_minimum_score=0.5,
                      metric_name='AUC_weighted')

# Check the output cells of the notebook.
checkNotebookOutput('auto-ml-classification-with-deployment.nbconvert.ipynb',
                    'warning', 'nan')
Example No. 5
                      absolute_maximum_score=1.0)

check_experiment_model_explanation_of_best_run(
    experiment_name='automl-ojforecasting')

# Check the output cells of the notebook.
# We need to suppress the warning 'Warning, azureml-defaults not detected' since it is emitted
# by the ACI deploy_configuration(), as azureml-defaults is a package required by ACI.
# This does not impact the customer and is not critical.
# '[stderr]' checks for any messages written to stderr; typically logger.warning() output ends up there.
# A minimal sketch of this '[except]'/'[stderr]' filter format follows this example.

allowed_warn_str = (
    '[except]warning - retrying'
    '[except]UserWarning: Matplotlib is building the font cache'
    '[except]warning: a newer version of conda exists'
    '[except]Warning, azureml-defaults not detected'
    '[except]UserWarning: Starting from version 2.2.1, '
    'the library file in distribution wheels for macOS is built by the Apple Clang'
    '[except]brew install libomp'
    '[except]Using different time series parameters in AutoML configs'
    '[except]Forecasting parameter country_or_region will be deprecated in the future,'
    '[except]Forecasting parameter max_horizon will be deprecated in the future,'
    '[except]reg:linear is now deprecated'
    '[except]Forecasting parameter grain_column_names will be deprecated in the future'
)

checkNotebookOutput(
    'auto-ml-forecasting-orange-juice-sales.nbconvert.ipynb' if
    len(sys.argv) < 2 else sys.argv[1], 'warning{}'.format(allowed_warn_str),
    '[stderr]{}{}'.format(allowed_warn_str, '[except]Importing plotly failed'))
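
The suppression format described in the comments above appends '[except]' markers to the string being checked; here is a minimal hedged sketch with an illustrative notebook name and a single allowed warning:

from checknotebookoutput import checkNotebookOutput

# Check the output cells for 'warning', allowing only the known conda message;
# apply the same allowance to anything written to stderr.
checkNotebookOutput(
    'example.nbconvert.ipynb',  # illustrative notebook name
    'warning[except]warning: a newer version of conda exists',
    '[stderr][except]warning: a newer version of conda exists')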
Example No. 6
from checknotebookoutput import checkNotebookOutput
from checkexperimentresult import checkExperimentResult

checkExperimentResult(experiment_name='non_sample_weight_experiment',
                      expected_num_iteration='10',
                      expected_minimum_score=0.3,
                      metric_name='AUC_weighted')

checkExperimentResult(experiment_name='sample_weight_experiment',
                      expected_num_iteration='10',
                      expected_minimum_score=0.3,
                      metric_name='AUC_weighted')

# Check the output cells of the notebook.
checkNotebookOutput('auto-ml-sample-weight.nbconvert.ipynb', 'warning', 'nan')
Example No. 7
from checknotebookoutput import checkNotebookOutput
from checkexperimentresult import checkExperimentResult
from checkcelloutput import checkCellOutput

checkExperimentResult(experiment_name='automl-local-regression',
                      expected_num_iteration='10',
                      expected_minimum_score=0.45,
                      metric_name='spearman_correlation',
                      absolute_minimum_score=-1.0)

# Check the output cells of the notebook.
checkNotebookOutput('auto-ml-regression.nbconvert.ipynb', 'warning', 'nan')

# Check expected cell output contents.
expected_cells = [
    [], ["Found the config file in: "], [], [],
    [
        "Running on local machine\n", "Parent Run ID: ",
        "Current status: DatasetCrossValidationSplit. Generating individually featurized CV splits.\n",
        "Current status: ModelSelection. Beginning model selection.\n", "\n",
        "***********************************************************************"
        + "*****************************\n",
        "ITERATION: The iteration being evaluated.\n",
        "PIPELINE: A summary description of the pipeline being evaluated.\n",
        "DURATION: Time taken for the current iteration.\n",
        "METRIC: The result of computing score on the fitted pipeline.\n",
        "BEST: The best observed score thus far.\n",
        "***********************************************************************"
        + "*****************************\n", "\n",
        " ITERATION   PIPELINE                                       DURATION"
        + "      METRIC      BEST\n", "         0   ", "         1   ",
Example No. 8
from azureml.train.automl.run import AutoMLRun

from checknotebookoutput import checkNotebookOutput

# `experiment` is assumed to be the azureml.core.Experiment object for the
# whitelisting notebook run.
automl_runs = list(experiment.get_runs(type='automl'))

assert (len(automl_runs) == 1)

ml_run = AutoMLRun(experiment=experiment, run_id=automl_runs[0].id)

properties = ml_run.get_properties()
status = ml_run.get_details()
assert (status['status'] == 'Completed')
assert (properties['num_iterations'] == '10')

children = list(ml_run.get_children())
for iteration in children:
    metrics = iteration.get_metrics()
    iteration_status = iteration.get_status()
    iteration_properties = iteration.get_properties()
    pipeline_spec = iteration_properties['pipeline_spec']
    print(iteration.id)
    print(metrics['AUC_weighted'])
    assert (metrics['AUC_weighted'] > 0.4)
    assert (metrics['AUC_weighted'] <= 1.0)
    print(iteration_status)
    assert (iteration_status == 'Completed')
    assert ('TFLinearClassifierWrapper' in pipeline_spec
            or 'TFDNNClassifierWrapper' in pipeline_spec
            or 'LightGBM' in pipeline_spec or 'Ensemble' in pipeline_spec)

# Check the output cells of the notebook.
checkNotebookOutput('auto-ml-classification-with-whitelisting.nbconvert.ipynb',
                    'warning', 'nan')
Example No. 9
from azureml.core.experiment import Experiment
from azureml.train.automl.run import AutoMLRun

from checknotebookoutput import checkNotebookOutput

# `ws` is assumed to be the test's azureml.core.Workspace object
# (for example, obtained via Workspace.from_config()).

experiment_name = 'automl-remote-datastore-file'
# project folder
project_folder = './sample_projects/automl-remote-datastore-file'

experiment = Experiment(ws, experiment_name)
automl_runs = list(experiment.get_runs(type='automl'))

assert (len(automl_runs) == 1)

ml_run = AutoMLRun(experiment=experiment, run_id=automl_runs[0].id)

properties = ml_run.get_properties()
status = ml_run.get_details()
assert (status['status'] == 'Completed')
assert (properties['num_iterations'] == '4')

children = list(ml_run.get_children())
for iteration in children:
    metrics = iteration.get_metrics()
    iteration_status = iteration.get_status()
    print(iteration.id)
    print(metrics['AUC_weighted'])
    assert (metrics['AUC_weighted'] > 0.6)
    assert (metrics['AUC_weighted'] <= 1.0)
    print(iteration_status)
    assert (iteration_status == 'Completed')

# Check the output cells of the notebook.
checkNotebookOutput('auto-ml-remote-execution-with-datastore.nbconvert.ipynb',
                    'warning', 'nan')
Example No. 10
from checknotebookoutput import checkNotebookOutput

# Check the output cells of the notebook.
checkNotebookOutput(
    'auto-ml-missing-data-blacklist-early-termination.nbconvert.ipynb',
    'warning', 'nan')
Example No. 11
from checknotebookoutput import checkNotebookOutput
from checkexperimentresult import checkExperimentResult

checkExperimentResult(experiment_name='automl-dataprep-local',
                      expected_num_iteration='2',
                      expected_minimum_score=0.5,
                      metric_name='AUC_weighted')

# Check the output cells of the notebook.
checkNotebookOutput('auto-ml-dataprep.nbconvert.ipynb', 'warning', 'nan')