Example #1
import sys

from checknotebookoutput import checkNotebookOutput
from checkexperimentresult import checkExperimentResult
from checkexperimentresult import check_experiment_model_explanation_of_best_run
from download_run_files import download_run_files

# Download files for the remote run.
download_run_files(
    experiment_names=['automl-bikeshareforecasting',
                      'automl-bikeshareforecasting_test'],
    download_all_runs=True)

checkExperimentResult(experiment_name='automl-bikeshareforecasting',
                      expected_num_iteration='1000',
                      expected_minimum_score=0.01,
                      expected_maximum_score=0.3,
                      metric_name='normalized_root_mean_squared_error',
                      absolute_minimum_score=0.0,
                      absolute_maximum_score=1.0)

check_experiment_model_explanation_of_best_run(
    experiment_name='automl-bikeshareforecasting')

# Check the output cells of the notebook.
checkNotebookOutput(
    "auto-ml-forecasting-bike-share.nbconvert.ipynb"
    if len(sys.argv) < 2 else sys.argv[1],
    "warning[except]retrying[except]UserWarning: Matplotlib is building the font cache"
    "[except]warning: a newer version of conda exists"
    "[except]UserWarning: Starting from version 2.2.1, "
    "the library file in distribution wheels for macOS is built by the Apple Clang")
Example #2
from checknotebookoutput import checkNotebookOutput
from checkexperimentresult import checkExperimentResult

checkExperimentResult(experiment_name='automl-classification-deployment',
                      expected_num_iteration='10',
                      expected_minimum_score=0.5,
                      metric_name='AUC_weighted')

# Check the output cells of the notebook.
checkNotebookOutput('auto-ml-classification-with-deployment.nbconvert.ipynb',
                    'warning', 'nan')
Example #3

from checknotebookoutput import checkNotebookOutput
from checkexperimentresult import checkExperimentResult
from checkcelloutput import checkCellOutput

checkExperimentResult(experiment_name='automl-local-regression',
                      expected_num_iteration='10',
                      expected_minimum_score=0.45,
                      metric_name='spearman_correlation',
                      absolute_minimum_score=-1.0)

# Check the output cells of the notebook.
checkNotebookOutput('auto-ml-regression.nbconvert.ipynb', 'warning', 'nan')

# Check expected cell output contents.
expected_cells = [
    [], ["Found the config file in: "], [], [],
    [
        "Running on local machine\n", "Parent Run ID: ",
        "Current status: DatasetCrossValidationSplit. Generating individually featurized CV splits.\n",
        "Current status: ModelSelection. Beginning model selection.\n", "\n",
        "***********************************************************************"
        + "*****************************\n",
        "ITERATION: The iteration being evaluated.\n",
        "PIPELINE: A summary description of the pipeline being evaluated.\n",
        "DURATION: Time taken for the current iteration.\n",
        "METRIC: The result of computing score on the fitted pipeline.\n",
        "BEST: The best observed score thus far.\n",
        "***********************************************************************"
        + "*****************************\n", "\n",
        " ITERATION   PIPELINE                                       DURATION"
        + "      METRIC      BEST\n", "         0   ", "         1   ",
Example #4

from checknotebookoutput import checkNotebookOutput
from checkexperimentresult import checkExperimentResult

checkExperimentResult(experiment_name='non_sample_weight_experiment',
                      expected_num_iteration='10',
                      expected_minimum_score=0.3,
                      metric_name='AUC_weighted')

checkExperimentResult(experiment_name='sample_weight_experiment',
                      expected_num_iteration='10',
                      expected_minimum_score=0.3,
                      metric_name='AUC_weighted')

# Check the output cells of the notebook.
checkNotebookOutput('auto-ml-sample-weight.nbconvert.ipynb', 'warning', 'nan')
Example #5
from checknotebookoutput import checkNotebookOutput
from checkexperimentresult import checkExperimentResult

checkExperimentResult(experiment_name='automl-dataprep-local',
                      expected_num_iteration='2',
                      expected_minimum_score=0.5,
                      metric_name='AUC_weighted')

# Check the output cells of the notebook.
checkNotebookOutput('auto-ml-dataprep.nbconvert.ipynb', 'warning', 'nan')