import os

# Initialize various utilities that will make our lives easier
studio_utils = WatsonStudioUtils(region="us-south")
studio_utils.configure_utilities_from_file()

project_utils = ProjectUtils(studio_utils)

# Initialize our experiment
experiment = Experiment("Fashion MNIST-dropout tests",
                        "Test two different dropout values", "tensorflow",
                        "1.5", "python", "3.5", studio_utils, project_utils)

# Add two training runs to determine which dropout is best: 0.4 or 0.6
# (a sketch of the model difference appears after this example)
run_1a_path = os.path.join("experiment_zips", "dropout_0.4.zip")
run_1b_path = os.path.join("experiment_zips", "dropout_0.6.zip")

# Specify different GPU types as "k80", "k80x2", "k80x4", "p100", ...
experiment.add_training_run("Run #1", "python3 experiment.py", run_1a_path,
                            "k80")
experiment.add_training_run("Run #2", "python3 experiment.py", run_1b_path,
                            "k80")

# Execute experiment
experiment.execute()

# Print the current status of the Experiment.
experiment.print_experiment_summary()

# Now you'll want to continuously monitor your experiment.  To do that from the command line,
# you should use the WML CLI: bx ml monitor training-runs TRAINING_RUN_ID
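The two experiment zips aren't shown on this page; presumably they differ only in the dropout rate wired into experiment.py. A minimal sketch of what that difference might look like, assuming a tf.keras model (the zips' actual contents may differ):

import tensorflow as tf

def build_model(dropout_rate):
    # Identical architecture in both zips; only the dropout rate differs.
    return tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dropout(dropout_rate),  # 0.4 in one zip, 0.6 in the other
        tf.keras.layers.Dense(10, activation="softmax"),
    ])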
Example #2

# Initialize various utilities that will make our lives easier
studio_utils = WatsonStudioUtils(region="us-south")
studio_utils.configure_utilities_from_file()

project_utils = ProjectUtils(studio_utils)

# Initialize our experiment
experiment = Experiment("Fashion MNIST-Random", "Perform random grid search",
                        "tensorflow", "1.5", "python", "3.5", studio_utils,
                        project_utils)

# Create random parameters to search, then create a training run for each
# (a sketch of create_random_search appears after this example)
search = create_random_search()
experiment_zip = os.path.join("zips", "fashion_mnist_random_search.zip")
for index, run_params in enumerate(search):

    run_name = "run_%d" % (index + 1)
    experiment.add_training_run(run_name, run_params, "python3 experiment.py",
                                experiment_zip, "k80")

# Execute experiment
experiment.execute()

# Print the current status of the Experiment.
experiment.print_experiment_summary()

# Now you'll want to continuously monitor your experiment.  To do that from the command line,
# you should use the WML CLI: bx ml monitor training-runs TRAINING_RUN_ID
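create_random_search() is defined elsewhere in the sample; a plausible sketch, assuming it yields one hyperparameter dict per training run (the parameter names and ranges below are illustrative, not taken from the source):

import random

def create_random_search(num_runs=10):
    # Hypothetical: draw num_runs random hyperparameter combinations.
    for _ in range(num_runs):
        yield {
            "learning_rate": 10 ** random.uniform(-4, -2),
            "dropout": random.choice([0.2, 0.4, 0.6]),
            "batch_size": random.choice([32, 64, 128]),
        }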
Example #3

# (The preceding if-branch, which selects an alternative framework and zip,
# was truncated in extraction; this is the fall-through default.)
else:
    framework = "tensorflow"
    version = "1.5"
    experiment_zip = "dynamic_hyperparms_tf.zip"

experiment_zip = os.path.join("experiment_zips", experiment_zip)

# Initialize our experiment
gpu_type = "k80"
experiment = Experiment(
    "Fashion MNIST-RBFOpt HPO-{}-{}".format(framework, gpu_type),
    "Perform RBFOpt HPO",
    framework, version, "python", "3.5", studio_utils, project_utils)

# Run RBFOpt to search through the hyperparameters (an illustrative sketch of
# an RBFOpt config's shape appears after this example)
rbfopt_config = get_rbfopt_config()
experiment.set_rbfopt_config(rbfopt_config)

# Specify different GPU types as "k80", "k80x2", "k80x4", "p100", ...
experiment.add_training_run("RBFOpt search", "python3 experiment.py",
                            experiment_zip, gpu_type)

# Execute experiment
experiment.execute()

# Print the current status of the Experiment (left disabled in this example).
# experiment.print_experiment_summary()

# Now you'll want to continuously monitor your experiment.  To do that from the command line,
# you should use the WML CLI: bx ml monitor training-runs TRAINING_RUN_ID
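get_rbfopt_config() is likewise defined elsewhere in the sample. The shape below is purely illustrative of what an RBFOpt HPO configuration generally carries (an objective, a run budget, hyperparameter ranges); it is not the schema Experiment.set_rbfopt_config() actually expects:

def get_rbfopt_config():
    # Illustrative keys only; consult the sample repo for the real schema.
    return {
        "objective": "accuracy",  # metric RBFOpt tries to maximize
        "num_runs": 25,           # training-run budget for the search
        "hyperparameters": [
            {"name": "learning_rate", "min": 0.0001, "max": 0.01},
            {"name": "dropout", "min": 0.1, "max": 0.6},
        ],
    }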
                         "python",
                         "3.5",
                         studio_utils,
                         project_utils)

experiment_zip = os.path.join("experiment_zips", experiment_zip)

# Create random parameters to search then create a training run for each
search = create_random_search()
for index, run_params in enumerate(search):

    # The training command stays fixed; this run's hyperparameters are
    # injected into the zip's config.json below
    command = "python3 experiment.py"

    run_name = "run_%d" % (index + 1)

    # Add the hyperparameters to the experiment.zip (in config.json);
    # a sketch of this helper appears after this example
    updated_experiment_zip = experiment.save_hyperparameters_config(run_params, experiment_zip)

    experiment.add_training_run(run_name, command, updated_experiment_zip, gpu_type)

# Execute experiment
experiment.execute()

# Print the current status of the Experiment (left disabled in this example).
# experiment.print_experiment_summary()

# Now you'll want to continuously monitor your experiment.  To do that from the command line,
# you should use the WML CLI: bx ml monitor training-runs TRAINING_RUN_ID
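save_hyperparameters_config() belongs to the sample's Experiment helper. A rough sketch of the idea, shown here as a free function and not the actual implementation: copy the zip and embed this run's hyperparameters as a config.json that experiment.py reads at startup.

import json
import shutil
import zipfile

def save_hyperparameters_config(run_params, experiment_zip, suffix="_run"):
    # Sketch only: clone the zip, then add a config.json holding this
    # run's hyperparameters for experiment.py to load at startup.
    updated_zip = experiment_zip.replace(".zip", suffix + ".zip")
    shutil.copyfile(experiment_zip, updated_zip)
    with zipfile.ZipFile(updated_zip, "a") as zf:
        zf.writestr("config.json", json.dumps(run_params))
    return updated_zip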
Example #5

# (The initialization of studio_utils, project_utils, and experiment was
# truncated in extraction; it follows the same pattern as the examples above.)
run_count = 25
rbfopt_search = get_rbfopt_search()

# Note: "accuracy" is the objective variable being provided to RBFOpt.  For RBFOpt to function properly
# then, we must also store the "accuracy" values to "val_dict_list.json" as shown at the end of the experiment.py file
# in the .zip experiment.
#
# To use a different objective metric, pass the object name here plus update the
# experiment.py file to pass the correct values and corresponding name to the "val_dict_list.json"
#
# Likewise if you want to use iterations instead of epochs, then you must also change that value both here as well
# as in the "val_dict_list.json".
experiment_zip = os.path.join("zips", "fashion_mnist_rbfopt.zip")
rbfopt_experiment_zip = rbfopt_search.save_rbfopt_hpo(
    experiment_zip, run_count, "accuracy", HPOUtils.TIME_INTERVAL_EPOCH,
    HPOUtils.GOAL_MAXIMIZE)

# Note: We don't pass hyperparameters for this run as RBFOpt will determine the hyperparameters to pass for
# each training run as it intelligently explores the hyperparameter space for us.
experiment.add_training_run("RBFOpt search", None, "python3 experiment.py",
                            rbfopt_experiment_zip, "k80")

# Execute experiment
experiment.execute()

# Print the current status of the Experiment.
experiment.print_experiment_summary()

# Now you'll want to continuously monitor your experiment.  To do that from the command line,
# you should use the WML CLI: bx ml monitor training-runs TRAINING_RUN_ID
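As referenced above, a sketch of what the tail of experiment.py is described as doing: recording the objective metric once per epoch in "val_dict_list.json" so RBFOpt can read it back. The field names are assumptions based on the comments, not verified against the sample:

import json

def record_objective(accuracy_per_epoch, path="val_dict_list.json"):
    # One entry per epoch, pairing the interval with the objective metric
    # named in save_rbfopt_hpo() above ("accuracy", per epoch).
    val_dict_list = [
        {"epoch": epoch, "accuracy": acc}
        for epoch, acc in enumerate(accuracy_per_epoch, start=1)
    ]
    with open(path, "w") as f:
        json.dump(val_dict_list, f)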