# Add source directory to the path as Python doesn't like sub-directories
import os
import sys

source_path = os.path.join("..", "source")
sys.path.insert(0, source_path)

from watson_studio_utils import WatsonStudioUtils
from experiment_utils import Experiment
from project_utils import ProjectUtils

# Initialize various utilities that will make our lives easier
studio_utils = WatsonStudioUtils(region="us-south")
studio_utils.configure_utilities_from_file()

project_utils = ProjectUtils(studio_utils)

# Initialize our experiment
experiment = Experiment("Fashion MNIST-dropout tests",
                        "Test two different dropout values", "tensorflow",
                        "1.5", "python", "3.5", studio_utils, project_utils)

# Add two training runs to determine which dropout is best: 0.4 or 0.6
run_1a_path = os.path.join("experiment_zips", "dropout_0.4.zip")
run_1b_path = os.path.join("experiment_zips", "dropout_0.6.zip")

# Specify different GPU types as "k80", "k80x2", "k80x4", "p100", ...
experiment.add_training_run("Run #1", "python3 experiment.py", run_1a_path,
                            "k80")
experiment.add_training_run("Run #2", "python3 experiment.py", run_1b_path,
                            "k80")

# Execute experiment
experiment.execute()


# Example 2: random hyperparameter search.
# NOTE: `search` below is assumed to be a hyperparameter-search helper created
# earlier in the original example (not shown in this snippet).
def create_random_search():
    search.add_power_range("dense_neurons_1", 6, 11,
                           2)  # 64 128 256 512 1024 2048

    search_count = 5
    return search.create_random_search(search_count)
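
# Quick sanity check of the power range above (assuming add_power_range sweeps
# base-2 exponents 6 through 11, as the inline comment indicates):
assert [2 ** exp for exp in range(6, 12)] == [64, 128, 256, 512, 1024, 2048]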


# Initialize various utilities that will make our lives easier
studio_utils = WatsonStudioUtils(region="us-south")
studio_utils.configure_utilities_from_file()

project_utils = ProjectUtils(studio_utils)

# Initialize our experiment
experiment = Experiment("Fashion MNIST-Random", "Perform random grid search",
                        "tensorflow", "1.5", "python", "3.5", studio_utils,
                        project_utils)

# Create random parameters to search then create a training run for each
search = create_random_search()
experiment_zip = os.path.join("zips", "fashion_mnist_random_search.zip")
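# For illustration only: each entry produced by the random search is assumed to
# be a mapping from hyperparameter name to a sampled value, e.g. (hypothetical
# values) {"dropout_4": 0.3, "dense_neurons_1": 256}.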
for index, run_params in enumerate(search):

    run_name = "run_%d" % (index + 1)
    experiment.add_training_run(run_name, run_params, "python3 experiment.py",
                                experiment_zip, "k80")

# Execute experiment
experiment.execute()

# Print the current status of the Experiment.
#experiment.print_experiment_summary()


# Example 3: custom random search with dynamic hyperparameters (PyTorch or
# TensorFlow)
isPyTorch = False  # else TensorFlow
if isPyTorch:
    framework = "pytorch"
    version = "0.4"
    experiment_zip = "dynamic_hyperparms_pt.zip"
else:
    framework = "tensorflow"
    version = "1.5"
    experiment_zip = "dynamic_hyperparms_tf.zip"

# Initialize our experiment
gpu_type = "k80"
experiment = Experiment( "Fashion MNIST-Custom Random-{}-{}".format(framework, gpu_type),
                         "Perform random grid search",
                         framework,
                         version,
                         "python",
                         "3.5",
                         studio_utils,
                         project_utils)

experiment_zip = os.path.join("experiment_zips", experiment_zip)

# Create random parameters to search then create a training run for each
search = create_random_search()
for index, run_params in enumerate(search):

    # Append hyperparameters to the command
    command = "python3 experiment.py"

    # Specify different GPU types as "k80", "k80x2", "k80x4", "p100", ...
    run_name = "run_%d" % (index + 1)


# Example 4: RBFOpt hyperparameter optimization (PyTorch or TensorFlow)
isPyTorch = True  # else TensorFlow
if isPyTorch:
    framework = "pytorch"
    version = "0.4"
    experiment_zip = "dynamic_hyperparms_pt.zip"
else:
    framework = "tensorflow"
    version = "1.5"
    experiment_zip = "dynamic_hyperparms_tf.zip"

experiment_zip = os.path.join("experiment_zips", experiment_zip)

# Initialize our experiment
gpu_type = "k80"
experiment = Experiment(
    "Fashion MNIST-RBFOpt HPO-{}-{}".format(framework, gpu_type),
    "Perform RBFOpt HPO",
    framework, version, "python", "3.5", studio_utils, project_utils)

# Run RBFOpt to search through the hyperparameters
rbfopt_config = get_rbfopt_config()
experiment.set_rbfopt_config(rbfopt_config)

# Specify different GPU types as "k80", "k80x2", "k80x4", "p100", ...
experiment.add_training_run("RBFOpt search", "python3 experiment.py",
                            experiment_zip, gpu_type)

# Execute experiment
experiment.execute()

# Print the current status of the Experiment.
#experiment.print_experiment_summary()


# Example 5: tuning particle swarm optimization weights (a different Experiment
# class, apparently from a PSO benchmark project; GET_WEIGHTS, constants,
# results and griewangk are assumed to be defined earlier in the original file).
    if GET_WEIGHTS:
        # Get best combination of weights
        inertia_weights = [0.5, 0.8, 1]
        cognitive_weights = [0.5, 1, 2.05, 4]
        social_weights = [0.5, 1, 2.05, 4]

        for inertia_weight in inertia_weights:
            for cognitive_weight in cognitive_weights:
                for social_weight in social_weights:
                    constants.INERTIA_WEIGHT = inertia_weight
                    constants.COGNITIVE_WEIGHT = cognitive_weight
                    constants.SOCIAL_WEIGHT = social_weight

                    print(inertia_weight, cognitive_weight, social_weight)
                    # 30 experiments on a fixed population + statistics
                    experiment = Experiment(griewangk, constants)
                    max_value, min_value, mean_value = experiment.run_experiment(
                        30)
                    print('Min = ', min_value, " Max = ", max_value,
                          " Mean = ", mean_value)

                    results.add("griewangk", -1, constants, min_value,
                                max_value, mean_value)
    else:
        weight_combinations = [(0.5, 0.50, 4.00), (0.5, 1.00, 4.00)]

        for (inertia_weight, cognitive_weight,
             social_weight) in weight_combinations:
            constants.INERTIA_WEIGHT = inertia_weight
            constants.COGNITIVE_WEIGHT = cognitive_weight
            constants.SOCIAL_WEIGHT = social_weight
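
            # --- Sketch, not part of the original snippet (which ends here):
            # the else branch presumably mirrors the grid search above, running
            # the same 30-repetition experiment for each preset combination. ---
            print(inertia_weight, cognitive_weight, social_weight)
            experiment = Experiment(griewangk, constants)
            max_value, min_value, mean_value = experiment.run_experiment(30)
            print('Min = ', min_value, " Max = ", max_value,
                  " Mean = ", mean_value)
            results.add("griewangk", -1, constants, min_value,
                        max_value, mean_value)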


# Example 6: RBFOpt hyperparameter optimization with an explicit search space.
# NOTE: `search` below is assumed to be a search-range helper created earlier
# in the original example (not shown in this snippet).
def get_rbfopt_search():
    search.add_step_range("dropout_4", 0.1, 0.9, 0.1)
    search.add_power_range("dense_neurons_1", 6, 11,
                           2)  # 64 128 256 512 1024 2048

    return search


# Initialize various utilities that will make our lives easier
studio_utils = WatsonStudioUtils(region="us-south")
studio_utils.configure_utilities_from_file()

project_utils = ProjectUtils(studio_utils)

# Initialize our experiment
experiment = Experiment("Fashion MNIST-RBFOpt HPO", "Perform RBFOpt HPO",
                        "tensorflow", "1.5", "python", "3.5", studio_utils,
                        project_utils)

# Define the RBFOpt search space and the number of runs
run_count = 25
rbfopt_search = get_rbfopt_search()

# Note: "accuracy" is the objective variable being provided to RBFOpt.  For RBFOpt to function properly
# then, we must also store the "accuracy" values to "val_dict_list.json" as shown at the end of the experiment.py file
# in the .zip experiment.
#
# To use a different objective metric, pass the object name here plus update the
# experiment.py file to pass the correct values and corresponding name to the "val_dict_list.json"
#
# Likewise if you want to use iterations instead of epochs, then you must also change that value both here as well
# as in the "val_dict_list.json".
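
# A minimal sketch (an assumption, not code taken from the .zip) of what the end
# of experiment.py is expected to do so that RBFOpt can read the objective
# metric: write one {"steps": ..., "accuracy": ...} record per epoch to
# "val_dict_list.json" in the training run's result directory.
import json

def write_val_dict_list(epoch_accuracies, result_dir="."):
    # `epoch_accuracies` is a hypothetical list of per-epoch validation
    # accuracies; the "accuracy" key must match the objective name given to RBFOpt.
    val_dict_list = [{"steps": epoch, "accuracy": acc}
                     for epoch, acc in enumerate(epoch_accuracies, start=1)]
    with open(os.path.join(result_dir, "val_dict_list.json"), "w") as fp:
        json.dump(val_dict_list, fp)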