Code example #1

from numpy import linspace

import DataManager as dm
import Model as mod
from Experiment_frame import run_experiment
from HPtuner import ContinuousDomain, DiscreteDomain


# We generate data for our tests and global variables for all tests
x_train, t_train, x_test, t_test = dm.load_iris_dataset(random_state=42)
dataset_name = 'Iris'
nb_cross_validation = 4
experiment_title = 'IrisClassification2'
total_budget = 150000
max_budget_per_config = 600

# We initialize an MLP with default hyper-parameters and 4 hidden layers of 20 neurons to classify our data
# and test its performance on both training and test data sets
mlp = mod.MLP(hidden_layers_number=4, layers_size=20, max_iter=1000)


search_space = {'alpha': ContinuousDomain(-8, 0, log_scaled=True),
                'learning_rate_init': ContinuousDomain(-8, 0, log_scaled=True),
                'batch_size': DiscreteDomain(linspace(50, 500, 10, dtype=int).tolist()),
                'hidden_layers_number': DiscreteDomain(range(1, 21)),
                'layers_size': DiscreteDomain(range(5, 51))}

grid_search_space = {'alpha': DiscreteDomain(list(linspace(10 ** -8, 1, 5))),
                     'learning_rate_init': DiscreteDomain(list(linspace(10 ** -8, 1, 5))),
                     'batch_size': DiscreteDomain([200]),
                     'hidden_layers_number': DiscreteDomain([1, 5, 10, 15, 20]),
                     'layers_size': DiscreteDomain([20, 50])}


run_experiment(model=mlp, experiment_title=experiment_title, x_train=x_train, t_train=t_train,
               x_test=x_test, t_test=t_test, search_space=search_space, grid_search_space=grid_search_space,
               total_budget=total_budget, max_budget_per_config=max_budget_per_config,
               dataset_name=dataset_name, nb_cross_validation=nb_cross_validation, train_size=len(x_train))
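
# For context: ContinuousDomain(-8, 0, log_scaled=True) appears to search
# [1e-8, 1] on a log scale, matching the 10 ** -8 to 1 span of the grid above.
# A minimal sketch of log-scaled sampling (the base-10 exponent interpretation
# is an assumption, not confirmed by the source):
import numpy as np

u = np.random.uniform(-8, 0)  # exponent drawn uniformly from the domain bounds
value = 10.0 ** u             # resulting hyper-parameter value in [1e-8, 1]
print(value)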
Code example #2
"""
Random search
"""

print('\n\n RANDOM SEARCH \n\n')

# We do a deep copy of our MLP for the test, set the experiment title and save the path to save the results
save = pickle.dumps(mlp)
mlp_for_rs = pickle.loads(save)
experiment_title = 'Forest'
results_path = os.path.join(os.path.dirname(module_path), 'Results')

# We initialize a tuner with random search method and set our search space
rs_tuner = HPtuner(mlp_for_rs, 'random_search')
rs_tuner.set_search_space({'alpha': ContinuousDomain(-8, 0, log_scaled=True),
                           'learning_rate_init': ContinuousDomain(-8, 0, log_scaled=True),
                           'batch_size': DiscreteDomain(linspace(50, 500, 10, dtype=int).tolist()),
                           'hidden_layers_number': DiscreteDomain(range(1, 21)),
                           'layers_size': DiscreteDomain(range(5, 51))})

# We execute the tuning and save the results
rs_results = rs_tuner.tune(x_train,
                           t_train,
                           n_evals=nb_evals,
                           nb_cross_validation=nb_cross_validation)
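
# The comment above mentions saving the results, but the snippet ends before
# the save call. A plausible continuation, mirroring the save_all_results call
# in code example #3 (dgen and noise are assumed to be the data generator and
# noise level defined earlier in the original script):
rs_results.save_all_results(results_path, experiment_title, dgen.model,
                            dgen.train_size, noise, mlp_for_rs.score(x_test, t_test))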
Code example #3
# We fit the baseline MLP and report its score on the test set
mlp.fit(x_train, t_train)
print(mlp.score(x_test, t_test))

"""
Random search
"""

# We do a deep copy of our MLP for the test, set the experiment title and save the path to save the results
save = pickle.dumps(mlp)
mlp_for_rs = pickle.loads(save)
experiment_title = 'nSPIRAL1'
results_path = os.path.join(os.path.dirname(module_path), 'Results')

# We initialize a tuner with random search method and set our search space
rs_tuner = HPtuner(mlp_for_rs, 'random_search')
rs_tuner.set_search_space({'alpha': ContinuousDomain(-8, 0, log_scaled=True),
                           'learning_rate_init': ContinuousDomain(-8, 0, log_scaled=True),
                           'batch_size': DiscreteDomain(linspace(50, 500, 10, dtype=int).tolist())})

# We execute the tuning and save the results
rs_results = rs_tuner.tune(x_train, t_train, n_evals=nb_evals, nb_cross_validation=nb_cross_validation)
rs_results.save_all_results(results_path, experiment_title, dgen.model,
                            dgen.train_size, noise, mlp_for_rs.score(x_test, t_test))

"""
TPE (Tree-structured Parzen Estimator)
"""

# We do a deep copy of our MLP for the test, initialize a tuner with tpe method and set our search space
mlp_for_tpe = pickle.loads(save)
tpe_tuner = HPtuner(mlp_for_tpe, 'tpe')
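
# The snippet ends right after the TPE tuner is created. A minimal sketch of
# the likely continuation, mirroring the random-search block above (the same
# search space and tuning arguments are assumed):
tpe_tuner.set_search_space({'alpha': ContinuousDomain(-8, 0, log_scaled=True),
                            'learning_rate_init': ContinuousDomain(-8, 0, log_scaled=True),
                            'batch_size': DiscreteDomain(linspace(50, 500, 10, dtype=int).tolist())})

tpe_results = tpe_tuner.tune(x_train, t_train, n_evals=nb_evals, nb_cross_validation=nb_cross_validation)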
Code example #4
from numpy import linspace

import DataManager as dm
import Model as mod
from Experiment_frame import run_experiment
from HPtuner import ContinuousDomain, DiscreteDomain


# We generate data for our tests and global variables for all tests
x_train, t_train, x_test, t_test = dm.load_digits_dataset()

dataset_name = 'Digits'
nb_cross_validation = 2
experiment_title = 'Digits'
total_budget = 150000
max_budget_per_config = 600

# We initialize an SVM with default hyper-parameters to classify our data
# and test its performance on both training and test data sets
svm = mod.SVM()


search_space = {'C': ContinuousDomain(-8, 0, log_scaled=True),
                'gamma': ContinuousDomain(-8, 0, log_scaled=True)}

grid_search_space = {'C': DiscreteDomain(list(linspace(10 ** -8, 1, 16))),
                     'gamma': DiscreteDomain(list(linspace(10 ** -8, 1, 16)))}

run_experiment(model=svm, experiment_title=experiment_title, x_train=x_train, t_train=t_train,
               x_test=x_test, t_test=t_test, search_space=search_space, grid_search_space=grid_search_space,
               total_budget=total_budget, max_budget_per_config=max_budget_per_config,
               dataset_name=dataset_name, nb_cross_validation=nb_cross_validation, train_size=len(x_train))
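
# As a rough sanity check on the budgets above: the grid contains 16 * 16 = 256
# configurations, so exhausting it at the full per-configuration budget would
# slightly exceed total_budget (assuming total_budget caps the summed cost,
# which the source does not state explicitly):
n_configs = 16 * 16                              # points in the C x gamma grid
full_cost = n_configs * max_budget_per_config    # 256 * 600 = 153600
print(full_cost > total_budget)                  # True: 153600 > 150000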
Code example #5
net = ResNet(  # hypothetical name; the constructor call is truncated in the source snippet
                       lr=0.021457992,
                       alpha=0.001149,
                       eps=0.1,
                       b_size=100,
                       num_epoch=200,
                       num_stop_epoch=20,
                       lr_decay_rate=10,
                       num_lr_decay=4,
                       valid_size=0.05,
                       tol=0.004,
                       save_path="checkpoint.pth")

print(net)

search_space = {
    'lr': ContinuousDomain(-7, -1, log_scaled=True),
    'alpha': ContinuousDomain(-10, -1, log_scaled=True),
    'eps': DiscreteDomain([1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 0.1, 1.0]),
    'b_size': DiscreteDomain(np.arange(50, 360, 10).tolist()),
    'num_res': DiscreteDomain([2, 3, 4, 5, 6, 7, 8, 9, 10]),
    'lr_decay_rate': DiscreteDomain(np.arange(2, 40, 1).tolist()),
    'activation': DiscreteDomain(['elu', 'relu', 'swish', 'mish']),
    'version': DiscreteDomain([1, 2])
}

run_experiment(model=net,
               experiment_title=experiment_title,
               search_space=search_space,
               total_budget=total_budget,
               max_budget_per_config=max_budget_per_config)
Code example #6
net = ResNet(  # hypothetical name; the constructor call is truncated in the source snippet
                         tol=0.004,
                         save_path="checkpoint.pth")

# print(net)

# ------------------------------------------------------------------------------------------
#                                       RANDOM SEARCH
# ------------------------------------------------------------------------------------------

test = 'random_search'

tune = HPtuner(net, test)

tune.set_search_space({
    'lr': ContinuousDomain(-7, -1, log_scaled=True),
    'alpha': ContinuousDomain(-10, -1, log_scaled=True),
    'eps': DiscreteDomain([1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 0.1, 1.0]),
    'b_size': DiscreteDomain(np.arange(50, 360, 10).tolist()),
    'num_res': DiscreteDomain([2, 3, 4, 5, 6]),
    'lr_decay_rate': DiscreteDomain(np.arange(2, 40, 1).tolist()),
    'activation': DiscreteDomain(['elu', 'relu', 'swish', 'mish']),
    'version': DiscreteDomain([1, 2])
})
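
# The excerpt stops after the search space is set. A minimal sketch of the
# tuning call that would typically follow, mirroring code example #2
# (x_train, t_train, nb_evals and nb_cross_validation are assumed to be
# defined earlier in the original script):
results = tune.tune(x_train, t_train, n_evals=nb_evals, nb_cross_validation=nb_cross_validation)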