Example no. 1
import torch
import torch.nn as nn

# "ht" (the hyperparameter-tuning library), the "Net" regressor and the "get_data"
# helper are assumed to be imported/defined elsewhere in the original module.
def tune():
    X, y = get_data()

    too = torch.optim.Adam, torch.optim.Adadelta, torch.optim.Adagrad, torch.optim.ASGD
    to = ht.CategoricalParameter('torch_optimizer', options=too)
    eta = ht.ContinuousParameter('eta', lower_bound=1e-10, upper_bound=1e-1)
    mi = ht.DiscreteParameter('max_iter', lower_bound=1e2, upper_bound=1e4)

    hl1 = ht.DiscreteParameter('', lower_bound=10, upper_bound=100)
    hl2 = ht.DiscreteParameter('', lower_bound=10, upper_bound=100)
    hls = ht.TupleParameter('hidden_layer_sizes', values=(hl1, hl2))

    tp1 = ht.CategoricalParameter('', options=(nn.Linear, ))
    tp2 = ht.CategoricalParameter('', options=(nn.Linear, ))
    tp3 = ht.CategoricalParameter('', options=(nn.Linear, ))
    top = ht.TupleParameter('topology', values=(tp1, tp2, tp3))

    hypers = [to, eta, mi, hls, top]

    tuner = ht.HyperTune(algorithm=Net,
                         parameters=hypers,
                         train_func=Net.fit,
                         objective_func=Net.mse,
                         train_func_args=(X, y),
                         objective_func_args=(X, y),
                         max_evals=100,
                         maximize=False,
                         num_replications=1)

    tuner.tune()
    print(tuner.get_results())
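
Example no. 1 (and Example no. 6 below) rely on a Net class, a get_data helper and the ht module that the snippets do not show. The following is a hypothetical, minimal sketch of Net and get_data, assuming HyperTune instantiates the algorithm with the tuned values as constructor keyword arguments; the actual classes behind these examples may differ.

import torch
import torch.nn as nn


def get_data(n=1000, d=3):
    # Hypothetical helper: a small random regression dataset as torch tensors.
    X = torch.rand(n, d)
    y = torch.rand(n, 1)
    return X, y


class Net:
    # Hypothetical regressor wired to the hyperparameters tuned above:
    # topology (layer types), hidden_layer_sizes, torch_optimizer, eta, max_iter.
    def __init__(self, topology, hidden_layer_sizes, torch_optimizer, eta, max_iter):
        sizes = (3, *hidden_layer_sizes, 1)           # input dim 3, scalar output
        layers = []
        for layer_type, n_in, n_out in zip(topology, sizes[:-1], sizes[1:]):
            layers.append(layer_type(n_in, n_out))    # e.g. nn.Linear(n_in, n_out)
            layers.append(nn.ReLU())
        self._model_ = nn.Sequential(*layers[:-1])    # drop the trailing activation
        self._loss_func_ = nn.MSELoss()
        self._optimizer_ = torch_optimizer(self._model_.parameters(), lr=eta)
        self._max_iter_ = int(max_iter)

    def fit(self, X, y):
        for _ in range(self._max_iter_):
            self._optimizer_.zero_grad()
            loss = self._loss_func_(self._model_(X), y)
            loss.backward()
            self._optimizer_.step()

    def predict(self, X):
        with torch.no_grad():
            return self._model_(X)

    def mse(self, X, y):
        y_hat = self.predict(X)
        return self._loss_func_(y, y_hat).item()
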
Example no. 2

    def test_discrete_param(self):
        # rand() is a test helper, assumed to return a random point in the
        # parameter's encoded domain.
        d = ht.DiscreteParameter('d', lower_bound=0, upper_bound=1)
        self.assertEqual(d.name, 'd')
        self.assertEqual(d.shape, 1)

        r = rand()
        v = d.get_val(r)
        exp = int(np.round((r[0] + 1) / 2))
        self.assertEqual(v, exp)
Example no. 3

    def test_object_param(self):
        # CA, CB, CC, DA and DB are small fixture classes assumed to be defined
        # elsewhere in the test module.
        o = ht.ObjectParameter('o', obj=CA, parameters=tuple([]))
        self.assertEqual(o.shape, 0)

        params = 'o', DA, (1, )
        self.assertRaises(ValueError, ht.ObjectParameter, *params)

        cba = ht.ConstantParameter('a', value='a')
        o = ht.ObjectParameter('o', obj=CB, parameters=(cba, ))
        self.assertEqual(o.shape, 0)

        dba = ht.ConstantParameter('a', value='aaa')
        o = ht.ObjectParameter('', obj=DB, parameters=(dba, ))
        v = o.get_val([1])
        self.assertEqual(v, 'aaa')

        cba = ht.DiscreteParameter('a', lower_bound=0, upper_bound=1)
        o = ht.ObjectParameter('o', obj=CB, parameters=(cba, ))
        self.assertEqual(o.shape, 1)

        p = [-1]
        v = o.get_val(p)
        self.assertEqual(v.a, cba.get_val(p))

        cca = ht.DiscreteParameter('a', lower_bound=0, upper_bound=1)
        ccb = ht.DiscreteParameter('b', lower_bound=-100, upper_bound=50)
        o = ht.ObjectParameter('o', obj=CC, parameters=(cca, ccb))
        p = [0.5, 0.75]
        v = o.get_val(p)
        self.assertEqual(v.a, cca.get_val(p[:1]))
        self.assertEqual(v.b, ccb.get_val(p[1:]))

        cca = ht.ConstantParameter('a', value='aaa')
        ccb = ht.ConstantParameter('b', value='bbb')
        o = ht.ObjectParameter('o', obj=CC, parameters=(ccb, cca))
        p = [0.5, 0.75]
        v = o.get_val(p)
        self.assertEqual(v.a, cca.get_val(p[:1]))
        self.assertEqual(v.b, ccb.get_val(p[1:]))
Example no. 4

    def test_tuple_param(self):
        t = ht.TupleParameter('t', values=(1, 1, 1))
        self.assertEqual(t.shape, 0)

        c = ht.ContinuousParameter('c', lower_bound=0, upper_bound=1)
        d = ht.DiscreteParameter('d', lower_bound=0, upper_bound=1)
        co = ht.ConstantParameter('co', value='world')
        t = ht.TupleParameter('t', values=(c, d, co))
        self.assertEqual(t.shape, 2)

        r = [-1, 0.5]
        v = t.get_val(r)
        exp = (0.0, 1, 'world')
        self.assertEqual(v, exp)

        t = ht.TupleParameter('t', values=(c, {'z': -1}, d))
        r = [-1, 0.5]
        v = t.get_val(r)
        exp = (0.0, {'z': -1}, 1)
        self.assertEqual(v, exp)
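
A brief, hedged illustration of the convention these tests exercise: each parameter consumes shape entries of an encoded point (apparently one value in [-1, 1] per dimension) and maps them linearly onto its bounds, rounding for discrete parameters. Assuming ht is imported as in the tests:

c = ht.ContinuousParameter('c', lower_bound=0, upper_bound=1)
d = ht.DiscreteParameter('d', lower_bound=0, upper_bound=1)
print(c.get_val([-1.0]))   # -1 maps to the lower bound -> 0.0
print(d.get_val([0.5]))    # round((0.5 + 1) / 2) -> 1
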
Example no. 5
import numpy as np

# "ht" (the hyperparameter-tuning library) and MLPRegressor (presumably
# scikit-learn's MLPRegressor) are assumed to be imported elsewhere in the
# original module.

# make an example dataset
p = 75
X = np.random.rand(100, 3)
y = np.random.rand(100)
X_train, X_test, y_train, y_test = X[:p], X[p:], y[:p], y[p:]

# define the target hyperparameters
activation = ht.CategoricalParameter('activation',
                                     options=('identity', 'logistic', 'tanh',
                                              'relu'))
learning_rate_init = ht.ContinuousParameter('learning_rate_init',
                                            lower_bound=10**-5,
                                            upper_bound=0.1)
max_iter = ht.DiscreteParameter('max_iter', lower_bound=500, upper_bound=10**3)

hl1 = ht.DiscreteParameter('', lower_bound=50, upper_bound=250)
hl2 = ht.DiscreteParameter('', lower_bound=100, upper_bound=250)
hl3 = ht.DiscreteParameter('', lower_bound=1, upper_bound=100)
hidden_layer_sizes = ht.TupleParameter('hidden_layer_sizes',
                                       values=(hl1, hl2, hl3))

# Assumed definition for learning_rate, which the hypers list below references; it was
# not shown in this snippet and mirrors the one used in Example no. 7.
learning_rate = ht.CategoricalParameter('learning_rate',
                                        options=('constant', 'invscaling',
                                                 'adaptive'))

hypers = [
    activation, learning_rate_init, max_iter, learning_rate, hidden_layer_sizes
]

# define a Hypertune object
tuner = ht.HyperTune(algorithm=MLPRegressor,
                     parameters=hypers,
                     train_func=MLPRegressor.fit,
Example no. 6
    # mse() is a method of the Net class used below; self._loss_func_ is assumed to
    # be a torch loss module (e.g. nn.MSELoss) set up in the constructor.
    def mse(self, X, y):
        y_hat = self.predict(X)
        return self._loss_func_(y, y_hat).item()


# make an example dataset
p = 750
X = np.random.rand(10**4, 3)
y = np.random.rand(10**4)
X_train, X_test, y_train, y_test = X[:p], X[p:], y[:p], y[p:]

# define the target hyperparameters
too = torch.optim.Adam, torch.optim.Adadelta, torch.optim.Adagrad, torch.optim.ASGD
to = ht.CategoricalParameter('torch_optimizer', options=too)
eta = ht.ContinuousParameter('eta', lower_bound=1e-10, upper_bound=1e-1)
mi = ht.DiscreteParameter('max_iter', lower_bound=1e2, upper_bound=1e4)
hl1 = ht.DiscreteParameter('', lower_bound=10, upper_bound=100)
hl2 = ht.DiscreteParameter('', lower_bound=10, upper_bound=100)
hls = ht.TupleParameter('hidden_layer_sizes', values=(hl1, hl2))
tp1 = ht.CategoricalParameter('', options=(nn.Linear, ))
tp2 = ht.CategoricalParameter('', options=(nn.Linear, ))
tp3 = ht.CategoricalParameter('', options=(nn.Linear, ))
top = ht.TupleParameter('topology', values=(tp1, tp2, tp3))
hypers = [to, eta, mi, hls, top]

# define a Hypertune object
tuner = ht.HyperTune(algorithm=Net,
                     parameters=hypers,
                     train_func=Net.fit,
                     objective_func=Net.mse,
                     train_func_args=(X_train, y_train),
Example no. 7
import numpy as np
from sklearn.model_selection import KFold
from sklearn.neural_network import MLPClassifier

# "ht", "datasets" (the bundled example datasets) and "train" are assumed to be
# imported/defined elsewhere in the original module; the header and fold loop of
# objective_func below are an assumed reconstruction, since only the tail of the
# function appeared in the original snippet.
def objective_func(algo, X, y, splits):
    results = np.zeros(len(splits))
    for i, (train_idxs, test_idxs) in enumerate(splits):
        X_train, y_train = X[train_idxs], y[train_idxs]
        X_test, y_test = X[test_idxs], y[test_idxs]

        algo.fit(X_train, y_train)
        results[i] = algo.score(X_test, y_test)

    return np.mean(results)


X, y = datasets.iris(return_splits=False)
splits = list(KFold(n_splits=4).split(X))

learning_rate = ht.CategoricalParameter('learning_rate',
                                        options=('constant', 'invscaling',
                                                 'adaptive'))
learning_rate_init = ht.ContinuousParameter('learning_rate_init',
                                            lower_bound=10**-5,
                                            upper_bound=0.1)
max_iter = ht.DiscreteParameter('max_iter', lower_bound=500, upper_bound=10**3)

hypers = [learning_rate, learning_rate_init, max_iter]

tuner = ht.HyperTune(algorithm=MLPClassifier,
                     parameters=hypers,
                     train_func=train,
                     objective_func=objective_func,
                     objective_func_args=(X, y, splits),
                     max_evals=10**2)

results = tuner.tune()
print(results)
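
Example no. 7 also passes a train function that the snippet never defines. Since each fold is fitted inside objective_func, one plausible (purely assumed) form is a no-op wrapper:

def train(algo, *args, **kwargs):
    # Assumed no-op: the per-fold fitting happens inside objective_func above.
    pass
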
Example no. 8

import numpy as np

# "ht", "datasets" and the custom Perceptron class (with fit() and mse() methods) are
# assumed to be imported/defined elsewhere in the original module.

def aux_objective_func(algo, X, y):
    _y = np.where(y >= 0.0, 1, -1)
    y_hat = np.where(algo.predict(X) >= 0.0, 1, -1)
    return np.mean(_y == y_hat)


X, y = datasets.iris(return_splits=False)
X = X[y != 2]
y = y[y != 2]
y[y == 0] = -1
X_train, X_test, y_train, y_test = datasets.split(X, y)

alpha = ht.ContinuousParameter('alpha', lower_bound=10**-10, upper_bound=0.01)
epochs = ht.DiscreteParameter('epochs', lower_bound=200, upper_bound=10**4)

hypers = [alpha, epochs]

tuner = ht.HyperTune(algorithm=Perceptron,
                     parameters=hypers,
                     train_func=Perceptron.fit,
                     objective_func=Perceptron.mse,
                     train_func_args=(X_train, y_train),
                     objective_func_args=(X_test, y_test),
                     max_evals=50,
                     maximize=False)

results = tuner.tune()
print(results)
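
Example no. 8 assumes a custom Perceptron class (with fit() and mse() methods) plus the library's datasets helpers, none of which appear in the snippet. A hypothetical minimal Perceptron, consistent with the tuned hyperparameters (alpha as learning rate, epochs as passes over the data) and with aux_objective_func thresholding raw prediction scores at zero:

import numpy as np


class Perceptron:
    # Hypothetical implementation; the original class may differ.
    def __init__(self, alpha, epochs):
        self.alpha = alpha
        self.epochs = int(epochs)
        self.w = None
        self.b = 0.0

    def fit(self, X, y):
        self.w = np.zeros(X.shape[1])
        self.b = 0.0
        for _ in range(self.epochs):
            for xi, yi in zip(X, y):
                if yi * (xi @ self.w + self.b) <= 0:      # misclassified point
                    self.w += self.alpha * yi * xi
                    self.b += self.alpha * yi

    def predict(self, X):
        return X @ self.w + self.b                        # raw margin scores

    def mse(self, X, y):
        return float(np.mean((y - self.predict(X)) ** 2))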