Example #1
def OptNN(d1, h1, d2, h2, d3, h3, d4, start, stop, max_epochs):
    params2 = params.copy()
    on_epoch = [AdjustVariable('update_learning_rate', 
                               start = start, stop = stop),
                AdjustVariable('update_momentum', start = .9, stop = .999)]
    params2['dropout1_p']           = d1
    params2['dropout2_p']           = d2
    params2['dropout3_p']           = d3
    params2['dropout4_p']           = d4
    # Hidden-layer sizes and the epoch count must be integers; an optimizer may pass floats.
    params2['hidden1_num_units']    = int(h1)
    params2['hidden2_num_units']    = int(h2)
    params2['hidden3_num_units']    = int(h3)
    params2['max_epochs']           = int(max_epochs)
    params2['on_epoch_finished'] = on_epoch
    # 5-fold stratified CV: collect out-of-fold probability predictions for every sample.
    kcv = StratifiedKFold(Y, 5, shuffle = True)
    res = np.empty((len(Y), len(np.unique(Y)))); i = 1
    CVScores = []
    for train_idx, valid_idx in kcv:
        logger.info("Running fold %d...", i); i += 1
        net = NeuralNet(**params2)
        net.set_params(eval_size = None)  # train on the whole fold, no internal hold-out
        net.fit(X[train_idx], Y[train_idx])
        res[valid_idx, :] = net.predict_proba(X[valid_idx])
        CVScores.append(log_loss(Y[valid_idx], res[valid_idx]))
    # Return the negated mean log loss so a maximizing optimizer can use it as the objective.
    return -np.mean(CVScores)
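
Both examples rely on an AdjustVariable callback and a float32 helper that are defined elsewhere in the project. A rough sketch of what they might look like is given below; the linear/log-spaced schedule and the is_log handling are assumptions following the common nolearn annealing pattern, not the original definitions:

import numpy as np

def float32(k):
    # Cast a Python number to float32 so it can be stored in a Theano shared variable.
    return np.float32(k)

class AdjustVariable(object):
    # on_epoch_finished callback: anneals a shared NeuralNet parameter
    # (e.g. update_learning_rate) from `start` to `stop` over max_epochs.
    def __init__(self, name, start=0.03, stop=0.001, is_log=False):
        self.name = name
        self.start, self.stop = start, stop
        self.is_log = is_log
        self.schedule = None

    def __call__(self, nn, train_history):
        if self.schedule is None:
            if self.is_log:
                self.schedule = np.logspace(np.log10(self.start),
                                            np.log10(self.stop), nn.max_epochs)
            else:
                self.schedule = np.linspace(self.start, self.stop, nn.max_epochs)
        epoch = train_history[-1]['epoch']
        getattr(nn, self.name).set_value(float32(self.schedule[epoch - 1]))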
Example #2
def OptNN2(d0, d1, d2, d3, h1, h2, h3, me, ls, le):
    # Hidden-layer sizes and the epoch count must be integers; an optimizer may pass floats.
    h1, h2, h3 = int(h1), int(h2), int(h3)
    me = int(me)
    params = dict(
        layers = [
            ('input', layers.InputLayer),
            ('dropout1', layers.DropoutLayer),
            ('hidden1', layers.DenseLayer),
            ('dropout2', layers.DropoutLayer),
            ('hidden2', layers.DenseLayer),
            ('dropout3', layers.DropoutLayer),
            ('hidden3', layers.DenseLayer),
            ('dropout4', layers.DropoutLayer),
            ('output', layers.DenseLayer),
            ],

        input_shape = (None, 93),
        dropout1_p = d0,
        hidden1_num_units = h1,
        dropout2_p = d1,
        hidden2_num_units = h2,
        dropout3_p = d2,
        hidden3_num_units = h3,
        dropout4_p = d3,
        output_nonlinearity = softmax,
        output_num_units = 9,

        update = nesterov_momentum,
        # l_start, m_start and m_stop are module-level settings; the AdjustVariable
        # callbacks below re-set the learning rate from ls/le after every epoch.
        update_learning_rate = theano.shared(float32(l_start)),
        update_momentum = theano.shared(float32(m_start)),

        regression = False,
        on_epoch_finished = [
            AdjustVariable('update_learning_rate', start = ls, 
                stop = le, is_log = True),
            AdjustVariable('update_momentum', start = m_start, 
                stop = m_stop, is_log = False),
            ],
        max_epochs = me,
        verbose = 1,
        )

    CVScores = []
    res = np.empty((len(Y), len(np.unique(Y))))
    kcv = StratifiedKFold(Y, 5, shuffle = True); i = 1
    for train_idx, valid_idx in kcv:
        logger.info("Running fold %d...", i); i += 1
        net = NeuralNet(**params)
        net.set_params(eval_size = None)
        net.fit(X[train_idx], Y[train_idx])
        res[valid_idx, :] = net.predict_proba(X[valid_idx])
        CVScores.append(log_loss(Y[valid_idx], res[valid_idx]))
    return -np.mean(CVScores)
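
Both functions return the negated mean out-of-fold log loss, so they can be maximized directly by a Bayesian optimizer. A minimal usage sketch, assuming the bayes_opt package; the parameter bounds below are illustrative only, not the values used in the original project:

from bayes_opt import BayesianOptimization

bo = BayesianOptimization(
    OptNN2,
    {'d0': (0.0, 0.5), 'd1': (0.0, 0.5), 'd2': (0.0, 0.5), 'd3': (0.0, 0.5),
     'h1': (200, 1200), 'h2': (200, 1200), 'h3': (200, 1200),
     'me': (50, 300), 'ls': (0.005, 0.03), 'le': (0.0001, 0.001)})
bo.maximize(init_points=5, n_iter=25)
# The best parameter set found so far is stored on the optimizer object.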