# Example 1
            train_output = np.vstack([train_output, o])


    # MLP: 841 inputs, one hidden layer of 100 units, 26 outputs.
    # 841 = 29*29, so the inputs are presumably flattened 29x29 letter
    # images and the 26 outputs one score per letter -- TODO confirm.
    # Both layers use sigmoid activations; weights start from N(0, 0.01).
    nn = MLP(841, (100, 26),
                       activation_functions=[sigmoid, sigmoid],
                       rng=(lambda n: np.random.normal(0, 0.01, n)))
    # Full-batch backprop (batch_size=None) with momentum, optimizing a
    # Bernoulli log-likelihood goal; cost is also tracked on the CV set.
    # NOTE(review): the semantics of the neural_local_gain 4-tuple
    # (presumably per-weight adaptive gain bounds/rates) are defined by
    # MLP.train_backprop -- confirm against its implementation.
    train_cost, cv_cost = \
        nn.train_backprop(train_input, train_output,
                          d_f_list=[d_sigmoid, d_sigmoid],
                          goal=log_Bernoulli_likelihood,
                          d_goal=d_log_Bernoulli_likelihood,
                          batch_size=None,  # None -> one update per full pass
                          max_iter=2500,
                          learning_rate=0.1,
                          momentum_rate=0.9,
                          neural_local_gain=(0.005, 0.995, 0.001, 1000),
                          stop_threshold=0.05,
                          cv_input_data=cv_input,
                          cv_output_data=cv_output,
                          #regularization_rate=0.1,
                          #regularization_norm=l2,
                          #d_regularization_norm=d_l2,
                          verbose=True
                          )

    # Training accuracy: predicted class is the argmax over the 26 outputs.
    t = np.argmax(train_output, axis=1)
    y = np.argmax(nn.compute_output(train_input), axis=1)

    print('%s / %s' % (sum(t == y), train_output.shape[0]))

    # Test-set labels; the fragment is cut off before the matching
    # test predictions/accuracy lines.
    t = np.argmax(test_output, axis=1)
# Example 2
            train_output = np.vstack([train_output, o])


    # MLP: 841 inputs, 100 tanh hidden units, 26 softmax outputs.
    # 841 = 29*29, so the inputs are presumably flattened 29x29 letter
    # images and the outputs per-letter class probabilities -- TODO confirm.
    nn = MLP(841, (100, 26),
                       activation_functions=[tanh, softmax],
                       rng=(lambda n: np.random.normal(0, 0.01, n)))
    # Online/stochastic training (batch_size=1) minimizing cross-entropy;
    # the per-weight adaptive gain is deliberately disabled (commented out).
    train_cost, cv_cost = \
        nn.train_backprop(train_input, train_output,
                          d_f_list=[d_tanh, d_softmax],
                          goal=cross_entropy,
                          d_goal=d_cross_entropy,
                          batch_size=1,  # update after every sample
                          max_iter=100,
                          learning_rate=0.01,
                          momentum_rate=0.9,
                          #neural_local_gain=(0.0005, 0.9995, 0.001, 1000),
                          stop_threshold=0.05,
                          cv_input_data=cv_input,
                          cv_output_data=cv_output,
                          #regularization_rate=0.1,
                          #regularization_norm=l2,
                          #d_regularization_norm=d_l2,
                          verbose=True
                          )

    # Training accuracy: predicted class is the argmax over the 26 outputs.
    t = np.argmax(train_output, axis=1)
    y = np.argmax(nn.compute_output(train_input), axis=1)

    print('%s / %s' % (sum(t == y), train_output.shape[0]))

    # Test-set labels; the fragment is cut off before the matching
    # test predictions/accuracy lines.
    t = np.argmax(test_output, axis=1)
# Example 3
            train_input = np.vstack([train_input, v])
            train_output = np.vstack([train_output, o])

    # MLP: 841 inputs, 100 sigmoid hidden units, 26 sigmoid outputs.
    # 841 = 29*29, so the inputs are presumably flattened 29x29 letter
    # images and the 26 outputs one score per letter -- TODO confirm.
    nn = MLP(841, (100, 26),
             activation_functions=[sigmoid, sigmoid],
             rng=(lambda n: np.random.normal(0, 0.01, n)))
    # Full-batch backprop (batch_size=None) with momentum and per-weight
    # adaptive gains, optimizing a Bernoulli log-likelihood goal; cost is
    # also tracked on the CV set.
    train_cost, cv_cost = \
        nn.train_backprop(train_input, train_output,
                          d_f_list=[d_sigmoid, d_sigmoid],
                          goal=log_Bernoulli_likelihood,
                          d_goal=d_log_Bernoulli_likelihood,
                          batch_size=None,  # None -> one update per full pass
                          max_iter=2500,
                          learning_rate=0.1,
                          momentum_rate=0.9,
                          neural_local_gain=(0.005, 0.995, 0.001, 1000),
                          stop_threshold=0.05,
                          cv_input_data=cv_input,
                          cv_output_data=cv_output,
                          #regularization_rate=0.1,
                          #regularization_norm=l2,
                          #d_regularization_norm=d_l2,
                          verbose=True
                          )

    # Training accuracy: predicted class is the argmax over the 26 outputs.
    t = np.argmax(train_output, axis=1)
    y = np.argmax(nn.compute_output(train_input), axis=1)

    print('%s / %s' % (sum(t == y), train_output.shape[0]))

    # Test-set labels; the fragment is cut off before the matching
    # test predictions/accuracy lines.
    t = np.argmax(test_output, axis=1)
# Example 4
    # Load the Africa-soil training data: columns 1..3594 are features
    # (the last feature column holds strings that are mapped to 1.0 when
    # equal to 'Topsoil', else 0.0), the remaining columns are float
    # regression targets.
    # NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0;
    # .values / .to_numpy() is the portable replacement.
    df = read_csv('./../data/africa-soil/training.csv')
    x = df.as_matrix(columns=df.columns[1:3595])
    x[:, -1] = (x[:, -1] == 'Topsoil') * 1.0
    x = x.astype(float)
    y = df.as_matrix(columns=df.columns[3595:])
    y = y.astype(float)

    # 80/20 train/CV row split.
    # NOTE(review): np.random.choice defaults to replace=True, so
    # idx_train can contain duplicate rows and cover fewer than 80% of
    # the data -- replace=False is probably intended.
    idx_train = list(
        np.random.choice(range(x.shape[0]), size=int(round(0.8 * x.shape[0]))))
    idx_cv = list(set(range(x.shape[0])) - set(idx_train))

    # MLP regressor: 3594 inputs, 50 tanh hidden units, 5 linear outputs.
    nn = MLP(3594, (50, 5),
             activation_functions=[tanh, identity],
             rng=(lambda n: np.random.normal(0, 0.01, n)))
    # Full-batch gradient descent with momentum and per-weight adaptive
    # gains; cost is also tracked on the held-out CV split.
    train_cost, cv_cost = \
        nn.train_backprop(x[idx_train, :], y[idx_train, :],
                          d_f_list=[d_tanh, d_identity],
                          batch_size=None,  # None -> one update per full pass
                          max_iter=1000,
                          learning_rate=0.001,
                          momentum_rate=0.9,
                          neural_local_gain=(0.0005, 0.9995, 0.001, 1000),
                          stop_threshold=0.05,
                          cv_input_data=x[idx_cv, :],
                          cv_output_data=y[idx_cv, :],
                          #regularization_rate=0.1,
                          #regularization_norm=l2,
                          #d_regularization_norm=d_l2,
                          verbose=True
                          )
# Example 5
if __name__ == '__main__':

    # Load the Africa-soil training data: columns 1..3594 are features
    # (the last feature column holds strings that are mapped to 1.0 when
    # equal to 'Topsoil', else 0.0), the remaining columns are float
    # regression targets.
    df = read_csv('./../data/africa-soil/training.csv')
    # .values instead of DataFrame.as_matrix(), which was removed in
    # pandas 1.0 (.values works on both old and modern pandas).
    x = df[df.columns[1:3595]].values
    x[:, -1] = (x[:, -1] == 'Topsoil') * 1.0  # must run before the float cast
    x = x.astype(float)
    y = df[df.columns[3595:]].values.astype(float)

    # 80/20 train/CV row split.  replace=False guarantees unique training
    # indices; the original sampled with replacement, so the training set
    # could contain duplicate rows and cover fewer than 80% of the data.
    n_rows = x.shape[0]
    idx_train = list(np.random.choice(n_rows,
                                      size=int(round(0.8 * n_rows)),
                                      replace=False))
    idx_cv = list(set(range(n_rows)) - set(idx_train))

    # MLP regressor: 3594 inputs, 50 tanh hidden units, 5 linear outputs;
    # weights start from N(0, 0.01).
    nn = MLP(3594, (50, 5),
             activation_functions=[tanh, identity],
             rng=(lambda n: np.random.normal(0, 0.01, n)))
    # Full-batch gradient descent with momentum and per-weight adaptive
    # gains; cost is also tracked on the held-out CV split.
    # (Trailing comma added after the commented-out d_l2 kwarg so the
    # regularization block can be uncommented without a syntax error.)
    train_cost, cv_cost = \
        nn.train_backprop(x[idx_train, :], y[idx_train, :],
                          d_f_list=[d_tanh, d_identity],
                          batch_size=None,  # None -> one update per full pass
                          max_iter=1000,
                          learning_rate=0.001,
                          momentum_rate=0.9,
                          neural_local_gain=(0.0005, 0.9995, 0.001, 1000),
                          stop_threshold=0.05,
                          cv_input_data=x[idx_cv, :],
                          cv_output_data=y[idx_cv, :],
                          #regularization_rate=0.1,
                          #regularization_norm=l2,
                          #d_regularization_norm=d_l2,
                          verbose=True
                          )