# Example #1
    # Second autoencoder in the stack: compresses the first autoencoder's
    # hidden representation (out1) from Layers[0] to Layers[1] units.
    # NOTE(review): model11/out1/alpha/max_iter are defined above this
    # excerpt — confirm against the full file.
    model12 = MLP_auto([Layers[0], Layers[1]], ['sigmoid'])
    print("pre-training autoencoder 2")
    # Unsupervised reconstruction training: input == target; batch size 12.
    model12.train(out1, out1, alpha, 12, max_iter)

    # Hidden activations of autoencoder 2 become the training data for the
    # next autoencoder in the stack.
    out2 = model12.output_hidden(out1)

    # Third autoencoder: Layers[1] -> Layers[2] units, trained on out2.
    model13 = MLP_auto([Layers[1], Layers[2]], ['sigmoid'])
    print("pre-training autoencoder 3")
    model13.train(out2, out2, alpha, 12, max_iter)

    # fine-tuning the stacked autoencoder
    print("fine tuning stacked autoencoder")

    # Deep neural network built from the stacked autoencoder layers.
    # NOTE(review): assumes n is the input dimension and the task has 2
    # output classes — confirm from the surrounding code.
    final_model = MLP([n, *Layers, 2],
                      ['sigmoid', 'sigmoid', 'sigmoid', 'sigmoid'])
    # final_model.W_list[0:3] = model.W_list[0:3]
    # Initialize the first three weight matrices from the pre-trained
    # encoder weights of each autoencoder.
    final_model.W_list[0] = model11.W_list[0]
    final_model.W_list[1] = model12.W_list[0]
    final_model.W_list[2] = model13.W_list[0]

    # Supervised fine-tuning of the whole network with fresh hyperparameters.
    alpha = 0.5
    batch_size = 12
    max_iter = 200
    final_model.train(X_train, y_train, X_test, y_test, alpha, batch_size,
                      max_iter)
    print(final_model.accuracy(X_test, y_test))
    print(final_model.conf_mat(X_test, y_test))
# Example #2
from MLP import MLP
from utils import *

# build dataset
xs, ys = generate_spiral(1000, 2, 3, show=False)
train_xs, train_ys, test_xs, test_ys = train_test_split(xs, ys)

# normalize data
train_mean = np.mean(train_xs, axis=0)
train_std = np.std(train_xs, axis=0)
train_xs = (train_xs - train_mean) / train_std
test_xs = (test_xs - train_mean) / train_std

# specify neural network
layer_sizes = [2, 100, 3]
hyperparam_dict = {'mb_size': 64, 'lr': 0.001}

# initialize data batching class (DataSet) and MLP
ds = DataSet(train_xs, train_ys, hyperparam_dict['mb_size'])
mlp = MLP(layer_sizes, hyperparam_dict)

n_epochs = 1000
n_batches = n_epochs * ds.batches_in_epoch
for batch_i in range(n_batches):
    mb_xs, mb_ys = ds.get_mb()
    mlp.fit_mb(mb_xs, mb_ys)
    # test_output = mlp.feedforward(test_xs)
    # print(mlp.loss(test_output, test_ys))
test_output = mlp.feedforward(test_xs)
print(mlp.accuracy(test_output, test_ys))