for i in range(25):
    img = X_train[y_train == 7][i].reshape(28, 28)
    ax[i].imshow(img, cmap='Greys')

ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()

# Save the data in a compressed npz file
# np.savez_compressed('mnist_scaled.npz',
#                     X_train=X_train, y_train=y_train,
#                     X_test=X_test, y_test=y_test)

mnist = np.load('mnist_scaled.npz')
mnist.files

# Unpack by explicit key so the arrays cannot be mixed up by archive order
X_train, y_train, X_test, y_test = [mnist[f] for f in
                                    ['X_train', 'y_train', 'X_test', 'y_test']]

from neuralnet import NeuralNetMLP

nn = NeuralNetMLP(n_hidden=100,
                  l2=0.01,
                  epochs=200,
                  eta=0.0005,
                  minibatch_size=100,
                  shuffle=True,
                  seed=1)

nn.fit(X_train=X_train[:55000],
       y_train=y_train[:55000],
       X_valid=X_train[55000:],
       y_valid=y_train[55000:])
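# Hedged evaluation sketch (added, not from the original script): compute the
# test-set accuracy after training, assuming this NeuralNetMLP variant exposes
# a predict() method that returns integer class labels.
y_test_pred = nn.predict(X_test)
test_acc = np.sum(y_test == y_test_pred) / X_test.shape[0]
print('Test accuracy: %.2f%%' % (test_acc * 100))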
# ax[0].set_xticks([])
# ax[0].set_yticks([])
# plt.tight_layout()
# plt.show()
#
# Export the arrays as CSV (comma-delimited so they can be re-read below)
# np.savetxt('train_img.csv', X_train, fmt='%i', delimiter=',')
# np.savetxt('train_labels.csv', y_train, fmt='%i', delimiter=',')
# np.savetxt('test_img.csv', X_test, fmt='%i', delimiter=',')
# np.savetxt('test_labels.csv', y_test, fmt='%i', delimiter=',')

nn = NeuralNetMLP(n_output=10,
                  n_features=X_train.shape[1],
                  n_hidden=50,
                  l2=0.1,
                  l1=0.0,
                  epochs=1000,
                  eta=0.001,
                  alpha=0.001,
                  decrease_const=0.00001,
                  shuffle=True,
                  minibatches=50,
                  random_state=1)

nn.fit(X_train, y_train, print_progress=True)

plt.plot(range(len(nn.cost_)), nn.cost_)
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs * 50')
plt.tight_layout()
plt.show()
import time

import numpy as np

from neuralnet import NeuralNetMLP

X_train = np.genfromtxt('train_img.csv', dtype=int, delimiter=',')
y_train = np.genfromtxt('train_labels.csv', dtype=int, delimiter=',')
X_test = np.genfromtxt('test_img.csv', dtype=int, delimiter=',')
y_test = np.genfromtxt('test_labels.csv', dtype=int, delimiter=',')

nn = NeuralNetMLP(n_output=10,
                  n_features=X_train.shape[1],
                  n_hidden=50,
                  l2=0.1,
                  l1=0.0,
                  epochs=1000,
                  eta=0.001,
                  alpha=0.001,
                  decrease_const=0.00001,
                  shuffle=True,
                  minibatches=50,
                  random_state=1)

start_time = time.time()
nn.fit(X_train, y_train, print_progress=True)
y_train_pred = nn.predict(X_train)
y_test_pred = nn.predict(X_test)
end_time = time.time()
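# Follow-up sketch (added, not in the original script): report the wall-clock
# training time and the accuracies from the predictions computed above.
# Note: under Python 2 the divisions below would need an explicit float cast.
print('Training took %.2f seconds' % (end_time - start_time))

train_acc = np.sum(y_train == y_train_pred) / X_train.shape[0]
test_acc = np.sum(y_test == y_test_pred) / X_test.shape[0]
print('Training accuracy: %.2f%%' % (train_acc * 100))
print('Test accuracy: %.2f%%' % (test_acc * 100))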
        labels = np.fromfile(lbpath, dtype=np.uint8)

    with open(images_path, 'rb') as imgpath:
        magic, num, rows, cols = struct.unpack(">IIII", imgpath.read(16))
        images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)

    return images, labels


X_train, y_train = load_mnist('mnist', kind='train')
print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))

from neuralnet import NeuralNetMLP

# New debugging network with 10 hidden units, no regularization (l1, l2) = (0, 0),
# no adaptive learning rate, no momentum learning, and plain gradient descent
# (minibatches set to 1).
nn_check = NeuralNetMLP(n_output=10,
                        n_features=X_train.shape[1],
                        n_hidden=10,
                        l2=0.0,
                        l1=0.0,
                        epochs=10,
                        eta=0.001,
                        alpha=0.0,
                        decrease_const=0.0,
                        minibatches=1,
                        random_state=1)

nn_check.fit(X_train[:5], y_train[:5], print_progress=False)
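# Hedged sanity-check sketch (added, not from the original script): verify that
# the tiny debugging network can at least fit the five samples it was trained
# on, assuming NeuralNetMLP provides the same predict() method used elsewhere.
y_check_pred = nn_check.predict(X_train[:5])
print('True labels:     ', y_train[:5])
print('Predicted labels:', y_check_pred)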
    ax[i].imshow(img, cmap='Greys', interpolation='nearest')

ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()

### 003 implement a multi-layer perceptron
print('### 003')

from neuralnet import NeuralNetMLP

nn = NeuralNetMLP(n_output=10,
                  n_features=X_train.shape[1],
                  n_hidden=50,
                  l2=0.1,
                  l1=0.0,
                  epochs=1000,
                  eta=0.001,
                  alpha=0.001,
                  decrease_const=0.00001,
                  shuffle=True,
                  minibatches=50,
                  random_state=1)

nn.fit(X_train, y_train, print_progress=True)

# nn.cost_ holds one entry per mini-batch (50 mini-batches x 1000 epochs),
# so the x-axis corresponds to 'Epochs * 50' optimization steps.
plt.plot(range(len(nn.cost_)), nn.cost_)
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs * 50')
plt.tight_layout()
plt.show()
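# Optional smoothing sketch (added, an assumption rather than part of the
# original script): average the mini-batch costs over each of the 1000 epochs
# to obtain a less noisy learning curve.
import numpy as np

cost_ary = np.array(nn.cost_)
cost_avgs = [np.mean(chunk) for chunk in np.array_split(cost_ary, 1000)]
plt.plot(range(len(cost_avgs)), cost_avgs, color='red')
plt.ylim([0, 2000])
plt.ylabel('Average cost per epoch')
plt.xlabel('Epochs')
plt.tight_layout()
plt.show()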
X_trn = [YOUR CODE HERE]
y_trn = [YOUR CODE HERE]
X_vld = [YOUR CODE HERE]
y_vld = [YOUR CODE HERE]
X_tst = [YOUR CODE HERE]
y_tst = [YOUR CODE HERE]

# =====================================================================
# Create and train a multi-layer perceptron (MLP) instance
nn = NeuralNetMLP(n_output=10,               # number of output units
                  n_features=X_trn.shape[1], # number of input units
                  n_hidden=30,               # number of hidden units
                  l2=0.1,                    # lambda parameter for L2 regularization
                  l1=0.0,                    # lambda parameter for L1 regularization
                  epochs=600,                # number of training epochs
                  eta=0.001,                 # initial learning rate
                  alpha=0.001,               # momentum coefficient on the previous gradient
                  decrease_const=0.00001,    # decrease constant for the adaptive learning rate
                  minibatches=50,            # number of mini-batches per epoch
                  shuffle=True,              # shuffle the data
                  random_state=3)            # random seed

nn.fit(X_trn, y_trn, print_progress=True)

plt.figure(0)
plt.plot(range(len(nn.cost_)), nn.cost_)
plt.ylim([0, 1000])
plt.ylabel('Cost')
plt.xlabel('Epochs * 50')
plt.tight_layout()
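# Hedged follow-up sketch (added, not part of the original exercise code):
# evaluate accuracy on the held-out validation split, assuming numpy is
# imported as np and nn.predict() returns integer class labels as in the
# other NeuralNetMLP examples in this repository.
y_vld_pred = nn.predict(X_vld)
vld_acc = np.sum(y_vld == y_vld_pred) / X_vld.shape[0]
print('Validation accuracy: %.2f%%' % (vld_acc * 100))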
    img = img.reshape(28, 28)
    ax[i].imshow(img, cmap='Greys')

ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
fig.savefig('mnist_0_9.pdf')

y_train = np.argmax(y_train, axis=1)
y_test = np.argmax(y_test, axis=1)

np.savez_compressed('mnist_scaled.npz',
                    X_train=X_train, y_train=y_train,
                    X_test=X_test, y_test=y_test)
'''

mnist = np.load('mnist_scaled.npz')
X_train, X_test, y_train, y_test = (mnist['X_train'], mnist['X_test'],
                                    mnist['y_train'], mnist['y_test'])

from neuralnet import NeuralNetMLP

nn = NeuralNetMLP(n_hidden=100,
                  l2=0.01,
                  epochs=200,
                  eta=0.005,
                  minibatch_size=100,
                  shuffle=True,
                  seed=1)

nn.fit(X_train=X_train[:50000], y_train=y_train[:50000],
       X_valid=X_train[50000:], y_valid=y_train[50000:])

fig = plt.figure()
plt.plot(range(nn.epochs), nn.eval_['cost'])
plt.ylabel('Cost')
plt.xlabel('Epochs')
fig.savefig('NeuralNetMLP_cost.pdf')

fig = plt.figure()
plt.plot(range(nn.epochs), nn.eval_['train_acc'], label='training')
plt.plot(range(nn.epochs), nn.eval_['valid_acc'], label='validation', linestyle='--')
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.legend(loc='lower right')
fig.savefig('NeuralNetMLP_acc.pdf')
import sys

import numpy as np
import matplotlib.pyplot as plt

from neuralnet import NeuralNetMLP
from mnist_data import load_mnist
from wine import X_train, X_test, y_train, y_test

nn = NeuralNetMLP(n_output=3,
                  n_features=X_train.shape[1],
                  n_hidden=50,
                  l2=0.0,
                  l1=0.0,
                  epochs=40,
                  eta=0.001,
                  alpha=0.001,
                  decrease_const=0.00001,
                  minibatches=20,
                  shuffle=True,
                  random_state=1)

nn.fit(X_train, y_train, print_progress=True)

# Predict on the training data
y_train_pred = nn.predict(X_train)

# Integer division truncates under Python 2, so cast to float there
if sys.version_info < (3, 0):
    acc = (np.sum(y_train == y_train_pred, axis=0)).astype('float') / X_train.shape[0]
else:
    acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]
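# Hedged follow-up sketch (added, not in the original snippet): print the
# training accuracy computed above and repeat the same computation for the
# held-out test set.
print('Training accuracy: %.2f%%' % (acc * 100))

y_test_pred = nn.predict(X_test)
if sys.version_info < (3, 0):
    test_acc = (np.sum(y_test == y_test_pred, axis=0)).astype('float') / X_test.shape[0]
else:
    test_acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]
print('Test accuracy: %.2f%%' % (test_acc * 100))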