Example #1
import numpy as np
import matplotlib.pyplot as plt

# plot 25 different versions of the digit 7 from the training set
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True)
ax = ax.flatten()
for i in range(25):
    img = X_train[y_train == 7][i].reshape(28, 28)
    ax[i].imshow(img, cmap='Greys')
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()

# Save data in compressed npz file
#np.savez_compressed('mnist_scaled.npz', X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test)

# reload the preprocessed data from the compressed npz archive
mnist = np.load('mnist_scaled.npz')
mnist.files

X_train, y_train, X_test, y_test = [mnist[f] for f in
                                    ['X_train', 'y_train', 'X_test', 'y_test']]
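# Quick sanity check (added here, not part of the original snippet): confirm the
# reloaded arrays have the expected shapes (the standard MNIST split has 60,000
# training and 10,000 test rows, 784 pixel columns each)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)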

from neuralnet import NeuralNetMLP

nn = NeuralNetMLP(n_hidden=100,
                  l2=0.01,
                  epochs=200,
                  eta=0.0005,
                  minibatch_size=100,
                  shuffle=True,
                  seed=1)

nn.fit(X_train=X_train[:55000],
       y_train=y_train[:55000],
       X_valid=X_train[55000:],
       y_valid=y_train[55000:])
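# Small follow-up sketch (not in the original snippet), assuming the NeuralNetMLP
# class from neuralnet.py exposes a predict() method returning class labels:
# score the 5,000 held-out validation samples after training
y_valid_pred = nn.predict(X_train[55000:])
valid_acc = np.mean(y_train[55000:] == y_valid_pred)
print('Validation accuracy: %.2f%%' % (valid_acc * 100))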
Example #2
import time

from neuralnet import NeuralNetMLP

# 784-50-10 MLP with L2 regularization, momentum learning (alpha) and an
# adaptive (decreasing) learning rate
nn = NeuralNetMLP(n_output=10,
                  n_features=X_train.shape[1],
                  n_hidden=50,
                  l2=0.1,
                  l1=0.0,
                  epochs=1000,
                  eta=0.001,
                  alpha=0.001,
                  decrease_const=0.00001,
                  shuffle=True,
                  minibatches=50,
                  random_state=1)

start_time = time.time()

nn.fit(X_train, y_train, print_progress=True)

y_train_pred = nn.predict(X_train)

y_test_pred = nn.predict(X_test)

end_time = time.time()

total_time = end_time - start_time  # elapsed time in seconds

# write the training-set predictions to a text file, one label per line
with open('out.txt', 'w') as f:
    for val in y_train_pred:
        f.write(str(val) + '\n')
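# Possible continuation (not in the original snippet): turn the predictions into
# accuracy scores and report the elapsed time; assumes numpy is imported as np
train_acc = np.mean(y_train == y_train_pred)
test_acc = np.mean(y_test == y_test_pred)
print('Training accuracy: %.2f%%' % (train_acc * 100))
print('Test accuracy: %.2f%%' % (test_acc * 100))
print('Total time: %.2f s' % total_time)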
Example #3
# ax[0].set_xticks([])
# ax[0].set_yticks([])
# plt.tight_layout()
# plt.show()
#
# np.savetxt('train_img.csv', X_train, fmt='%i', delimiter=',')
# np.savetxt('train_labels.csv', y_train, fmt='%i', delimiter=',')
# np.savetxt('test_img.csv', X_test, fmt='%i', delimiter=',')
# np.savetxt('test_labels.csv', y_test, fmt='%i', delimiter=',')

nn = NeuralNetMLP(n_output=10,
                  n_features=X_train.shape[1],
                  n_hidden=50,
                  l2=0.1,
                  l1=0.0,
                  epochs=1000,
                  eta=0.001,
                  alpha=0.001,
                  decrease_const=0.00001,
                  shuffle=True,
                  minibatches=50,
                  random_state=1)

nn.fit(X_train, y_train, print_progress=True)

plt.plot(range(len(nn.cost_)), nn.cost_)
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs * 50')
plt.tight_layout()
plt.show()

import os
import struct

def load_mnist(path, kind='train'):
    """Load MNIST images and labels from the ubyte files in `path`."""
    labels_path = os.path.join(path, '%s-labels-idx1-ubyte' % kind)
    images_path = os.path.join(path, '%s-images-idx3-ubyte' % kind)

    with open(labels_path, 'rb') as lbpath:
        magic, n = struct.unpack('>II', lbpath.read(8))
        labels = np.fromfile(lbpath, dtype=np.uint8)

    with open(images_path, 'rb') as imgpath:
        magic, num, rows, cols = struct.unpack(">IIII", imgpath.read(16))

        images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)

    return images, labels


X_train, y_train = load_mnist('mnist', kind='train')
print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))
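# Usage sketch (added, not in the original snippet): the test partition can be
# loaded with the same function; the standard MNIST test files use the 't10k' prefix
X_test, y_test = load_mnist('mnist', kind='t10k')
print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))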

from neuralnet import NeuralNetMLP

# new neural network with 10 hidden units and no regularization ((l1, l2) = (0, 0)),
# no adaptive learning rate, no momentum learning, and regular gradient descent
# (minibatches set to 1)
nn_check = NeuralNetMLP(n_output=10,
                        n_features=X_train.shape[1],
                        n_hidden=10,
                        l2=0.0,
                        l1=0.0,
                        epochs=10,
                        eta=0.001,
                        alpha=0.0,
                        decrease_const=0.0,
                        minibatches=1,
                        random_state=1)

nn_check.fit(X_train[:5], y_train[:5], print_progress=False)
# a 784-50-10 MLP (784 input features, 50 hidden units, 10 output classes)
nn = NeuralNetMLP(n_output=10,
                  n_features=X_train.shape[1],
                  n_hidden=50,
                  l2=0.1,
                  l1=0.0,
                  epochs=1000,
                  eta=0.001,
                  alpha=0.001,
                  decrease_const=0.00001,
                  shuffle=True,
                  minibatches=50,
                  random_state=1)

nn.fit(X_train, y_train, print_progress=True)

# plot every 50th step to account for the 50 mini-batches (50 mini-batches x 1000 epochs)
plt.plot(range(len(nn.cost_)), nn.cost_)
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs * 50')
plt.tight_layout()
plt.show()

# plot a smoother version of the cost function against the number of epochs
# by averaging over the mini-batch intervals
batches = np.array_split(range(len(nn.cost_)), 1000)
cost_ary = np.array(nn.cost_)
cost_avgs = [np.mean(cost_ary[i]) for i in batches]
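# The snippet stops before drawing the averaged curve; a plausible continuation,
# following the same plotting pattern used above
plt.plot(range(len(cost_avgs)), cost_avgs, color='red')
plt.ylim([0, 2000])
plt.ylabel('Cost')
plt.xlabel('Epochs')
plt.tight_layout()
plt.show()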
Example #6
		ax[i].imshow(img, cmap='Greys')
	ax[0].set_xticks([])
	ax[0].set_yticks([])
	plt.tight_layout()
	fig.savefig('mnist_0_9.pdf')

	y_train=np.argmax(y_train, axis=1)
	y_test=np.argmax(y_test, axis=1)
	np.savez_compressed('mnist_scaled.npz', X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test)
	'''

	mnist = np.load('mnist_scaled.npz')
	X_train, X_test, y_train, y_test = mnist['X_train'], mnist['X_test'], mnist['y_train'], mnist['y_test']
	from neuralnet import NeuralNetMLP
	nn = NeuralNetMLP(n_hidden=100, l2=0.01, epochs=200, eta=0.005, minibatch_size=100, shuffle=True, seed=1)
	nn.fit(X_train=X_train[:50000], y_train=y_train[:50000], X_valid=X_train[50000:], y_valid=y_train[50000:])

	fig = plt.figure()
	plt.plot(range(nn.epochs), nn.eval_['cost'])
	plt.ylabel('Cost')
	plt.xlabel('Epochs')
	fig.savefig('NeuralNetMLP_cost.pdf')

	fig = plt.figure()
	plt.plot(range(nn.epochs), nn.eval_['train_acc'], label='training')
	plt.plot(range(nn.epochs), nn.eval_['valid_acc'], label='validation', linestyle='--')
	plt.ylabel('Accuracy')
	plt.xlabel('Epochs')
	plt.legend(loc='lower right')
	fig.savefig('NeuralNetMLP_acc.pdf')
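	# Closing sketch (not in the original excerpt), assuming the nn object and test
	# arrays defined above are in scope: report accuracy on the 10,000 test images
	y_test_pred = nn.predict(X_test)
	test_acc = np.mean(y_test == y_test_pred)
	print('Test accuracy: %.2f%%' % (test_acc * 100))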