Example 1
from mnist_dnn_data import load_data
from dnn import DNN

# Load MNIST already flattened to feature vectors with one-hot labels.
(train_x, train_y), (test_x, test_y) = load_data()

# Layer sizes are derived from the data shapes: flattened pixels in, one class per output unit.
num_input = train_x.shape[1]
num_hiddens = [100, 50]
num_output = train_y.shape[1]

model = DNN(num_input, num_hiddens, num_output)

# Train for 5 epochs, holding out 20% of the training set for validation.
history = model.fit(train_x, train_y, epochs=5, batch_size=100, validation_split=0.2)

# evaluate() returns the test loss together with the accuracy metric.
performance_test = model.evaluate(test_x, test_y, batch_size=100)
print('Test Loss and Accuracy ->', performance_test)
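
Neither mnist_dnn_data.load_data nor the DNN class is shown in this example. The sketch below is one plausible way to implement them on top of Keras, purely for orientation: the flattening and normalization in load_data, the ReLU hidden layers, the softmax output and the 'adam' optimizer are assumptions inferred from how the objects are called above, not the original implementation.

# Hypothetical sketch of the helpers used in Example 1 (assumed implementation).
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical


def load_data():
    """Return MNIST as flat, normalized vectors with one-hot labels."""
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(-1, 28 * 28).astype('float32') / 255.0
    x_test = x_test.reshape(-1, 28 * 28).astype('float32') / 255.0
    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)
    return (x_train, y_train), (x_test, y_test)


class DNN(Sequential):
    """Fully connected network: input -> ReLU hidden layers -> softmax output."""

    def __init__(self, num_input, num_hiddens, num_output):
        super().__init__()
        self.add(Dense(num_hiddens[0], activation='relu', input_shape=(num_input,)))
        for units in num_hiddens[1:]:
            self.add(Dense(units, activation='relu'))
        self.add(Dense(num_output, activation='softmax'))
        # Compiled here so the calling code can go straight to fit()/evaluate().
        self.compile(loss='categorical_crossentropy',
                     optimizer='adam',
                     metrics=['accuracy'])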
Example 2
from keras.datasets import mnist
from keras.utils import to_categorical
# DNN is assumed to come from a local module (as in Example 1); it is not defined in this snippet.

(x_train, y_train), (x_test, y_test) = mnist.load_data()
mnist_original_size = 28
# Reshape to (num_samples, 28, 28, 1) so the images carry an explicit channel axis.
x_train = x_train.reshape(x_train.shape[0], mnist_original_size, mnist_original_size, 1)
x_test = x_test.reshape(x_test.shape[0], mnist_original_size, mnist_original_size, 1)
# One-hot encode the labels for the 10 digit classes.
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)


model1 = DNN(width=x_train.shape[1], height=x_train.shape[2], depth=x_train.shape[3], classes=10)
model1.compile(loss="categorical_crossentropy", optimizer='adadelta', metrics=["accuracy"])
model1.fit(x_train, y_train, epochs=1, verbose=1)

# evaluate() returns the loss plus the metrics declared in compile(), here accuracy.
loss, accuracy = model1.evaluate(x_test, y_test, verbose=1)
print('\nloss: {:.4f}, accuracy: {:.2f}%'.format(loss, accuracy * 100))

# predict() returns the softmax class probabilities (predict_proba was removed in recent Keras).
m1 = model1.predict(x_test)



model2 = DNN(width=x_train.shape[1], height=x_train.shape[2], depth=x_train.shape[3], classes=10)
model2.compile(loss="categorical_crossentropy", optimizer='adadelta', metrics=["accuracy"])
# The original used nb_epoch, the old Keras 1 name; current Keras expects epochs.
model2.fit(x_train, y_train, epochs=1, verbose=1)

loss, accuracy = model2.evaluate(x_test, y_test, verbose=1)
print('\nloss: {:.4f}, accuracy: {:.2f}%'.format(loss, accuracy * 100))

m2 = model2.predict(x_test)
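
The DNN class in this example takes width, height, depth and classes, which suggests a convolutional model rather than the dense network of Example 1. Below is a minimal sketch of such a wrapper; the LeNet-style layer stack (two conv/pool blocks plus a dense head) is an assumption, since only the constructor signature can be read from the snippet. Compilation is left to the caller, matching the model1.compile(...) and model2.compile(...) calls above.

# Hypothetical sketch of a DNN class matching the width/height/depth/classes signature.
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense


class DNN(Sequential):
    """Small LeNet-style CNN; the caller compiles and fits it."""

    def __init__(self, width, height, depth, classes):
        super().__init__()
        # Two conv/pool stages extract spatial features from the 28x28x1 images.
        self.add(Conv2D(32, (3, 3), activation='relu',
                        input_shape=(width, height, depth)))
        self.add(MaxPooling2D(pool_size=(2, 2)))
        self.add(Conv2D(64, (3, 3), activation='relu'))
        self.add(MaxPooling2D(pool_size=(2, 2)))
        # Dense classifier head ending in a softmax over the digit classes.
        self.add(Flatten())
        self.add(Dense(128, activation='relu'))
        self.add(Dense(classes, activation='softmax'))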