Example #1
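# --- Assumed preamble (not part of the original listing). The Iris example
# --- below references X_train/Y_train, X_test/Y_test and a compiled Sequential
# --- model, so a minimal sketch of that setup is reconstructed here.
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical

iris = load_iris()
X_train, X_test, y_train_iris, y_test_iris = train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=0)
Y_train = to_categorical(y_train_iris, 3)   # 3 iris classes, one-hot encoded
Y_test = to_categorical(y_test_iris, 3)

model = Sequential()
model.add(Dense(16, input_dim=4, activation='relu'))   # 4 input features
model.add(Dense(3, activation='softmax'))              # 3-class output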
# Reconstructed: the start of this ModelCheckpoint call was cut off; the file path is an assumption.
from keras.callbacks import ModelCheckpoint
checkpoint = ModelCheckpoint('./model/sample/iris/model_checkpoint.h5',
                             save_best_only=True,
                             save_weights_only=False,
                             verbose=1)

from keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='loss', patience=90)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X_train,
          Y_train,
          epochs=400,
          batch_size=2,
          callbacks=[es, checkpoint],
          validation_split=0.2)
model.evaluate(X_test, Y_test)

model.save('./model/sample/iris/model_test01.h5')
model.save_weights('./model/sample/iris/test_weight1.h5')
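# Sketch (not in the original): reload the saved model to confirm the file
# round-trips; load_model restores the architecture, weights and optimizer state.
from keras.models import load_model
restored = load_model('./model/sample/iris/model_test01.h5')
print(restored.evaluate(X_test, Y_test, verbose=0))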

# 6. Use the model
xhat_idx = np.random.choice(X_test.shape[0], 5)

xhat = X_test[xhat_idx]

# argmax of the softmax output replaces the deprecated predict_classes
yhat = np.argmax(model.predict(xhat), axis=1)
print(yhat)
for i in range(5):
    print('True : ' + str(np.argmax(Y_test[xhat_idx[i]])) + ', Predict : ' +
          str(yhat[i]))
print("prefilter_train: ", prefilter_train.shape)
print("prefilter_test: ", prefilter_test.shape)

print("Performing PCA")
X_pca = pca(prefilter_train)
plotScatter(X_pca, y_train,
            title="6_PCA reduction (2d) of auto-encoded data (%dd)"
                  % prefilter_train.shape[1])
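# plotTSNE() is likewise defined elsewhere; a hedged sketch, assuming it draws
# one coloured point cloud per class with a legend:
import matplotlib.pyplot as plt

def plotTSNE(X2d, labels, n_classes, title=""):
    plt.figure()
    for c in range(n_classes):
        pts = X2d[labels == c]
        plt.scatter(pts[:, 0], pts[:, 1], s=5, label=str(c))
    plt.legend()
    plt.title(title)
    plt.savefig(title.strip() + ".png")
    plt.close()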

print("Performing TSNE")
model = TSNE(n_components=2, random_state=0, init="pca")
toPlot = model.fit_transform(prefilter_train[:1000])
plotTSNE(toPlot, y_train[:1000], nb_classes,
         "7_t-SNE embedding of auto-encoded data ")
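# classical_score (used in the comparison below) is produced earlier in the
# full script by training the same kind of softmax classifier on the raw,
# un-encoded inputs. A minimal sketch, assuming the raw data is X_train/X_test
# and reusing nb_classes, batch_size and nb_epoch from the surrounding script:
classical = Sequential()
classical.add(Dense(nb_classes, input_dim=X_train.shape[1], activation='softmax'))
classical.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
classical.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch,
              verbose=0)
classical_score = classical.evaluate(X_test, Y_test, verbose=0)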


print("Classifying and comparing")
# Classify results from Autoencoder
print("Building classical fully connected layer for classification")
model = Sequential()
# Keras 2 signature: units first, input_dim as keyword (the old Dense(in, out) form no longer works)
model.add(Dense(nb_classes, input_dim=prefilter_train.shape[1], activation=activation))

model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])   # accuracy metric replaces the old show_accuracy flag
model.fit(prefilter_train, Y_train,
          batch_size=batch_size,
          epochs=nb_epoch,            # nb_epoch/show_accuracy are pre-Keras-2 arguments
          verbose=0,
          validation_data=(prefilter_test, Y_test))

score = model.evaluate(prefilter_test, Y_test, verbose=0)
print('\nscore:', score)

print('Loss change:',
      100 * (score[0] - classical_score[0]) / classical_score[0], '%')
print('Accuracy change:',
      100 * (score[1] - classical_score[1]) / classical_score[1], '%')