monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=True) hist = model.fit(train_set_R1, Y_train, validation_data=(test_set_R1, Y_test), batch_size=16, nb_epoch=jumEpoch, shuffle=True, verbose=1, callbacks=[checkpointer]) # Evaluate the model # load best model model.load_weights(nama_filenya) Y_pred = model.predict(test_set_R1, batch_size=8) #print(Y_pred) k_val = 1 Y_pred_label = [] for idt in range(len(Y_pred)): Y_pred_label.append(np.argmax(Y_pred[idt])) print Y_test.shape print Y_pred.shape print np.array(Y_pred_label).shape print np.argmax(Y_test, axis=1) print("Skor Model:") accScore = accuracy_score(np.argmax(Y_test, axis=1), Y_pred_label) print(accScore)
# Some output
model.summary()
plot(model, to_file="architecture.png", show_shapes=True)

if __name__ == '__main__':
    # Output predicted labels into per-class folders for easy viewing.
    # os.makedirs replaces the shelled-out `mkdir -p`: portable, no subshell,
    # and errors are raised instead of silently ignored.
    for class_idx in range(10):
        class_dir = os.path.join("predicted_images", str(class_idx))
        if not os.path.exists(class_dir):
            os.makedirs(class_dir)

    # Select which data you want to evaluate on (validation or testing).
    X_evaluation = X_test
    Y_evaluation = Y_test
    y_evaluation = y_test

    with tf.device('/cpu:0'):
        # Restore pretrained weights and compile for evaluation.
        model.load_weights(
            os.path.join(MODEL_PATH, 'WRN-16-2-own-81accuracy.h5'))
        model.compile(optimizer=sgd, loss="categorical_crossentropy",
                      metrics=['accuracy'])

        # Normalization statistics are fitted on the TRAINING set and then
        # applied to the evaluation data through the generator.
        validation_datagen = ImageDataGenerator(
            featurewise_center=True,
            featurewise_std_normalization=True,
            zca_whitening=True)
        validation_datagen.fit(X_train)
        # shuffle=False so predictions line up with y_evaluation order.
        generator = validation_datagen.flow(X_evaluation, Y_evaluation,
                                            batch_size=1, shuffle=False)
        total_correct = 0