def report_accuracy(X, y, coarse=True, prefix_string='benchmark'):
    """Predict labels for X with the module-level model and print the accuracy.

    Parameters:
        X: input samples fed to the (module-level) Keras ``model``.
        y: integer class labels; one-hot encoded against the coarse or fine
           class count depending on ``coarse``.
        coarse: when True encode against ``nb_classes_coarse``, otherwise
           ``nb_classes_fine``.
        prefix_string: tag used in the printed report line.

    Returns:
        The computed accuracy (new: previously the value was printed and
        discarded; returning it is backward-compatible since the original
        returned None and no caller could rely on that).
    """
    # One-hot encode the ground truth with the class count that matches
    # the requested label granularity.
    nb_classes = nb_classes_coarse if coarse else nb_classes_fine
    Y = np_utils.to_categorical(y, nb_classes)
    # Test the model
    Y_predict = model.predict(X, batch_size=batch_size, verbose=1)
    # Convert floating point vector to a clean binary vector with only two 1's
    Y_predict_clean = clean_vec(Y_predict)
    acc = accuracy(Y_predict_clean, Y)
    print("%s accuracy: %f" % (prefix_string, acc))
    return acc
# Evaluate the current model against the fine-grained test labels.
# NOTE(review): reconstructed from a whitespace-mangled chunk; statement order
# preserved. The chunk is cut off after the final comment below — the code
# that computes the "not trained on" indices lives outside this view.
Y_test = Y_test_fine
# Test the model
Y_predict_test = model.predict(X_test, batch_size=batch_size, verbose=1)
Y_predict_train = model.predict(X_train, batch_size=batch_size, verbose=1)
# Convert floating point vector to a clean binary vector with only two 1's
Y_predict_test_clean = clean_vec(Y_predict_test)
Y_predict_train_clean = clean_vec(Y_predict_train)
# Derive the report label from the model name. The fallback branch is new:
# previously a model_name containing neither 'coarse' nor 'fine' left
# label_type unbound and the print below raised NameError.
if 'coarse' in model_name:
    label_type = 'coarse'
elif 'fine' in model_name:
    label_type = 'fine'
else:
    label_type = 'unknown'
test_accuracy = accuracy(Y_predict_test_clean, Y_test)
print(label_type + " test accuracy: %f" % test_accuracy)
train_accuracy = accuracy(Y_predict_train_clean, Y_train)
print(label_type + " train accuracy: %f" % train_accuracy)
# For generalization test, show the accuracy on the things it was not trained on
if '_gen' in model_name:
    coarse = 'coarse' in model_name
    # Indices of the things it was trained on
    # NOTE(review): "trained on" == fine labels NOT divisible by 5 — confirm
    # this matches the sampling used when the _gen model was trained.
    indices_base = np.where(y_train_fine % 5 != 0)[0]
    y_train_fine_base = y_train_fine[indices_base]
    y_train_coarse_base = y_train_coarse[indices_base]
    X_train_base = X_train[indices_base]
    # Indices of the things it was not trained on
samples_per_epoch=X_train.shape[0], nb_epoch=nb_epoch, show_accuracy=True, validation_data=(X_test, Y_test_coarse), nb_worker=1) model.save_weights('net_output/cifar100_coarse_%s_weights.h5' % model_name) json_string = model.to_json() open('net_output/cifar100_coarse_%s_architecture.json' % model_name, 'w').write(json_string) pickle.dump(history.history, open('net_output/cifar100_coarse_%s_history.p' % model_name,'w')) print("saving to: cifar100_coarse_%s" % model_name) elif load_matching: load_custom_weights(model, 'net_output/keras_cifar100_matching_weights.h5') Y_predict_test = model.predict(X_test, batch_size=batch_size, verbose=1) Y_predict_train = model.predict(X_train, batch_size=batch_size, verbose=1) test_accuracy_coarse = accuracy(Y_predict_test, Y_test_coarse) print("Coarse test accuracy: %f" % test_accuracy_coarse) train_accuracy_coarse = accuracy(Y_predict_train, Y_train_coarse) print("Coarse train accuracy: %f" % train_accuracy_coarse) else: model.load_weights('cifar100_coarse_%s_weights.h5' % model_name) Y_predict_test = model.predict(X_test, batch_size=batch_size, verbose=1) Y_predict_train = model.predict(X_train, batch_size=batch_size, verbose=1) Y_predict_test_coarse = Y_predict_test['output_coarse'] test_accuracy_coarse = accuracy(Y_predict_test_coarse, Y_test_coarse) print("Fine test accuracy: %f" % test_accuracy_coarse) Y_predict_train_coarse = Y_predict_train['output_coarse'] train_accuracy_coarse = accuracy(Y_predict_train_coarse, Y_train_coarse)
# NOTE(review): fragment of the fine-label model's train/evaluate path. It
# begins mid-way through a model.fit_generator(...) call and an
# if/elif(load_matching)/else whose headers lie outside this chunk, so the
# code is left byte-identical; reformatting would require guessing the
# enclosing structure.
# NOTE(review): the else branch loads "cifar100_fine_%s_weights.h5" without
# the "net_output/" prefix used when saving — confirm whether that is intended.
# NOTE(review): open(...).write(json_string) leaks the file handle, and
# pickle.dump into a file opened with mode "w" (text) only works on Python 2;
# Python 3 requires "wb".
# NOTE(review): the else branch indexes model.predict(...) with
# ["output_fine"] — presumably a multi-output Graph-style model; the elif
# branch treats the prediction as a plain array. Verify both model types are
# really possible on these paths.
show_accuracy=True, validation_data=(X_test, Y_test_fine), nb_worker=1, ) model.save_weights("net_output/cifar100_fine_%s_weights.h5" % model_name) json_string = model.to_json() open("net_output/cifar100_fine_%s_architecture.json" % model_name, "w").write(json_string) pickle.dump(history.history, open("net_output/cifar100_fine_%s_history.p" % model_name, "w")) print("saving to: cifar100_fine_%s" % model_name) elif load_matching: load_custom_weights(model, "net_output/keras_cifar100_matching_weights.h5") Y_predict_test = model.predict(X_test, batch_size=batch_size, verbose=1) Y_predict_train = model.predict(X_train, batch_size=batch_size, verbose=1) test_accuracy_fine = accuracy(Y_predict_test, Y_test_fine) print("Fine test accuracy: %f" % test_accuracy_fine) train_accuracy_fine = accuracy(Y_predict_train, Y_train_fine) print("Fine train accuracy: %f" % train_accuracy_fine) else: model.load_weights("cifar100_fine_%s_weights.h5" % model_name) Y_predict_test = model.predict(X_test, batch_size=batch_size, verbose=1) Y_predict_train = model.predict(X_train, batch_size=batch_size, verbose=1) Y_predict_test_fine = Y_predict_test["output_fine"] test_accuracy_fine = accuracy(Y_predict_test_fine, Y_test_fine) print("Fine test accuracy: %f" % test_accuracy_fine) Y_predict_train_fine = Y_predict_train["output_fine"] train_accuracy_fine = accuracy(Y_predict_train_fine, Y_train_fine)
# NOTE(review): train/evaluate path for the joint (fine + coarse output)
# model. The "else:" in the middle pairs with an "if" header outside this
# chunk (presumably a train-vs-load flag), so the code is left byte-identical;
# reformatting would require guessing the enclosing structure.
# NOTE(review): the else branch feeds {'input': X} dicts to model.predict and
# reads 'output_fine'/'output_coarse' from the result — Graph-style named
# inputs/outputs; the training branch above passes plain arrays. Confirm both
# calling conventions are valid for this model.
# NOTE(review): open(...).write(json_string) leaks the file handle, and
# pickle.dump into a file opened with mode 'w' (text) only works on Python 2;
# Python 3 requires 'wb'.
# fit the model on the batches generated by datagen.flow() history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), samples_per_epoch=X_train.shape[0], nb_epoch=nb_epoch, show_accuracy=True, validation_data=(X_test, Y_test), nb_worker=1) model.save_weights('net_output/keras_cifar100_%s_weights.h5' % model_name) json_string = model.to_json() open('net_output/keras_cifar100_%s_architecture.json' % model_name, 'w').write(json_string) pickle.dump(history.history, open('net_output/keras_cifar100_%s_history.p' % model_name,'w')) print("saving to: keras_cifar100_%s" % model_name) else: model.load_weights('net_output/keras_cifar100_%s_weights.h5' % model_name) Y_predict_test = model.predict({'input':X_test}, batch_size=batch_size, verbose=1) Y_predict_train = model.predict({'input':X_train}, batch_size=batch_size, verbose=1) Y_predict_test_fine = Y_predict_test['output_fine'] Y_predict_test_coarse = Y_predict_test['output_coarse'] test_accuracy_fine = accuracy(Y_predict_test_fine, Y_test_fine) test_accuracy_coarse = accuracy(Y_predict_test_coarse, Y_test_coarse) print("Fine test accuracy: %f" % test_accuracy_fine) print("Coarse test accuracy: %f" % test_accuracy_coarse) Y_predict_train_fine = Y_predict_train['output_fine'] Y_predict_train_coarse = Y_predict_train['output_coarse'] train_accuracy_fine = accuracy(Y_predict_train_fine, Y_train_fine) train_accuracy_coarse = accuracy(Y_predict_train_coarse, Y_train_coarse) print("Fine train accuracy: %f" % train_accuracy_fine) print("Coarse train accuracy: %f" % train_accuracy_coarse)