# NOTE(review): whitespace-mangled fragment — the original line breaks were lost,
# so on this physical line everything after the first '#' ("# print the results"
# onward) has been swallowed into a single trailing comment and no longer executes.
# The span also begins mid-statement: `training: False })` is the tail of a
# sess.run(..., feed_dict={X: ..., y: ..., training: False}) call whose opening
# (and the enclosing batch loop / `with tf.Session()` block) lies outside this
# chunk, so it cannot be reformatted safely without that context.
# What the visible tokens show: per-batch test accuracy/recall and predictions
# are appended to running lists; mean accuracy/recall are printed; predictions
# and ground truth are flattened and persisted with np.save under "data/" using
# model_name in the filename; tf.local_variables_initializer() is re-run
# (presumably to reset streaming metric accumulators — TODO confirm); then MIAS
# validation data is loaded via load_validation_data(how=how, data="mias",
# which=dataset) and fresh metric lists are started for it.
# TODO(review): restore the original newlines/indentation from version control
# before making any functional change to this region.
training: False }) test_accuracy.append(test_acc_value) test_recall.append(test_recall_value) test_predictions.append(yhat) ground_truth.append(y_batch) print("Evaluating on test data") # print the results print("Mean Test Accuracy:", np.mean(test_accuracy)) print("Mean Test Recall:", np.mean(test_recall)) # unlist the predictions and truth test_predictions = flatten(test_predictions) ground_truth = flatten(ground_truth) # save the predictions and truth for review np.save(os.path.join("data", "predictions_" + model_name + ".npy"), test_predictions) np.save(os.path.join("data", "truth_" + model_name + ".npy"), ground_truth) sess.run(tf.local_variables_initializer()) ## evaluate on MIAS data X_te, y_te = load_validation_data(how=how, data="mias", which=dataset) mias_test_accuracy = [] mias_test_recall = [] mias_test_predictions = []
# NOTE(review): near-duplicate of the preceding mangled span — this looks like a
# second revision of the same evaluation epilogue joined into one view (here the
# MIAS section is commented out and hard-codes `which=9` instead of using the
# `dataset` variable). As above, the line breaks were lost: everything after the
# first '#' ("# print the results" onward) is now one trailing comment, and the
# span opens mid-statement with the tail of a sess.run feed_dict
# (`X: X_batch, y: y_batch, training: False })`), so its enclosing call, batch
# loop, and indentation are not visible and cannot be reconstructed safely here.
# Visible behavior: append per-batch test accuracy/recall/predictions/truth,
# print mean accuracy and recall, flatten and np.save predictions and ground
# truth under "data/" keyed by model_name, then re-run
# tf.local_variables_initializer() (likely resetting streaming metric state —
# TODO confirm).
# TODO(review): de-duplicate against the sibling copy of this region and restore
# original formatting from version control before editing functionally.
X: X_batch, y: y_batch, training: False }) test_accuracy.append(test_acc_value) test_recall.append(test_recall_value) test_predictions.append(yhat) ground_truth.append(y_batch) # print the results print("Mean Test Accuracy:", np.mean(test_accuracy)) print("Mean Test Recall:", np.mean(test_recall)) # unlist the predictions and truth test_predictions = flatten(test_predictions) ground_truth = flatten(ground_truth) # save the predictions and truth for review np.save(os.path.join("data", "predictions_" + model_name + ".npy"), test_predictions) np.save(os.path.join("data", "truth_" + model_name + ".npy"), ground_truth) sess.run(tf.local_variables_initializer()) # print("Evaluating on MIAS data") # # ## evaluate on MIAS dataset 9 which is the closest to raw images we have # X_te, y_te = load_validation_data(how=how, data="mias", which=9) # # mias_test_accuracy = [] # mias_test_recall = []