Example #1
0
)

print("Now we evaluate on the test data")
results = model.evaluate(test_ds)
print("test loss:", results[0], ", test acc:", results[1])

# Now we generate the confusion matrix and loss graph

# Confusion matrix
# This is awkward because the dataset yields (features, labels) tuples, and there's
# no clean way to split the two halves without iterating twice.
# Iterating twice would misalign labels and predictions, though, because the dataset
# is reshuffled on every iteration.
test_y_hat = []
test_y = []

# This per-batch predict() loop is quite slow; a faster single-pass alternative is sketched below.
for features, labels in test_ds:
    test_y_hat.extend(np.argmax(model.predict(features), axis=1))
    test_y.extend(np.argmax(labels.numpy(), axis=1))

#test_y_hat = np.ndarray.flatten(np.array(test_y_hat))
#test_y     = np.ndarray.flatten(np.array(test_y))
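# A minimal sketch of a faster alternative, under the assumption that we control how
# test_ds is built: a test split does not really need reshuffling, so constructing it
# without shuffle() (or with reshuffle_each_iteration=False) keeps the element order
# stable across passes, and the whole loop collapses into one vectorized predict():
#
#   test_y     = np.concatenate([np.argmax(y.numpy(), axis=1) for _, y in test_ds])
#   test_y_hat = np.argmax(model.predict(test_ds), axis=1)
#
# (Keras ignores the label half of (features, labels) batches when predict() is
# given a tf.data.Dataset.)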

# I've checked both of these calls, they work correctly
confusion = tf.math.confusion_matrix(test_y, test_y_hat)
#plot_confusion_matrix(confusion)
save_confusion_matrix(confusion)

# Loss curve
#plot_loss_curve(history)
save_loss_curve(history)
Example #2
0
        )

        with open(results_csv_path, "a") as f:
            f.write("{},{},{},{},{}\n".format(distance, val_results[0],
                                              val_results[1], test_results[0],
                                              test_results[1]))

        print("Calculate the confusion matrix")
        total_confusion = None
        for features, labels in test_ds.unbatch().batch(50000).prefetch(5):
            confusion = tf.math.confusion_matrix(
                np.argmax(labels.numpy(), axis=1),
                np.argmax(model.predict(features), axis=1),
                num_classes=RANGE)

            if total_confusion is None:
                total_confusion = confusion
            else:
                total_confusion = total_confusion + confusion

        save_confusion_matrix(total_confusion,
                              path="confusion_distance-{}".format(distance))

    save_loss_curve(history)

    end_time = time.time()

    with open(results_csv_path, "a") as f:
        f.write("total time seconds: {}\n".format(end_time - start_time))