plt.show()

# Plot training and testing 0/1 loss vs. iteration number
# (header previously said "ll", but this figure plots 0/1 loss)
fig, ax = plt.subplots()

# Plot 0/1 loss curves for both sets
ax.plot(iter_train, train_01, lw=2, color="green", label=r"Train")
ax.plot(iter_train, test_01, lw=2, color="blue", label=r"Test")

# Plot BGD best fit on the testing set from before.
# NOTE: axhline's xmin/xmax are axes-fraction coordinates in [0, 1], not
# data coordinates.  The old call passed xmin=-1, xmax=100, which drew the
# line far outside the axes and relied on clipping; the defaults (0, 1)
# already span the full axes width, so they are omitted here.
plt.axhline(y=0.106, ls="--", linewidth=2, color='k',
            label="BGD Testing Best Fit")

# Format plot
ax.legend(loc="upper right")
ax.set_xlabel("Iteration")
ax.set_ylabel("0/1 Loss")
fig.tight_layout()
if save_plots:
    fig.savefig("sgd_nn_mnist_multi_train_test_01.pdf")
plt.show()

#######################################################
#
# Compute, output final losses
#
#######################################################

val.summarize_loss(X_train, y_train_true, X_test, y_test_true, w, w0,
                   y_train_label=y_train, y_test_label=y_test,
                   classfn=cu.multi_logistic_classifier,
                   lossfn=val.logloss_multi)
# New figure: training vs. testing 0/1 loss over BGD iterations.
# (The old comment claimed this plotted -(log likelihood); the curves
# plotted here are the 0/1 losses.)
fig, ax = plt.subplots()

ax.plot(iter_train, train_01, lw=2, color="green", label=r"Train")
ax.plot(iter_train, test_01, lw=2, color="blue", label=r"Test")

# Axis labels, legend, and layout
ax.set_xlabel("Iteration")
ax.set_ylabel("0/1 Loss")
ax.legend(loc="upper right")
fig.tight_layout()

# Optionally persist the figure to disk
if save_plots:
    fig.savefig("bgd_mnist_multi_train_test_01.pdf")
plt.show()

#######################################################
#
# Compute, output final losses
#
#######################################################

val.summarize_loss(X_train, y_train_true, X_test, y_test_true, w, w0,
                   y_train_label=y_train, y_test_label=y_test,
                   classfn=cu.multi_logistic_classifier,
                   lossfn=val.logloss_multi)
# Finish formatting the logloss figure built above
ax.set_xlabel("Iteration")
ax.set_ylabel("LogLoss")
ax.legend(loc="upper right")
fig.tight_layout()
if save_plots:
    fig.savefig("mnist_bin_train_test_ll.pdf")
plt.show()

# Second figure: training/testing 0/1 loss vs. iteration number
# (old comment mentioned -(log likelihood); these are the 0/1 loss curves)
fig, ax = plt.subplots()
ax.plot(iter_train, train_01, lw=2, color="green", label=r"Train")
ax.plot(iter_train, test_01, lw=2, color="blue", label=r"Test")

# Formatting and optional save
ax.set_xlabel("Iteration")
ax.set_ylabel("0/1 Loss")
ax.legend(loc="upper right")
fig.tight_layout()
if save_plots:
    fig.savefig("mnist_bin_train_test_01.pdf")
plt.show()

# Output loss metrics!
val.summarize_loss(X_train, y_train_true, X_test, y_test_true, w, w0,
                   classfn=cu.logistic_classifier,
                   lossfn=val.logloss_bin)
# Persist the logloss figure from above if requested
if save_plots:
    fig.savefig("mnist_bin_train_test_ll.pdf")
plt.show()

# Build the companion figure: 0/1 loss for train/test vs. iteration
# (earlier comment referenced -(log likelihood); this plots 0/1 loss)
fig, ax = plt.subplots()
ax.plot(iter_train, train_01, lw=2, color="green", label=r"Train")
ax.plot(iter_train, test_01, lw=2, color="blue", label=r"Test")

# Label axes, add legend, tighten layout, and optionally save
ax.set_xlabel("Iteration")
ax.set_ylabel("0/1 Loss")
ax.legend(loc="upper right")
fig.tight_layout()
if save_plots:
    fig.savefig("mnist_bin_train_test_01.pdf")
plt.show()

# Output loss metrics!
val.summarize_loss(X_train, y_train_true, X_test, y_test_true, w, w0,
                   classfn=cu.logistic_classifier,
                   lossfn=val.logloss_bin)