import torch
import numpy as np

from statistics.calc_statistics import calc_dataset_acc, calc_dataset_loss


if __name__ == "__main__":
    epochs = 40
    batch_size = 32
    # selected optimization parameters
    std, learning_rate, momentum = 0.1, 1e-3, 0.9
    # load dataset
    train_dataloader, test_dataloader = load_dataset(batch_size)
    # Define Loss function
    loss_fn = torch.nn.CrossEntropyLoss()
    network_widths = [2**6, 2**10, 2**12]
    for network_width in network_widths:
        # Define NN model
        net = Baseline_Network(net_width=network_width)
        # random initialize net
        net.normal_random_init(std=std)
        # Define Optimizer
        optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum=momentum)
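# --- Hypothetical sketch (not part of the original sources) ---
# The script above relies on project helpers whose definitions are not shown:
# Baseline_Network (with a normal_random_init method) and load_dataset.
# The code below is an assumption-based sketch reconstructed only from how the
# helpers are called above: Baseline_Network(net_width=...),
# net.normal_random_init(std=...), and load_dataset(batch_size) returning a
# train and a test dataloader. The dataset (CIFAR-10) and the input/output
# dimensions are stand-in assumptions; the real modules may differ.
import torch
import torchvision
import torchvision.transforms as transforms


class Baseline_Network(torch.nn.Module):
    """Fully connected baseline with one hidden layer of configurable width."""

    def __init__(self, net_width, in_dim=32 * 32 * 3, out_dim=10):
        super().__init__()
        self.fc1 = torch.nn.Linear(in_dim, net_width)
        self.fc2 = torch.nn.Linear(net_width, out_dim)

    def forward(self, x):
        x = x.view(x.size(0), -1)  # flatten images to vectors
        return self.fc2(torch.relu(self.fc1(x)))

    def normal_random_init(self, std):
        # re-draw every weight from N(0, std^2) and zero the biases
        for module in self.modules():
            if isinstance(module, torch.nn.Linear):
                torch.nn.init.normal_(module.weight, mean=0.0, std=std)
                torch.nn.init.zeros_(module.bias)


def load_dataset(batch_size):
    # assumed stand-in: CIFAR-10 train/test dataloaders
    transform = transforms.ToTensor()
    train_set = torchvision.datasets.CIFAR10(root="./data", train=True,
                                             download=True, transform=transform)
    test_set = torchvision.datasets.CIFAR10(root="./data", train=False,
                                            download=True, transform=transform)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False)
    return train_loader, test_loader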
        np.array([
            train_loss_per_epoch,
            test_loss_per_epoch,
            train_acc_per_epoch,
            test_acc_per_epoch
        ]))
    print()


if __name__ == "__main__":
    epochs = 100
    # selected optimization parameters
    std, learning_rate, momentum = 0.1, 1e-3, 0.9
    # load dataset
    batch_size = 32
    train_data, test_data = load_dataset(batch_size)
    # Define CNN model
    net = Conv_Network()
    # Define Loss function
    loss_func = torch.nn.CrossEntropyLoss()
    # random initialize net
    net.random_init(std=std)
    # Define Optimizers
    sgd_optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum=momentum)
    adam_optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
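# --- Hypothetical sketch (not part of the original sources) ---
# Conv_Network and its random_init method are defined elsewhere in the project;
# only their call sites are visible above (constructed with no arguments, then
# net.random_init(std=...)). The class below is an assumption-based sketch of
# such a model, sized for 32x32x3 inputs and 10 classes; the real architecture
# may differ.
import torch


class Conv_Network(torch.nn.Module):
    def __init__(self, num_classes=10):
        super().__init__()
        self.features = torch.nn.Sequential(
            torch.nn.Conv2d(3, 16, kernel_size=3, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2),  # 32x32 -> 16x16
            torch.nn.Conv2d(16, 32, kernel_size=3, padding=1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2),  # 16x16 -> 8x8
        )
        self.classifier = torch.nn.Linear(32 * 8 * 8, num_classes)

    def forward(self, x):
        x = self.features(x)
        return self.classifier(x.flatten(1))

    def random_init(self, std):
        # draw all conv/linear weights from N(0, std^2) and zero the biases
        for module in self.modules():
            if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
                torch.nn.init.normal_(module.weight, mean=0.0, std=std)
                torch.nn.init.zeros_(module.bias)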
        np.array([
            train_loss_per_epoch,
            test_loss_per_epoch,
            train_acc_per_epoch,
            test_acc_per_epoch
        ]))
    print()


if __name__ == "__main__":
    epochs = 30
    batch_size = 32
    # selected optimization parameters
    std, learning_rate, momentum = 0.1, 1e-3, 0.9
    # load dataset
    train_data, test_data = load_dataset(batch_size)
    # Define CNN model
    net = Conv_Network()
    # Define Loss function
    loss_func = torch.nn.CrossEntropyLoss()
    # Define Optimizer
    optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, momentum=momentum)
    # random initialize net
    net.random_init(std=std)
    train_network(net, optimizer, loss_func, epochs, train_data, test_data,
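# --- Hypothetical sketch (not part of the original sources) ---
# train_network is a project helper whose definition is not shown; its call
# above is truncated. The loop below is an assumption-based sketch of such a
# helper: it runs the optimizer over the train dataloader and records per-epoch
# train/test loss and accuracy, matching the four arrays collected in the
# scripts above. The real helper may take extra arguments and use the project's
# calc_dataset_loss / calc_dataset_acc utilities instead of the inline
# evaluation used here.
import torch


def train_network(net, optimizer, loss_func, epochs, train_data, test_data):
    def evaluate(dataloader):
        # average loss and accuracy of the current model over a dataloader
        net.eval()
        total_loss, correct, count = 0.0, 0, 0
        with torch.no_grad():
            for inputs, labels in dataloader:
                outputs = net(inputs)
                total_loss += loss_func(outputs, labels).item() * labels.size(0)
                correct += (outputs.argmax(dim=1) == labels).sum().item()
                count += labels.size(0)
        return total_loss / count, correct / count

    train_loss_per_epoch, test_loss_per_epoch = [], []
    train_acc_per_epoch, test_acc_per_epoch = [], []
    for epoch in range(epochs):
        net.train()
        for inputs, labels in train_data:
            optimizer.zero_grad()
            loss = loss_func(net(inputs), labels)
            loss.backward()
            optimizer.step()
        train_loss, train_acc = evaluate(train_data)
        test_loss, test_acc = evaluate(test_data)
        train_loss_per_epoch.append(train_loss)
        test_loss_per_epoch.append(test_loss)
        train_acc_per_epoch.append(train_acc)
        test_acc_per_epoch.append(test_acc)
    return train_loss_per_epoch, test_loss_per_epoch, train_acc_per_epoch, test_acc_per_epoch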
    test_acc = linear_clf.score(test_images, test_labels)
    return test_acc


def rbf_svm_classifier(train_images, train_labels, test_images, test_labels):
    # initialize RBF SVM classifier
    rbf_clf = svm.SVC(kernel='rbf')
    # fit data
    rbf_clf.fit(train_images, train_labels)
    # calculate accuracy on test
    test_acc = rbf_clf.score(test_images, test_labels)
    return test_acc


if __name__ == "__main__":
    # load dataset
    train_images, train_labels, test_images, test_labels = load_dataset()
    # perform linear svm classification
    linear_svm_test_acc = linear_svm_classifier(train_images, train_labels, test_images, test_labels)
    print("Linear SVM Test Accuracy: " + str(linear_svm_test_acc))
    # perform rbf svm classification
    rbf_svm_test_acc = rbf_svm_classifier(train_images, train_labels, test_images, test_labels)
    print("RBF SVM Test Accuracy: " + str(rbf_svm_test_acc))
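# --- Hypothetical sketch (not part of the original sources) ---
# The SVM script above expects load_dataset() to return flat feature vectors
# and label arrays that scikit-learn can consume directly. The helper below is
# an assumption-based stand-in built on sklearn's bundled digits dataset; the
# real project presumably loads its own image data and flattens each image to
# a 1-D vector in the same way before fitting the SVMs.
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split


def load_dataset(test_size=0.2, seed=0):
    digits = datasets.load_digits()
    # flatten each 8x8 image into a 64-dimensional feature vector
    images = digits.images.reshape(len(digits.images), -1).astype(np.float64)
    labels = digits.target
    train_images, test_images, train_labels, test_labels = train_test_split(
        images, labels, test_size=test_size, random_state=seed)
    return train_images, train_labels, test_images, test_labels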