import sys

import matplotlib.pyplot as plt
import numpy as np

# Arff, KNN, KNNClassifier, KMeans, HAC, DecisionTreeLearner, NeuralNetwork,
# MultilayerPerceptronLearner, and k_fold_cv are the course toolkit's classes
# and helpers; import them from wherever those modules live in this repo.


def prob_6():
    # Repeat the distance-weighted KNN experiments (w = 1/dist**2); this run
    # uses the credit-approval data with VDM for the nominal attributes.
    arff = Arff('datasets/credit.arff')
    arff.shuffle()
    test = arff.create_subset_arff(slice(arff.instance_count // 4))
    train = arff.create_subset_arff(slice(arff.instance_count // 4, None))
    train.normalize()
    test.normalize()
    krange = np.arange(1, 16, 2)
    accs = []
    for k in krange:
        knn = KNN(k, weighting=True, vdm=True)
        predictions = knn.knn(train.get_features(), train.get_labels(),
                              test.get_features())
        acc = predictions == np.ravel(test.get_labels().data)
        accuracy = sum(acc) / len(acc)
        print("k:", k, "accuracy:", accuracy)
        accs.append(accuracy)
    plt.plot(krange, accs)
    plt.title("K Size Versus Accuracy on Credit Approval (Weighted)")
    plt.xlabel("K")
    plt.ylabel("Accuracy")
    plt.show()
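
# The KNN class above is the course's own implementation. As a reference point,
# here is a minimal sketch of inverse-distance-weighted voting (the
# w = 1/dist**2 scheme named in the comment); weighted_knn_vote is a
# hypothetical helper, not part of the toolkit.
def weighted_knn_vote(train_X, train_y, x, k):
    dists = np.linalg.norm(train_X - x, axis=1)   # Euclidean distance to every training point
    idx = np.argsort(dists)[:k]                   # indices of the k nearest neighbors
    weights = 1.0 / (dists[idx] ** 2 + 1e-8)      # w = 1/d**2, guarded against zero distance
    votes = {}
    for label, w in zip(train_y[idx], weights):
        votes[label] = votes.get(label, 0.0) + w  # accumulate weighted votes per class
    return max(votes, key=votes.get)              # class with the largest total weight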
def prob5():
    arff = Arff(sys.argv[2])
    imp_atts = [1, 3, 4, 5, 7, 9, 11, 12, 13]
    arff.shuffle()
    # 55/25/20 train/test/validation split
    n = len(arff.get_labels().data)
    t = int(n * .55)
    v = n - int(n * .20)
    train_set = arff.create_subset_arff(row_idx=slice(0, t, 1), col_idx=imp_atts)
    test_set = arff.create_subset_arff(row_idx=slice(t, v, 1), col_idx=imp_atts)
    validation_set = arff.create_subset_arff(row_idx=slice(v, n, 1),
                                             col_idx=imp_atts)
    epochs = []
    momentums = np.linspace(0, 1.5, 20)
    for momentum in momentums:
        print(momentum)
        nn = NeuralNetwork(8, [30], 11, LR=.1, momentum=momentum)
        all_acc_va, all_mse_va, all_mse_te, all_mse_tr = nn.train_set(
            train_set, test_set, validation_set, w=5)
        epochs.append(len(all_acc_va))
    plt.plot(momentums, epochs)
    plt.title("Vowel Momentum vs Epoch Convergence")
    plt.xlabel("Momentum")
    plt.ylabel("Epochs until Convergence")
    plt.show()
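
# NeuralNetwork applies momentum internally during backprop. Conceptually, the
# update being swept above is the classic momentum rule; the sketch below is an
# assumption about the technique, not the toolkit's actual code.
def momentum_step(w, grad, prev_delta, lr, momentum):
    delta = -lr * grad + momentum * prev_delta  # carry a fraction of the last step forward
    return w + delta, delta                     # new weights, and the step to reuse next time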
def prob_5():
    arff = Arff('datasets/cars.arff')
    arff.shuffle()
    test = arff.create_subset_arff(slice(arff.instance_count // 10))
    training = arff.create_subset_arff(slice(arff.instance_count // 10, None))
    tf = test.get_features()
    tl = test.get_labels()
    splits = k_fold_cv(arff)
    # Induce one tree on 75% of cars and another on 75% of voting
    arff = arff.create_subset_arff(slice(arff.instance_count // 4, None))
    d = DecisionTreeLearner()
    d.train(arff.get_features(), arff.get_labels())
    a = d.tree
    arff = Arff('datasets/voting.arff')
    arff.shuffle()
    arff = arff.create_subset_arff(slice(arff.instance_count // 4, None))
    d = DecisionTreeLearner()
    d.train(arff.get_features(), arff.get_labels())
    b = d.tree
    return a, b
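
# k_fold_cv is a toolkit helper. A minimal sketch of the index splitting that
# k-fold cross-validation performs is below; k_fold_indices is hypothetical and
# assumes nothing about the helper's real signature or return value.
def k_fold_indices(n, k=10):
    fold_size = n // k
    for i in range(k):
        test_idx = list(range(i * fold_size, (i + 1) * fold_size))   # held-out fold
        train_idx = [j for j in range(n) if j not in set(test_idx)]  # everything else
        yield train_idx, test_idx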
def prob_1():
    arff = Arff('datasets/lenses.arff')
    acc = []
    for _ in range(10):
        arff.shuffle()
        testing = arff.create_subset_arff(slice(arff.instance_count // 5))
        training = arff.create_subset_arff(slice(arff.instance_count // 5, None))
        d = DecisionTreeLearner()
        features = training.get_features()
        labels = training.get_labels()
        t_feat = testing.get_features()
        t_labels = testing.get_labels()
        d.train(features, labels)
        accuracy = d.get_accuracy(t_feat, t_labels)
        print(accuracy)
        acc.append(accuracy)
    print(sum(acc) / len(acc))
def prob_4(lr):
    # Read in the vowels dataset
    arff = Arff('datasets/vowels.arff')
    # Leave out the train/test and person features, which are unnecessary
    arff = arff.create_subset_arff(col_idx=slice(2, None), label_count=1)
    # Get a 75/25 split
    arff.shuffle()
    test = arff.create_subset_arff(slice(arff.instance_count // 4))
    training = arff.create_subset_arff(slice(arff.instance_count // 4, None))
    t_features = test.get_features()
    t_labels = test.get_labels()
    # Hold out 20% of the training data (15% of the total) for validation
    validation = training.create_subset_arff(slice(training.instance_count // 5))
    training = training.create_subset_arff(
        slice(training.instance_count // 5, None))
    v_features = validation.get_features()
    v_labels = validation.get_labels()
    domain = 2**np.arange(0, 8)
    training_mse = []
    validation_mse = []
    test_mse = []
    for nodes in domain:
        mse = 0
        vmse = 0
        tmse = 0
        for _ in range(100):
            learner = MultilayerPerceptronLearner(
                [training.features_count, nodes, 11], momentum=0)
            learner.lr = lr
            learner.max_epoch = 500
            learner.train(training.get_features(), training.get_labels(),
                          v_features, v_labels)
            tmse += learner.get_mse(t_features, t_labels)
            mse += learner.get_mse(training.get_features(), training.get_labels())
            vmse += learner.get_mse(v_features, v_labels)
        training_mse.append(mse / 100)
        validation_mse.append(vmse / 100)
        test_mse.append(tmse / 100)
    plt.semilogx(domain, test_mse, base=2, label="Test Set MSE")
    plt.semilogx(domain, training_mse, base=2, label="Training Set MSE")
    plt.semilogx(domain, validation_mse, base=2, label="Validation Set MSE")
    plt.title("MSE vs Number of Hidden Nodes")
    plt.xlabel("Number of Hidden Nodes")
    plt.ylabel("Mean Squared Error")
    plt.legend()
    plt.show()
def prob_6_b():
    # Read in the vowels dataset
    arff = Arff('datasets/vowels.arff')
    # Leave out the train/test and person features, which are unnecessary
    arff = arff.create_subset_arff(col_idx=slice(2, None), label_count=1)
    # Get a 75/25 split
    arff.shuffle()
    test = arff.create_subset_arff(slice(arff.instance_count // 4))
    training = arff.create_subset_arff(slice(arff.instance_count // 4, None))
    t_features = test.get_features()
    t_labels = test.get_labels()
    # Hold out 20% of the training data (15% of the total) for validation
    validation = training.create_subset_arff(slice(training.instance_count // 5))
    training = training.create_subset_arff(
        slice(training.instance_count // 5, None))
    v_features = validation.get_features()
    v_labels = validation.get_labels()
    features = training.get_features()
    labels = training.get_labels()
    taccuracy = []
    accuracy = []
    domain = 2**np.arange(1, 7)
    for i in domain:
        tacc = 0
        acc = 0
        for _ in range(3):
            # Keep roughly 32 hidden nodes total, spread across 32 // i layers
            learner = MultilayerPerceptronLearner(
                [training.features_count] + [i] * (32 // i) + [11],
                momentum=.85)
            learner.lr = .1
            learner.max_epoch = 500
            learner.train(features, labels, v_features, v_labels)
            tacc += learner.get_accuracy(t_features, t_labels)
            acc += learner.get_accuracy(features, labels)
        accuracy.append(acc / 3)
        taccuracy.append(tacc / 3)
    plt.semilogx(domain, accuracy, base=2, label="Training Set Accuracy")
    plt.semilogx(domain, taccuracy, base=2, label="Test Set Accuracy")
    plt.title("Node Distribution vs Accuracy")
    plt.xlabel("Number of Nodes per Hidden Layer")
    plt.ylabel("Accuracy")
    plt.legend()
    plt.show()
def prob0haccomplete():
    arff = Arff('datasets/labor.arff', label_count=1)
    # Trim the id column
    arff = arff.create_subset_arff(col_idx=slice(1, None))
    arff = arff.get_features()
    hac = HAC(simple=False)
    hac.train(arff, verbose=True, printk=[5])
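
# Judging by the function name, HAC(simple=False) runs complete-link clustering
# above. The defining rule is that the distance between two clusters is the
# maximum pairwise point distance; a sketch follows (complete_link is
# hypothetical, and `dist` stands in for whatever point metric HAC uses).
def complete_link(cluster_a, cluster_b, dist):
    return max(dist(a, b) for a in cluster_a for b in cluster_b)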
def prob0():
    arff = Arff('datasets/labor.arff', label_count=1)
    # Trim the id column
    arff = arff.create_subset_arff(col_idx=slice(1, None))
    arff = arff.get_features()
    km = KMeans(5)
    km.train(arff, verbose=True, centers=arff.data[:5])
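
# KMeans above is the course implementation, seeded with the first five rows as
# initial centers. The core iteration of k-means (Lloyd's algorithm) is
# sketched below as an illustration; lloyd_step is hypothetical.
def lloyd_step(X, centers):
    # Assign each point to its nearest center.
    dists = np.linalg.norm(X[:, None, :] - centers[None, :, :], axis=2)
    assignment = dists.argmin(axis=1)
    # Recompute each center as the mean of its assigned points
    # (empty clusters are not handled in this sketch).
    new_centers = np.array([X[assignment == c].mean(axis=0)
                            for c in range(len(centers))])
    return new_centers, assignment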
def prob3():
    """Sweep learning rates on the vowel data; plot best MSEs and epochs."""
    arff = Arff(sys.argv[2])
    imp_atts = [1, 3, 4, 5, 7, 9, 11, 12, 13]
    arff.shuffle()
    # 55/25/20 train/test/validation split
    n = len(arff.get_labels().data)
    t = int(n * .55)
    v = n - int(n * .20)
    train_set = arff.create_subset_arff(row_idx=slice(0, t, 1), col_idx=imp_atts)
    test_set = arff.create_subset_arff(row_idx=slice(t, v, 1), col_idx=imp_atts)
    validation_set = arff.create_subset_arff(row_idx=slice(v, n, 1),
                                             col_idx=imp_atts)
    best_mse_te = []
    best_mse_tr = []
    best_mse_va = []
    epochs = []
    LRS = [.01, .1, .5, .8, 1.5]
    for LR in LRS:
        nn = NeuralNetwork(8, [16], 11, LR=LR, momentum=0)
        all_acc_va, all_mse_va, all_mse_te, all_mse_tr = nn.train_set(
            train_set, test_set, validation_set, w=5)
        best_mse_te.append(min(all_mse_te))
        best_mse_tr.append(min(all_mse_tr))
        best_mse_va.append(min(all_mse_va))
        epochs.append(len(all_mse_va))
    plt.plot(LRS, best_mse_te, label="Test MSE")
    plt.plot(LRS, best_mse_tr, label="Training MSE")
    plt.plot(LRS, best_mse_va, label="Validation MSE")
    plt.title("Vowel MSE vs Learning Rate")
    plt.xlabel("Learning Rate")
    plt.ylabel("MSE")
    plt.legend()
    plt.show()
    plt.plot(LRS, epochs)
    plt.title("Vowel Epochs vs Learning Rate")
    plt.xlabel("Learning Rate")
    plt.ylabel("Epochs")
    plt.show()
def prob_5(lr, hidden):
    # Read in the vowels dataset
    arff = Arff('datasets/vowels.arff')
    # Leave out the train/test and person features, which are unnecessary
    arff = arff.create_subset_arff(col_idx=slice(2, None), label_count=1)
    # Get a 75/25 split
    arff.shuffle()
    test = arff.create_subset_arff(slice(arff.instance_count // 4))
    training = arff.create_subset_arff(slice(arff.instance_count // 4, None))
    t_features = test.get_features()
    t_labels = test.get_labels()
    # Hold out 20% of the training data (15% of the total) for validation
    validation = training.create_subset_arff(slice(training.instance_count // 5))
    training = training.create_subset_arff(
        slice(training.instance_count // 5, None))
    v_features = validation.get_features()
    v_labels = validation.get_labels()
    epochs = []
    accuracy = []
    domain = np.linspace(0, 1, 20)
    for momentum in domain:
        e = 0
        acc = 0
        for _ in range(10):
            learner = MultilayerPerceptronLearner(
                [training.features_count, hidden, 11], momentum=momentum)
            learner.lr = lr
            learner.max_epoch = 500
            learner.train(training.get_features(), training.get_labels(),
                          v_features, v_labels)
            acc += learner.get_accuracy(t_features, t_labels)
            e += learner.epochs
        epochs.append(e / 10)
        accuracy.append(acc / 10)
    print(accuracy)
    plt.plot(domain, epochs)
    plt.title("Number of Training Epochs vs Momentum")
    plt.xlabel("Momentum Constant")
    plt.ylabel("Number of Training Epochs")
    plt.show()
def setup():
    arff = Arff('datasets/labor.arff', label_count=1)
    # Trim the id column
    arff = arff.create_subset_arff(col_idx=slice(1, None))
    arff = arff.get_features()
    hac = HAC()
    hac.nominal_indicies = np.where(np.array(arff.attr_types) == 'nominal')[0]
    print('33,44', hac.get_distance(arff.data[33], arff.data[44]))
    print('25,34', hac.get_distance(arff.data[25], arff.data[34]))
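
# get_distance must mix nominal and continuous attributes. A common choice, and
# an assumption about what the toolkit does, is a HEOM-style metric: 0/1
# mismatch on nominal attributes, squared difference on continuous ones, with
# unknowns treated as maximally distant. heom_distance is hypothetical.
def heom_distance(a, b, nominal_idx):
    total = 0.0
    for i, (x, y) in enumerate(zip(a, b)):
        if np.isnan(x) or np.isnan(y):
            total += 1.0                     # unknown value: maximal distance
        elif i in nominal_idx:
            total += 0.0 if x == y else 1.0  # nominal: simple mismatch
        else:
            total += (x - y) ** 2            # continuous: squared difference
    return np.sqrt(total)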
def prob_5():
    # Example row: b,30.83,0,u,g,w,v,1.25,t,t,01,f,g,00202,0,+
    cont_mask = [1, 2, 7, 10, 13, 14, 16]
    cate_mask = [0, 3, 4, 5, 6, 8, 9, 11, 12, 15]
    arff = Arff("credit_approval_data.arff")
    arff.shuffle()
    arff.normalize()
    # 70/30 train/test split
    n = len(arff.get_labels().data)
    t = int(n * .7)
    train_data = arff.create_subset_arff(row_idx=slice(0, t, 1))
    test_data = arff.create_subset_arff(row_idx=slice(t, n, 1))
    test_data = np.hstack((test_data.get_features().data,
                           test_data.get_labels().data))
    train_data = np.hstack((train_data.get_features().data,
                            train_data.get_labels().data))
    # Uniform nominal distance matrix: 0 on the diagonal, 1 everywhere else
    dist_matrix = np.ones((16, 16))
    np.fill_diagonal(dist_matrix, 0)
    KNNC = KNNClassifier(8, train_data, test_data)
    print(KNNC.get_accuracy_mixed(cate_mask, cont_mask, dist_matrix))
def prob_2():
    # Get arff from the iris dataset
    iris = Arff('datasets/iris.arff')
    # Get a random 75/25 split
    iris.shuffle()
    validation = iris.create_subset_arff(slice(iris.instance_count // 4))
    v_features = validation.get_features()
    v_labels = validation.get_labels()
    training = iris.create_subset_arff(slice(iris.instance_count // 4, None))
    features = training.get_features()
    labels = training.get_labels()
    # Make the learner with network shape [4, 8, 3]
    learner = MultilayerPerceptronLearner([4, 8, 3], momentum=0)
    training_mse = []
    validation_mse = []
    classification_accuracy = []
    for epoch in range(learner.max_epoch):
        learner.train_one_epoch(features, labels)
        training_mse.append(learner.get_mse(features, labels))
        validation_mse.append(learner.get_mse(v_features, v_labels))
        classification_accuracy.append(
            learner.get_accuracy(v_features, v_labels))
        if learner.check_for_convergence(v_features, v_labels):
            break
    fig, (ax1, ax2) = plt.subplots(nrows=2, sharex=True)
    ax1.plot(training_mse, label="Training Data MSE")
    ax1.plot(validation_mse, label="Validation Data MSE")
    ax1.legend()
    ax2.plot(classification_accuracy, label="Classification Accuracy")
    ax2.legend()
    plt.show()
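
# check_for_convergence belongs to the toolkit learner. A typical validation-
# based stopping rule tracks the best validation MSE and stops after `window`
# epochs without improvement; the EarlyStopper sketch below is an assumption
# about that behavior, not the learner's actual code.
class EarlyStopper:
    def __init__(self, window=5):
        self.window = window
        self.best = float('inf')
        self.since_best = 0

    def should_stop(self, val_mse):
        if val_mse < self.best:
            self.best = val_mse   # new best: reset the patience counter
            self.since_best = 0
        else:
            self.since_best += 1  # no improvement this epoch
        return self.since_best >= self.window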
def prob2():
    arff = Arff(sys.argv[1])
    arff.shuffle()
    # 55/25/20 train/test/validation split
    n = len(arff.get_labels().data)
    t = int(n * .55)
    v = n - int(n * .20)
    train_set = arff.create_subset_arff(row_idx=slice(0, t, 1))
    test_set = arff.create_subset_arff(row_idx=slice(t, v, 1))
    validation_set = arff.create_subset_arff(row_idx=slice(v, n, 1))
    nn = NeuralNetwork(4, [9], 3, LR=.1)
    all_acc_va, all_mse_va, all_mse_te, all_mse_tr = nn.train_set(
        train_set, test_set, validation_set)
    d = range(len(all_acc_va))
    plt.plot(d, all_mse_te, label="Test MSE")
    plt.plot(d, all_mse_va, label="Validation MSE")
    plt.plot(d, all_acc_va, label="Validation Accuracy")
    plt.title("Iris Dataset")
    plt.xlabel("Epochs")
    plt.ylabel("MSE / Accuracy")
    plt.legend()
    plt.show()
def prob4():
    arff = Arff(sys.argv[2])
    imp_atts = [1, 3, 4, 5, 7, 9, 11, 12, 13]
    arff.shuffle()
    # 55/25/20 train/test/validation split
    n = len(arff.get_labels().data)
    t = int(n * .55)
    v = n - int(n * .20)
    train_set = arff.create_subset_arff(row_idx=slice(0, t, 1), col_idx=imp_atts)
    test_set = arff.create_subset_arff(row_idx=slice(t, v, 1), col_idx=imp_atts)
    validation_set = arff.create_subset_arff(row_idx=slice(v, n, 1),
                                             col_idx=imp_atts)
    best_mse_te = []
    best_mse_tr = []
    best_mse_va = []
    hidden_nodes = [1, 3, 6, 10, 13, 15, 16, 18, 20, 22, 25, 30, 40]
    for nodes in hidden_nodes:
        nn = NeuralNetwork(8, [nodes], 11, LR=.1, momentum=0)
        all_acc_va, all_mse_va, all_mse_te, all_mse_tr = nn.train_set(
            train_set, test_set, validation_set, w=5)
        best_mse_te.append(min(all_mse_te))
        best_mse_tr.append(min(all_mse_tr))
        best_mse_va.append(min(all_mse_va))
    plt.plot(hidden_nodes, best_mse_te, label="Test MSE")
    plt.plot(hidden_nodes, best_mse_tr, label="Training MSE")
    plt.plot(hidden_nodes, best_mse_va, label="Validation MSE")
    plt.title("Vowel MSE vs Hidden Nodes")
    plt.xlabel("Hidden Nodes")
    plt.ylabel("MSE")
    plt.legend()
    plt.show()
def prob_3():
    # Read in the vowels dataset
    arff = Arff('datasets/vowels.arff')
    # Leave out the train/test and person features, which are unnecessary
    arff = arff.create_subset_arff(col_idx=slice(2, None), label_count=1)
    # Get a 75/25 split
    arff.shuffle()
    test = arff.create_subset_arff(slice(arff.instance_count // 4))
    training = arff.create_subset_arff(slice(arff.instance_count // 4, None))
    t_features = test.get_features()
    t_labels = test.get_labels()
    # Hold out 20% of the training data (15% of the total) for validation
    validation = training.create_subset_arff(slice(training.instance_count // 5))
    training = training.create_subset_arff(
        slice(training.instance_count // 5, None))
    v_features = validation.get_features()
    v_labels = validation.get_labels()
    training_mse = []
    validation_mse = []
    test_mse = []
    epochs = []
    domain = np.logspace(-3, 0)
    for lr in domain:
        mse = 0
        vmse = 0
        tmse = 0
        e = 0
        for _ in range(3):
            learner = MultilayerPerceptronLearner(
                [training.features_count, 2 * training.features_count, 11],
                momentum=0)
            learner.zero_weights()
            learner.lr = lr
            learner.max_epoch = 500
            learner.train(training.get_features(), training.get_labels(),
                          v_features, v_labels)
            e += learner.epochs
            tmse += learner.get_mse(t_features, t_labels)
            mse += learner.get_mse(training.get_features(), training.get_labels())
            vmse += learner.get_mse(v_features, v_labels)
        epochs.append(e / 3)
        training_mse.append(mse / 3)
        validation_mse.append(vmse / 3)
        test_mse.append(tmse / 3)
    plt.semilogx(domain, test_mse, label="Test Set MSE")
    plt.semilogx(domain, training_mse, label="Training Set MSE")
    plt.semilogx(domain, validation_mse, label="Validation Set MSE")
    plt.title("MSE vs Learning Rate")
    plt.xlabel("Learning Rate")
    plt.ylabel("Mean Squared Error")
    plt.legend()
    plt.show()
    plt.semilogx(domain, epochs)
    plt.title("Number of Training Epochs vs Learning Rate")
    plt.xlabel("Learning Rate")
    plt.ylabel("Number of Training Epochs")
    plt.show()