valid[1][:n]), (test[0][:n], test[1][:n])

# Standardize all splits with the training-set statistics.
mean, std = train[0].mean(), train[0].std()
train = standardize_dataset(train, mean, std)
valid = standardize_dataset(valid, mean, std)
test = standardize_dataset(test, mean, std)

# for i in [1, 2, 5, 10, 20, 100]:
for i in [20]:
    print(f"x-----> {i} hidden nodes <-----x")
    net = MLP(train[0], number_to_one_hoot(train[1]), i, outtype="softmax")
    # net.train(train[0], number_to_one_hoot(train[1]), 0.1, 1000)
    net.earlystopping_primitive(train[0], number_to_one_hoot(train[1]),
                                valid[0], number_to_one_hoot(valid[1]),
                                eta=0.1, niteration=100, early_stop_count=2)
    net.confmat(train[0], number_to_one_hoot(train[1]))
    net.confmat(test[0], number_to_one_hoot(test[1]))
    print()
    print()

"""
Confusion matrix:
[[4827    0   15    6   13   16   23    5   15   21]
 [   1 5541   22   12    7    8    9   12   37    8]
 [  10   25 4700   60   15    5   13   38   24   18]
 [   8   24   41 4780    2   81    2   18   51   31]
 [   6    6   40    2 4659   25   13   34   12   71]
 [  23   19   18   90    4 4260   46    7   40   27]
"""
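# --- Assumed helpers -------------------------------------------------------
# standardize_dataset and number_to_one_hoot are defined elsewhere in this
# repo; the sketches below are only inferred from their call sites above and
# are assumptions, not the actual implementations.

import numpy as np

def standardize_dataset(dataset, mean, std):
    # Rescale the inputs with the supplied statistics; targets pass through.
    inputs, targets = dataset
    return (inputs - mean) / std, targets

def number_to_one_hoot(labels, nclasses=10):
    # Map integer class labels to one-hot row vectors.
    onehot = np.zeros((len(labels), nclasses))
    onehot[np.arange(len(labels)), np.asarray(labels, dtype=int)] = 1
    return onehot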
# Data normalization
inputs = inputs - inputs.mean(0)
inputs /= np.abs(inputs).max(0)
print(np.abs(inputs).max(0))

# Targets to one-hot encoding
targets = number_to_one_hoot(targets, 3)

# Alternating split: 1/2 train, 1/4 validation, 1/4 test.
train = (inputs[::2], targets[::2])
valid = (inputs[1::4], targets[1::4])
test = (inputs[3::4], targets[3::4])

# Average test accuracy over 10 independently trained networks.
accs = []
for i in range(10):
    net = MLP(train[0], train[1], 10)
    net.earlystopping_primitive(train[0], train[1], valid[0], valid[1], 0.1, 10, 2)
    # net.train(train[0], train[1], 0.1, 100000)
    print("Train")
    net.confmat(train[0], train[1])
    print("Valid")
    net.confmat(valid[0], valid[1])
    print("Test")
    acc = conf_mat_acc(net.confmat(test[0], test[1]))
    accs.append(acc)

print(f"Mean accuracy: {np.mean(np.array(accs)) * 100:2.4f}")

"""
5. Number of Instances: 150 (50 in each of three classes)
6. Number of Attributes: 4 numeric, predictive attributes and the class
7. Attribute Information:
"""
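# conf_mat_acc is also assumed: its use above suggests confmat returns the
# confusion matrix and accuracy is the diagonal (correct predictions) over
# the total count. A minimal sketch under that assumption:

def conf_mat_acc(cm):
    # Fraction of samples on the diagonal of the confusion matrix.
    return np.trace(cm) / np.sum(cm)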