def main():
    """Train an MLM classifier on a random MNIST subset and report accuracy
    plus a confusion-matrix plot for the run closest to the mean accuracy."""
    # Flatten 28x28 images into 784-dim feature vectors.
    train_images = mnist.train_images()
    train_images = train_images.reshape(
        (train_images.shape[0], train_images.shape[1] * train_images.shape[2]))
    train_labels = mnist.train_labels()
    test_images = mnist.test_images()
    test_images = test_images.reshape(
        (test_images.shape[0], test_images.shape[1] * test_images.shape[2]))
    test_labels = mnist.test_labels()

    iters = 1
    accuracy = np.zeros((iters, 1))
    models = []
    for i in range(iters):
        # replace=False: sample distinct rows; the default samples with
        # replacement and puts duplicates into the train/test subsets.
        train_ids = np.random.choice(len(train_labels), 5000, replace=False)
        X_train = train_images[train_ids]
        Y_train = train_labels[train_ids]
        Y_train = Y_train.reshape(Y_train.shape[0], 1)

        test_ids = np.random.choice(len(test_labels), 100, replace=False)
        X_test = test_images[test_ids]
        Y_test = test_labels[test_ids]
        Y_test = Y_test.reshape(Y_test.shape[0], 1)

        mlm = MLM()
        # Use half of the training subset as reference points.
        mlm.train(X_train, Y_train, int(round(len(X_train) * 0.5)))
        Y_hat = mlm.predict(X_test)

        accuracy[i] = np.sum(np.where(Y_hat == Y_test, 1, 0)) / len(Y_test)
        models.append([mlm, X_test, Y_test, Y_hat])

    # Keep the run whose accuracy is closest to the mean over all runs.
    mlm, X_test, Y_test, Y_hat = models[
        (np.abs(accuracy - np.mean(accuracy))).argmin()]

    conf_matrix = confusion_matrix(Y_test, Y_hat)
    print("Confusion Matrix", conf_matrix)
    print(np.sum(np.where(Y_hat == Y_test, 1, 0)) / len(Y_test))

    labels = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(conf_matrix)
    plt.title("Confusion Matrix")
    fig.colorbar(cax)
    # Pin tick positions before labelling; the old [""] + labels trick relied
    # on the default locator and misplaces labels on current matplotlib.
    ax.set_xticks(range(len(labels)))
    ax.set_xticklabels(labels)
    ax.set_yticks(range(len(labels)))
    ax.set_yticklabels(labels)
    plt.xlabel("Predicted")
    plt.ylabel("Desired")
    plt.show()
def main():
    """Train MGD-MLM and plain MLM classifiers on the Wisconsin breast
    cancer dataset; report confusion matrices and accuracies for both."""
    dataset = np.genfromtxt('breast-cancer-wisconsin.data', delimiter=',')
    # Drop rows with missing values and the leading sample-id column.
    dataset = dataset[~np.isnan(dataset).any(axis=1), 1:11]
    # Map class labels {2, 4} -> {0, 1}.
    dataset[:, 9] = dataset[:, 9] / 2 - 1

    train, test = train_test_split(dataset, test_size=0.33)
    X_train = train[:, 0:9]
    Y_train = train[:, 9]
    X_test = test[:, 0:9]
    Y_test = test[:, 9]
    Y_train = Y_train.reshape((X_train.shape[0], 1))
    Y_test = Y_test.reshape((X_test.shape[0], 1))

    learning_rate = 0.00001
    training_iters = 10
    batch_size = 20
    mgdmlm = MGDMLM(learning_rate=learning_rate,
                    training_iters=training_iters,
                    batch_size=batch_size)
    # Use half of the training set as reference points.
    cost = mgdmlm.train(X_train, Y_train, int(round(len(X_train) * 0.5)))
    Y_hat = mgdmlm.predict(X_test)

    # Training-loss curve (assumes len(cost) == training_iters).
    plt.plot(range(training_iters), cost)
    plt.show()

    conf_matrix = confusion_matrix(Y_test, Y_hat)
    print(conf_matrix)
    # Accuracy for the 0/1 labels: 1 - mean absolute error.
    print(1 - np.mean(abs(Y_test - Y_hat)))

    labels = ['Benigno', 'Maligno']
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(conf_matrix)
    plt.title('Matriz de Confusão do Classificador')
    fig.colorbar(cax)
    # Pin tick positions before labelling; the old [''] + labels trick relied
    # on the default locator and misplaces labels on current matplotlib.
    ax.set_xticks(range(len(labels)))
    ax.set_xticklabels(labels)
    ax.set_yticks(range(len(labels)))
    ax.set_yticklabels(labels)
    plt.xlabel('Predito')
    plt.ylabel('Esperado')
    plt.show()

    # Baseline: plain MLM with the same number of reference points.
    mlm = MLM()
    mlm.train(X_train, Y_train, int(round(len(X_train) * 0.5)))
    Y_hat = mlm.predict(X_test)
    conf_matrix = confusion_matrix(Y_test, Y_hat)
    print(conf_matrix)
    print(1 - np.mean(abs(Y_test - Y_hat)))
def main():
    """Train an MLM on a CIFAR-100 subset, predicting coarse and fine
    labels jointly, and print a confusion matrix per label level."""
    train_images, train_labels_coarse, train_labels_fine, \
        test_images, test_labels_coarse, test_labels_fine = \
        cifar100(path="./data/cifar100")

    # Flatten images to feature vectors and collapse one-hot labels to ids.
    train_images = train_images.reshape((train_images.shape[0], -1))
    test_images = test_images.reshape((test_images.shape[0], -1))
    train_labels_coarse = np.argmax(train_labels_coarse, axis=1)
    train_labels_fine = np.argmax(train_labels_fine, axis=1)
    test_labels_coarse = np.argmax(test_labels_coarse, axis=1)
    test_labels_fine = np.argmax(test_labels_fine, axis=1)

    # Each target row is (coarse_label, fine_label).
    train_labels = np.stack((train_labels_coarse, train_labels_fine), axis=1)
    test_labels = np.stack((test_labels_coarse, test_labels_fine), axis=1)

    iters = 1
    accuracy = np.zeros((iters, 1))
    models = []
    for i in range(iters):
        # replace=False: sample distinct rows; the default samples with
        # replacement and puts duplicates into the train/test subsets.
        train_ids = np.random.choice(len(train_labels), 5000, replace=False)
        X_train = train_images[train_ids]
        Y_train = train_labels[train_ids]
        Y_train = Y_train.reshape(Y_train.shape[0], -1)

        test_ids = np.random.choice(len(test_labels), 1000, replace=False)
        X_test = test_images[test_ids]
        Y_test = test_labels[test_ids]
        Y_test = Y_test.reshape(Y_test.shape[0], -1)

        mlm = MLM()
        print("Training")
        # mlm.train(X_train, Y_train, 10000, 'kmedoids')
        mlm.train(X_train, Y_train, 500, 'rand')
        print("Testing")
        Y_hat = mlm.predict(X_test)

        # Mean over BOTH label columns so accuracy stays in [0, 1]; the old
        # code divided the element count by the row count only, which could
        # report values up to 2.0.
        accuracy[i] = np.mean(np.where(Y_hat == Y_test, 1, 0))
        models.append([mlm, X_test, Y_test, Y_hat])

    # Keep the run whose accuracy is closest to the mean over all runs.
    mlm, X_test, Y_test, Y_hat = models[
        (np.abs(accuracy - np.mean(accuracy))).argmin()]

    print(np.mean(np.where(Y_hat == Y_test, 1, 0)))
    conf_matrix_super_class = confusion_matrix(Y_test[:, 0], Y_hat[:, 0])
    print("Confusion Matrix", conf_matrix_super_class)
    conf_matrix_class = confusion_matrix(Y_test[:, 1], Y_hat[:, 1])
    print("Confusion Matrix", conf_matrix_class)
def main():
    """Train an MLM classifier on a random CIFAR-10 subset and report
    accuracy plus a confusion-matrix plot for the run closest to the mean."""
    train_images, train_labels, test_images, test_labels = \
        cifar10_web.cifar10(path=None)

    # Flatten images to feature vectors and collapse one-hot labels to ids.
    train_images = train_images.reshape((train_images.shape[0], -1))
    test_images = test_images.reshape((test_images.shape[0], -1))
    train_labels = np.argmax(train_labels, axis=1)
    test_labels = np.argmax(test_labels, axis=1)

    iters = 1
    accuracy = np.zeros((iters, 1))
    models = []
    for i in range(iters):
        # replace=False: sample distinct rows; the default samples with
        # replacement and puts duplicates into the train/test subsets.
        train_ids = np.random.choice(len(train_labels), 5000, replace=False)
        X_train = train_images[train_ids]
        Y_train = train_labels[train_ids]
        Y_train = Y_train.reshape(Y_train.shape[0], 1)

        test_ids = np.random.choice(len(test_labels), 100, replace=False)
        X_test = test_images[test_ids]
        Y_test = test_labels[test_ids]
        Y_test = Y_test.reshape(Y_test.shape[0], 1)

        mlm = MLM()
        # Use 80% of the training subset as reference points.
        mlm.train(X_train, Y_train, int(round(len(X_train) * 0.8)))
        Y_hat = mlm.predict(X_test)

        accuracy[i] = np.sum(np.where(Y_hat == Y_test, 1, 0)) / len(Y_test)
        models.append([mlm, X_test, Y_test, Y_hat])

    # Keep the run whose accuracy is closest to the mean over all runs.
    mlm, X_test, Y_test, Y_hat = models[
        (np.abs(accuracy - np.mean(accuracy))).argmin()]

    conf_matrix = confusion_matrix(Y_test, Y_hat)
    print("Confusion Matrix", conf_matrix)
    print(np.sum(np.where(Y_hat == Y_test, 1, 0)) / len(Y_test))

    labels = ["airplane", "automobile", "bird", "cat", "deer",
              "dog", "frog", "horse", "ship", "truck"]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(conf_matrix)
    plt.title("Confusion Matrix")
    fig.colorbar(cax)
    # Pin tick positions before labelling; the old [""] + labels trick relied
    # on the default locator and misplaces labels on current matplotlib.
    ax.set_xticks(range(len(labels)))
    ax.set_xticklabels(labels)
    ax.set_yticks(range(len(labels)))
    ax.set_yticklabels(labels)
    plt.xlabel("Predicted")
    plt.ylabel("Desired")
    plt.show()
# Fragment of a regression experiment (the enclosing function/script head is
# outside this view — presumably X_train/Y_train/X_test come from a split
# above; verify against the caller).
# Reshape targets to a column vector matching X_test's row count.
Y_test = Y_test.reshape((X_test.shape[0], 1))
# Standardize features using statistics fitted on the training split only.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Disabled SGD-MLM experiment kept for reference as a no-op string literal.
''' learning_rate = 0.00001 training_iters = 20 sgdmlm = SGDMLM(learning_rate=learning_rate, training_iters=training_iters) cost = sgdmlm.train(X_train, Y_train, int(round(len(X_train)*0.4))) Y_hat = sgdmlm.predict(X_test) plt.plot(range(training_iters), cost) plt.show() mse = (np.square(Y_test - Y_hat)).mean(axis=0) print(mse) '''
mlm = MLM()
# k=0.2 — presumably the fraction of training points used as reference
# points; TODO confirm against MLM.train's contract.
mlm.train(X_train, Y_train, k=0.2)
Y_hat = mlm.predict(X_test, method="nn")
# Regression error metrics: MSE, RMSE and a normalized MSE.
error = Y_test - Y_hat
mse = np.square(error).mean(axis=0)
print("MSE:", mse)
print("RMSE:", mse**(1 / 2))
nmse = np.mean(np.square(error) / (np.mean(Y_test) * np.mean(Y_hat)), axis=0)
print("NMSE: ", nmse)
# Fragment of an iris classification experiment: report the first model's
# confusion matrix, then retrain a plain MLM using ALL training points as
# reference points and report it the same way.
print(conf_matrix)
print(np.sum(np.where(Y_hat == Y_test, 1, 0)) / len(Y_test))

labels = ["Setosa", "Versicolor", "Virginica"]
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(conf_matrix)
plt.title("Matriz de Confusão do Classificador")
fig.colorbar(cax)
# Pin tick positions before labelling; the old [""] + labels trick relied
# on the default locator and misplaces labels on current matplotlib.
ax.set_xticks(range(len(labels)))
ax.set_xticklabels(labels)
ax.set_yticks(range(len(labels)))
ax.set_yticklabels(labels)
plt.xlabel("Predito")
plt.ylabel("Esperado")
plt.show()

mlm = MLM()
mlm.train(X_train, Y_train, len(X_train))
Y_hat = mlm.predict(X_test)
conf_matrix = confusion_matrix(Y_test, Y_hat)
print(conf_matrix)
print(np.sum(np.where(Y_hat == Y_test, 1, 0)) / len(Y_test))

labels = ["Setosa", "Versicolor", "Virginica"]
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(conf_matrix)
plt.title("Matriz de Confusão do Classificador")
fig.colorbar(cax)
ax.set_xticks(range(len(labels)))
ax.set_xticklabels(labels)
ax.set_yticks(range(len(labels)))
ax.set_yticklabels(labels)
from sklearn.utils.validation import check_X_y
import matplotlib.pyplot as plt

# Motorcycle-impact dataset: every column but the last is a feature,
# the last column is the regression target.
mydata = np.genfromtxt('/Users/sauloafoliveira/Dropbox/thesis_code/mcycle.csv',
                       delimiter=",")
X, y = mydata[:, :-1].reshape(-1, 1), mydata[:, -1].reshape(-1, 1)
X, y = check_X_y(X, y, multi_output=True)

# Standardize the input feature.
scaler = StandardScaler().fit(X)
X = scaler.transform(X)

# Compare two reference-point selection strategies on the same data.
mlm1 = MLM(selector=KSSelection())
mlm1.fit(X, y)
r = mlm1.score(X, y)

mlm2 = MLM(selector=NLSelection())
mlm2.fit(X, y)
s = mlm2.score(X, y)

print(r, s)
print(mlm1.sparsity(), mlm2.sparsity())

# 2x2 grid with a shared y-axis for the comparison plots below.
f, ax = plt.subplots(2, 2, sharey=True)

from sklearn.neighbors import KNeighborsRegressor
from sklearn.svm import SVR
# Fragment of a diabetes classification experiment: report the first model's
# confusion matrix, then retrain a plain MLM with 40% of the training points
# as reference points and report it the same way.
print(conf_matrix)
print(np.sum(np.where(Y_hat == Y_test, 1, 0)) / len(Y_test))

labels = ["Terá diabetes", "Não terá diabetes"]
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(conf_matrix)
plt.title("Matriz de Confusão do Classificador")
fig.colorbar(cax)
# Pin tick positions before labelling; the old [""] + labels trick relied
# on the default locator and misplaces labels on current matplotlib.
ax.set_xticks(range(len(labels)))
ax.set_xticklabels(labels)
ax.set_yticks(range(len(labels)))
ax.set_yticklabels(labels)
plt.xlabel("Predito")
plt.ylabel("Esperado")
plt.show()

mlm = MLM()
mlm.train(X_train, Y_train, int(round(len(X_train) * 0.4)))
Y_hat = mlm.predict(X_test)
conf_matrix = confusion_matrix(Y_test, Y_hat)
print(conf_matrix)
print(np.sum(np.where(Y_hat == Y_test, 1, 0)) / len(Y_test))

labels = ["Terá diabetes", "Não terá diabetes"]
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(conf_matrix)
plt.title("Matriz de Confusão do Classificador")
fig.colorbar(cax)
ax.set_xticks(range(len(labels)))
ax.set_xticklabels(labels)
ax.set_yticks(range(len(labels)))
ax.set_yticklabels(labels)
def main():
    """Compare SGD-MLM and plain MLM classifiers on a random MNIST subset,
    plotting the SGD training loss and a confusion matrix for each model."""
    # Flatten 28x28 images into 784-dim feature vectors.
    train_images = mnist.train_images()
    train_images = train_images.reshape(
        (train_images.shape[0], train_images.shape[1] * train_images.shape[2]))
    train_labels = mnist.train_labels()
    test_images = mnist.test_images()
    test_images = test_images.reshape(
        (test_images.shape[0], test_images.shape[1] * test_images.shape[2]))
    test_labels = mnist.test_labels()

    # replace=False: sample distinct rows; the default samples with
    # replacement and puts duplicates into the train/test subsets.
    train_ids = np.random.choice(len(train_labels), 5000, replace=False)
    X_train = train_images[train_ids]
    Y_train = train_labels[train_ids]
    Y_train = Y_train.reshape(Y_train.shape[0], 1)
    test_ids = np.random.choice(len(test_labels), 100, replace=False)
    X_test = test_images[test_ids]
    Y_test = test_labels[test_ids]
    Y_test = Y_test.reshape(Y_test.shape[0], 1)

    # Standardize with statistics fitted on the training subset only.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    learning_rate = 0.000001
    training_iters = 5
    sgdmlm = SGDMLM(learning_rate=learning_rate, training_iters=training_iters)
    # 20% of the training subset as reference points.
    cost = sgdmlm.train(X_train, Y_train, int(round(len(X_train) * 0.2)))
    Y_hat = sgdmlm.predict(X_test)

    # Show first/last loss and the full loss curve.
    print(cost[0])
    print(cost[-1])
    plt.plot(np.arange(0, training_iters), cost)
    plt.xlabel("Epoch #")
    plt.ylabel("Loss")
    plt.show()

    print(np.sum(np.where(Y_hat == Y_test, 1, 0)) / len(Y_test))
    conf_matrix = confusion_matrix(Y_test, Y_hat)
    # print("Confusion Matrix", conf_matrix)

    labels = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(conf_matrix)
    plt.title("Confusion Matrix")
    fig.colorbar(cax)
    # Pin tick positions before labelling; the old [""] + labels trick relied
    # on the default locator and misplaces labels on current matplotlib.
    ax.set_xticks(range(len(labels)))
    ax.set_xticklabels(labels)
    ax.set_yticks(range(len(labels)))
    ax.set_yticklabels(labels)
    plt.xlabel("Predicted")
    plt.ylabel("Desired")
    plt.show()

    # Baseline: plain MLM with the same number of reference points.
    mlm = MLM()
    mlm.train(X_train, Y_train, int(round(len(X_train) * 0.2)))
    Y_hat = mlm.predict(X_test)
    print(np.sum(np.where(Y_hat == Y_test, 1, 0)) / len(Y_test))
    conf_matrix = confusion_matrix(Y_test, Y_hat)
    # print("Confusion Matrix", conf_matrix)

    labels = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(conf_matrix)
    plt.title("Confusion Matrix")
    fig.colorbar(cax)
    ax.set_xticks(range(len(labels)))
    ax.set_xticklabels(labels)
    ax.set_yticks(range(len(labels)))
    ax.set_yticklabels(labels)
    plt.xlabel("Predicted")
    plt.ylabel("Desired")
    plt.show()
# Fragment of a 3-class classification experiment: report the first model's
# confusion matrix, then retrain a plain MLM using ALL training points as
# reference points and report it the same way.
print(conf_matrix)
print(np.sum(np.where(Y_hat == Y_test, 1, 0)) / len(Y_test))

labels = ["1", "2", "3"]
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(conf_matrix)
plt.title("Matriz de Confusão do Classificador")
fig.colorbar(cax)
# Pin tick positions before labelling; the old [""] + labels trick relied
# on the default locator and misplaces labels on current matplotlib.
ax.set_xticks(range(len(labels)))
ax.set_xticklabels(labels)
ax.set_yticks(range(len(labels)))
ax.set_yticklabels(labels)
plt.xlabel("Predito")
plt.ylabel("Esperado")
plt.show()

mlm = MLM()
mlm.train(X_train, Y_train, len(X_train))
Y_hat = mlm.predict(X_test)
conf_matrix = confusion_matrix(Y_test, Y_hat)
print(conf_matrix)
print(np.sum(np.where(Y_hat == Y_test, 1, 0)) / len(Y_test))

labels = ["1", "2", "3"]
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(conf_matrix)
plt.title("Matriz de Confusão do Classificador")
fig.colorbar(cax)
ax.set_xticks(range(len(labels)))
ax.set_xticklabels(labels)
ax.set_yticks(range(len(labels)))
ax.set_yticklabels(labels)
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from mlm import MinimalLearningMachine as MLM
from mlm.selectors import KSSelection, NLSelection

# NOTE(review): load_boston was removed in scikit-learn 1.2 — this script
# presumably targets an older scikit-learn; confirm the pinned version.
boston = datasets.load_boston()
X, y = boston.data, boston.target

# Standardize all features before fitting.
scaler = StandardScaler().fit(X)
X = scaler.transform(X)

# Fit three MLMs differing only in reference-point selection, scoring each
# (R^2) on the same data it was fitted on.
mlm1 = MLM()
mlm1.fit(X, y)
mlm_r1 = mlm1.score(X, y)

mlm2 = MLM(selector=KSSelection())
mlm2.fit(X, y)
mlm_r2 = mlm2.score(X, y)

mlm3 = MLM(selector=NLSelection())
mlm3.fit(X, y)
mlm_r3 = mlm3.score(X, y)

print(f'RN: R2 of {round(mlm_r1, 2)} with sparsity of {mlm1.sparsity()}')
print(f'KS: R2 of {round(mlm_r2, 2)} with sparsity of {mlm2.sparsity()}')
print(f'NL: R2 of {round(mlm_r3, 2)} with sparsity of {mlm3.sparsity()}')