def main():
    (X, Y) = preProcessing_Data("./data/semeion.data")
    scaler = MinMaxScaler()
    X_norm = scaler.fit_transform(X)
    #Y_norm = scaler.fit_transform(Y)

    #rbfSize = 64
    #kmeans = KMeans(n_clusters=rbfSize).fit(X_norm)
    #print(kmeans.cluster_centers_)
    #print(np.shape(kmeans.cluster_centers_))
    #rbf = RBF(X_norm, Y, kmeans.cluster_centers_, hL_size=rbfSize, oL_size=10)
    #rbf.learningPhase_2()

    kf = KFold(n_splits=5)
    fold = 1
    for train_index, test_index in kf.split(X_norm):
        # index into the normalized features, not the raw ones
        X_train, X_test = X_norm[train_index], X_norm[test_index]
        Y_train, Y_test = Y[train_index], Y[test_index]
        rbfSize = 16
        print("Fold", fold)
        while rbfSize <= 72:
            kmeans = KMeans(n_clusters=rbfSize).fit(X_train)
            rbf = RBF(X_train, Y_train, kmeans.cluster_centers_, hL_size=rbfSize, oL_size=10)
            rbf.learningPhase_2()
            Y_pred = []
            for i in X_test:
                Y_pred.append(rbf.predict(i))
            score = accuracy_score(Y_test, np.array(Y_pred))
            print("\tRBF Size:", rbfSize, "| Score:", score)
            rbfSize += 16
        fold += 1
        print("=======================")
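# preProcessing_Data is not defined in these snippets. Below is a minimal
# sketch of what it plausibly does for semeion.data, where each row holds
# 256 pixel intensities of a 16x16 digit image followed by a 10-column
# one-hot label; the function name and column split are assumptions based
# on how X and Y are used in main() above.
import numpy as np

def preProcessing_Data_sketch(path):
    data = np.loadtxt(path)   # whitespace-separated floats, one digit per row
    X = data[:, :256]         # pixel features
    Y = data[:, 256:]         # one-hot labels for digits 0-9
    return X, Y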
def test_XOR(self):
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    T = np.array([[0], [1], [1], [0]])
    rbf = RBF(centers=X)  # centers are the data points themselves
    rbf.fit(X, T)
    prediction = rbf.predict(X)
    self.assertTrue(np.all((prediction > 0.5) == T))
def test_sin(self):
    n = 10000
    X = np.random.rand(n).reshape(-1, 1)
    noise = 0.3
    T = 0.5 * np.sin(4 * np.pi * X) + 0.5 + np.random.normal(size=n, scale=noise).reshape(-1, 1)

    rbf = RBF(n_centers=20, activation='gaussian', sigma=0.05)
    rbf.fit(X, T)
    Tp = rbf.predict(X)
    error = RMSE(Tp, T)

    # Xp = np.linspace(0, 1, 1000).reshape(-1, 1)
    # Tp = rbf.predict(Xp)
    # plt.scatter(X, T)
    # plt.plot(Xp, Tp, c='y')
    # plt.show()

    epsilon = 0.005
    self.assertTrue(error < noise + epsilon)
def test_reg(self):
    # sinusoidal function
    def f(x):
        return 0.5 * np.sin(4 * np.pi * x) + 0.5

    # train on data noisily following f
    n = 80
    X = np.random.rand(n).reshape(-1, 1)
    noise = 0.05
    T = f(X) + np.random.normal(size=n, scale=noise).reshape(-1, 1)

    rbf = RBF(n_centers=20, activation='gaussian', sigma=0.05, lambdaReg=20.)
    rbf.fit(X, T)

    xl = np.linspace(0, 1, 1000).reshape(-1, 1)
    yl = rbf.predict(xl)

    # plt.scatter(X, T)      # training data
    # plt.plot(xl, f(xl))    # true curve
    # plt.plot(xl, yl)       # learned curve
    # plt.show()

    epsilon = 0.01
    true_error = RMSE(yl, f(xl))
    self.assertLess(true_error, noise + epsilon)
def test_sin_redundancy(self):
    n = 1000
    X1 = np.random.rand(n).reshape(-1, 1)
    X2 = np.random.rand(n).reshape(-1, 1)  # redundant dimension
    X = np.concatenate([X1, X2], axis=1)
    noise = 0.05
    T = 0.5 * np.sin(4 * np.pi * X1) + 0.5 + np.random.normal(size=n, scale=noise).reshape(-1, 1)

    # rbf train
    rbf = RBF(n_centers=150, activation='gaussian', sigma=0.3, lambdaReg=1e-6)
    rbf.fit(X, T)

    # predict
    Tp = rbf.predict(X)
    error = RMSE(Tp, T)

    # Xp1 = np.linspace(0, 1, 1000).reshape(-1, 1)
    # Xp2 = np.random.rand(1000).reshape(-1, 1)  # random 2nd coordinate
    # Xp = np.concatenate([Xp1, Xp2], axis=1)
    # Tp = rbf.predict(Xp)
    # plt.scatter(X1, T)
    # plt.plot(Xp1.reshape(-1, 1), Tp, c='y')
    # plt.show()

    epsilon = 0.01
    self.assertTrue(error < noise + epsilon)
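# The four tests above exercise an RBF estimator with an sklearn-style
# fit/predict interface plus an RMSE helper, neither of which is shown
# here. This is a minimal sketch under stated assumptions: Gaussian
# activations, centers drawn as a random subset of the training data when
# none are given, and a closed-form ridge solve whose strength is lambdaReg.
# The constructor argument names (centers, n_centers, sigma, lambdaReg)
# mirror the test calls; the class name RBFSketch marks it as an
# illustration, not the code under test.
import numpy as np

def RMSE(pred, target):
    # plain root-mean-squared error, as the tests appear to assume
    return np.sqrt(np.mean((pred - target) ** 2))

class RBFSketch:
    def __init__(self, n_centers=10, activation='gaussian',
                 sigma=1.0, lambdaReg=0.0, centers=None):
        self.n_centers = n_centers
        self.sigma = sigma
        self.lambdaReg = lambdaReg
        self.centers = None if centers is None else np.asarray(centers, float)

    def _design(self, X):
        # Gaussian kernel matrix between inputs and centers
        d2 = ((X[:, None, :] - self.centers[None, :, :]) ** 2).sum(axis=2)
        return np.exp(-d2 / (2.0 * self.sigma ** 2))

    def fit(self, X, T):
        if self.centers is None:
            idx = np.random.choice(len(X), self.n_centers, replace=False)
            self.centers = X[idx]
        Phi = self._design(X)
        # ridge-regularized least squares for the output-layer weights
        A = Phi.T @ Phi + self.lambdaReg * np.eye(Phi.shape[1])
        self.W = np.linalg.solve(A, Phi.T @ T)
        return self

    def predict(self, X):
        return self._design(X) @ self.W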
for row in r:
    if row:
        test.append(row)

# shuffle the (input, target) pairs together so they stay aligned
c = list(zip(input, target))
shuffle(c)
input = [x[0] for x in c]
target = [x[1] for x in c]

input = np.array(input, np.float64)
target = np.array(target, np.float64)
test = np.transpose(np.array(test, np.float64))
print(len(input))

net = RBF(len(input), args.n, args.sigma)
net.train(input, target, args.reg)

x_list = [x[0] for x in oinput]
y_list = [net.predict(x) for x in oinput]
yo_list = [x for x in otarget]

# reference model provided with the dataset (rabbit weight vs. age)
f = lambda x: 233.846 * (1 - np.exp(-0.00604 * x))
yf_list = [f(x) for x in x_list]

# mean squared error of the prediction against the original targets
error = sum([(y_list[i] - yo_list[i])**2 for i in range(len(y_list))]) / len(y_list)
print("Error: ", error)

plt.plot(x_list, y_list, label='Prediction', c='b')
plt.plot(x_list, yo_list, label='Data', c='r')
plt.plot(x_list, yf_list, label='Provided model', c='y')
plt.scatter(test[0], test[1], label='Test set', c='g', marker='.')
plt.xlabel("Age")
plt.ylabel("Weight")
plt.legend(loc='upper left')
#plt.savefig("graphs/rabbit_"+str(args.n)+"_neurons_"+str(args.eta)+".png", bbox_inches='tight')
plt.show()
import numpy as np
import matplotlib.pyplot as plt

from rbf import RBF

# generate test data
x = np.linspace(0, 10, 100)
y = np.sin(x)

# fit and predict with the RBF network
model = RBF(hidden_shape=10, sigma=1.)
model.fit(x, y)
y_pred = model.predict(x)

# plot the results
plt.plot(x, y, 'b-', label='test')
plt.plot(x, y_pred, 'r-', label='RBF')
plt.legend(loc='upper right')
plt.title('Interpolation with an RBF network')
plt.show()
hit_rates = []
no_of_attributes = dataset.shape[1] - 1
no_of_classes = len(dataset[0, no_of_attributes])

# insert bias
no_rows = dataset.shape[0]
dataset = np.c_[-1 * np.ones(no_rows), dataset]

# perceptron = Perceptron(no_of_classes, no_of_attributes, 5, 'logistic')
for j in range(0, 20):
    print("realization %d" % j)
    train_X, train_y, test_X, test_y = Classifier.train_test_split(dataset)
    train_X = np.array(train_X, dtype=float)
    test_X = np.array(test_X, dtype=float)

    sigma, center = RBF.model_training(no_of_classes, no_of_attributes, train_X, train_y)
    rbf = RBF(no_of_classes, no_of_attributes, center, sigma)
    rbf.train(train_X, train_y)

    predictions = rbf.predict(test_X)
    hit_rates.append(rbf.evaluate(test_y, predictions))
    print(rbf.confusion_matrix(test_y, predictions))
    # rbf.plot_decision_boundaries(train_X, train_y, test_X, test_y, rbf, j)

print('hit rates: {}'.format(hit_rates))
print('accuracy: {}'.format(np.mean(hit_rates)))
print('std: {}'.format(np.std(hit_rates)))
# RBF.show_plot_decision_boundaries()
import numpy as np
import matplotlib.pyplot as plt

from rbf import RBF

# Generate 100 random data points
X = np.random.uniform(0., 1., 100)
X = np.sort(X, axis=0)

# Generate the output from the random data:
# sin(2 * PI * X) + noise
noise = np.random.uniform(-0.1, 0.1, 100)
Y = np.sin(2 * np.pi * X) + noise

# Create and train the network
learning_rate = 0.01  # learning rate
epochs = 100          # epochs
k = 2                 # number of neurons
rbf = RBF(k=k, learning_rate=learning_rate, epochs=epochs)
rbf.train(X, Y)

# Prediction
predictions = rbf.predict(X)

# Plot training data vs. prediction
plt.plot(X, Y, '-o', label='train')
plt.plot(X, predictions, '-o', label='predict')
plt.legend()
plt.tight_layout()
plt.savefig('train-and-predict.png')
ys = h(xs)
noise = np.random.normal(0, 1, ys.shape)
ys_noise = ys + noise

# shuffle inputs and targets together so the (x, y) pairs stay aligned
pairs = list(zip([[x] for x in xs], ys_noise))
shuffle(pairs)
input = np.array([p[0] for p in pairs], np.float64)
target = np.array([p[1] for p in pairs], np.float64)

# grid search over number of centers, kernel width, and regularization
ns = [10, 20, 25, 50, 60, 70, 80]
sigmas = [0.1, 0.25, 0.5, 0.75, 1, 100, 200, 300, 400, 500]
regs = [0, 1, 0.5, 0.1, 0.01, 0.001]
for n in ns:
    for sigma in sigmas:
        for reg in regs:
            net = RBF(len(input), n, sigma)
            net.train(input, target, reg)
            approx = [net.predict(x) for x in xs]

            plt.plot(xs, ys, label='Function', c='b')
            plt.plot(xs, ys_noise, label='Noisy function', c='g')
            plt.plot(xs, approx, label='Approximation', c='r')
            plt.legend(loc='upper left')

            # mean squared error against the noisy targets
            error = sum([(ys_noise[i] - approx[i])**2 for i in range(len(ys_noise))]) / len(ys_noise)
            print("Centers: {} | Sigma: {} | Lambda: {} | Error: {}".format(n, sigma, reg, error))

            plt.savefig("graphs/centers_" + str(n) + "_sigma_" + str(sigma) +
                        "_lambda_" + str(reg) + ".png", bbox_inches='tight')
            plt.clf()