def plot_RBF_grid_search(data, weights=[0.6, 0.8, 1, 1.2], n_nodes=[10, 15, 20, 25],
                         learning_mode=LearningMode.BATCH,
                         centers_sampling=CentersSampling.LINEAR):
    results = {}
    for n in n_nodes:
        for w in weights:
            rbf_net = RBF(centers_sampling, n_nodes=n, n_inter=n, sigma=w)
            if learning_mode == LearningMode.BATCH:
                y_hat, error = rbf_net.batch_learning(data.x, data.y, data.x_test, data.y_test)
            else:
                y_hat, error = rbf_net.delta_learning(data.x, data.y, data.x_test, data.y_test,
                                                      max_iters=20, lr=0.001)
            results[(rbf_net.n_nodes, w)] = error
    keys = np.array(list(results.keys()))
    plt.scatter(keys[:, 0], keys[:, 1], c=list(results.values()), cmap='tab20b', s=200)
    plt.xlabel('units')
    plt.ylabel('width')
    plt.title('Absolute residual error for different RBF configurations')
    plt.colorbar()
    plt.show()
    return results
class IterRBF(mathtools.IterSurface):

    def __init__(self, Rn0=0, stopR=0.05, coatingstep=0.003, points=[]):
        mathtools.IterSurface.__init__(self, Rn0, stopR, coatingstep)
        self.points = points
        self.rbf = RBF('r3', self.points)
        self.rbf.make()

    def update(self):
        self.rbf._points[:, 0:3] = self.rbf._points[:, 0:3] - self.coatingstep * self.rbf._points[:, 3:6]
        return

    def f(self, p):
        return self.rbf.f(p)

    def df(self, p):
        return self.rbf.df(p)

    def f_array(self, p):
        result = []
        for pi in p:
            result.append(self.f(pi))
        return result

    def find_iter(self, p0):
        n = abs(self.f(p0))
        if n <= 1e-2:
            return
        for i in arange(self._Rn0, self.stopR, self.coatingstep):
            self.rbf._points[:, 0:3] = self.rbf._points[:, 0:3] - self.coatingstep * self.rbf._points[:, 3:6]
            n = abs(self.f(p0))
            if n <= 1e-2:
                self._Rn = i + self.coatingstep
                break
        else:
            # for-else: the loop ran out of steps without ever getting close to p0
            raise ValueError('Outside rbf')
        return

    def criteria(self):
        return self._Rn < self.stopR

    def findnextparallel(self, p):
        self.find_iter(p)
        self.update()
        return

    def name(self):
        return 'IterRBF'
def main():
    (X, Y) = preProcessing_Data("./data/semeion.data")
    scaler = MinMaxScaler()
    X_norm = scaler.fit_transform(X)
    # Y_norm = scaler.fit_transform(Y)
    # rbfSize = 64
    # kmeans = KMeans(n_clusters=rbfSize).fit(X_norm)
    # print(kmeans.cluster_centers_)
    # print(np.shape(kmeans.cluster_centers_))
    # rbf = RBF(X_norm, Y, kmeans.cluster_centers_, hL_size=rbfSize, oL_size=10)
    # rbf.learningPhase_2()
    kf = KFold(n_splits=5)
    fold = 1
    for train_index, test_index in kf.split(X_norm):
        X_train, X_test = X[train_index], X[test_index]
        Y_train, Y_test = Y[train_index], Y[test_index]
        rbfSize = 16
        print(fold, "º Fold")
        while rbfSize <= 72:
            kmeans = KMeans(n_clusters=rbfSize).fit(X_train)
            rbf = RBF(X_train, Y_train, kmeans.cluster_centers_, hL_size=rbfSize, oL_size=10)
            rbf.learningPhase_2()
            Y_pred = []
            for i in X_test:
                Y_pred.append(rbf.predict(i))
            score = accuracy_score(Y_test, np.array(Y_pred))
            print("\tRBF Size: ", rbfSize, "| Score: ", score)
            rbfSize += 16
        fold += 1
        print("=======================")
def test_XOR(self):
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    T = np.array([[0], [1], [1], [0]])
    rbf = RBF(centers=X)  # centers are the data points themselves
    rbf.fit(X, T)
    prediction = rbf.predict(X)
    self.assertTrue(np.all((prediction > 0.5) == T))
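# Several snippets in this section exercise an RBF class whose implementation
# is not shown. A minimal sketch of what such a class could look like, assuming
# Gaussian units centered on given points and a least-squares output layer
# (the class name and internals below are illustrative, not any repo's actual code):
import numpy as np

class GaussianRBFSketch:
    def __init__(self, centers, sigma=1.0):
        self.centers = np.asarray(centers, dtype=float)
        self.sigma = sigma
        self.w = None

    def _design(self, X):
        # Gaussian activations from squared distances between inputs and centers.
        d2 = ((np.asarray(X, dtype=float)[:, None, :] - self.centers[None, :, :]) ** 2).sum(-1)
        return np.exp(-d2 / (2 * self.sigma ** 2))

    def fit(self, X, T):
        # Least-squares solution for the linear output weights.
        self.w, *_ = np.linalg.lstsq(self._design(X), T, rcond=None)
        return self

    def predict(self, X):
        return self._design(X) @ self.w

# With centers == X, the Gaussian design matrix is square and positive definite,
# so the fit interpolates the targets exactly and a test like test_XOR passes.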
def plot_error_delta(data, n_nodes=10, lr=0.01, max_iters=100):
    rbf_net = RBF(CentersSampling.LINEAR, n_nodes=n_nodes)
    y_hat, error = rbf_net.delta_learning(data.x, data.y, data.x_test, data.y_test,
                                          max_iters=max_iters, lr=lr)
    print(f'Error on the test set: {error}')
    plt.plot(rbf_net.training_errors)
    plt.xlabel("Sample number")
    plt.ylabel('absolute residual error')
    plt.title(f'Train error for delta rule with {lr} lr')
    plt.show()
def test_sin(self):
    n = 10000
    X = np.random.rand(n).reshape(-1, 1)
    noise = 0.3
    T = 0.5 * np.sin(4 * np.pi * X) + 0.5 + np.random.normal(size=n, scale=noise).reshape(-1, 1)
    rbf = RBF(n_centers=20, activation='gaussian', sigma=0.05)
    rbf.fit(X, T)
    Tp = rbf.predict(X)
    error = RMSE(Tp, T)
    # Xp = np.linspace(0, 1, 1000).reshape(-1, 1)
    # Tp = rbf.predict(Xp)
    # plt.scatter(X, T)
    # plt.plot(Xp, Tp, c='y')
    # plt.show()
    epsilon = 0.005
    self.assertTrue(error < noise + epsilon)
def experiment(data, learning_mode, centers_sampling, n_nodes=None, error=None,
               n=20, n_iter=3, weight=1.0, drop=2**9 - 1, sigma=1.0, neigh=1,
               max_iter=20, lr=0.1):
    rbf_net = RBF(centers_sampling, n_nodes=n, n_inter=n_iter, weight=weight,
                  drop=drop, x=data.x, sigma=sigma)
    if learning_mode == LearningMode.BATCH:
        y_hat, err = rbf_net.batch_learning(data.x, data.y, data.x_test, data.y_test)
    elif learning_mode == LearningMode.DELTA:
        y_hat, err = rbf_net.delta_learning(data.x, data.y, data.x_test, data.y_test,
                                            lr=lr, max_iters=max_iter)
    else:
        y_hat, err = rbf_net.hybrid_learning(data.x, data.y, data.x_test, data.y_test,
                                             lr=lr, neigh=neigh, max_iters=max_iter)
    if n_nodes is not None and error is not None:
        n_nodes.append(rbf_net.n_nodes)
        error.append(err)
    return y_hat, err, rbf_net
def main():
    ## generate data and define inputs
    mu = np.arange(0, 2 * math.pi, 0.1)
    sigma = 0.1
    x_train = np.arange(0, 2 * math.pi, 0.1)
    x_test = np.arange(0.05, 2 * math.pi, 0.1)

    #
    #! DELTA RULE
    #! LEAST SQUARE
    ## init rbf class
    dim = mu.shape[0]
    rbf_LS = RBF(dim)
    ## Generate data
    sinus, square = rbf_LS.generateData(x_train)
    sinus_test, square_test = rbf_LS.generateData(x_test)
    ## Init and train.
    weights = rbf_LS.initWeights()
    weights, error = rbf_LS.train_DELTA(x_train, square, weights, mu, sigma)
    ## Evaluation
    print(rbf_LS)
    y_test = rbf_LS.evaluation_DELTA(x_test, weights, mu, sigma)
    # y_test = threshold(y_test)
    re = residualError(y_test, square_test)
    plt.figure('Least Square Error')
    plt.plot(x_test, y_test, label='Approximation')
    plt.plot(x_test, square_test, label='True value')
    plt.title('Least Square Error')
    plt.legend()
    plt.show()
def set_higher_order_params(self, params, order=ORDER):
    if ORTHOGONAL:
        if RBF_GAUSSIAN:
            self.second_order[0] = RBF(params[:order])
            self.second_order[1] = RBF(params[order:order * 2])
            self.second_order[2] = RBF(params[order * 2:])
        else:
            self.second_order[0] = Polynomial(params[:order])
            self.second_order[1] = Polynomial(params[order:order * 2])
            self.second_order[2] = Polynomial(params[order * 2:])
    else:
        count = 0
        for i in range(order):
            for j in range(order - i):
                for k in range(order - i - j):
                    for dim in range(3):
                        self.second_order[dim][i, j, k] = params[-count]
                        count += 1
def error_estimate_delta(data, n_nodes=20, sigma=0.5):
    errors = []
    for seed in range(20):
        rbf_net = RBF(CentersSampling.LINEAR, seed=seed, n_nodes=n_nodes, sigma=sigma)
        y_hat, error = rbf_net.delta_learning(data.x, data.y, data.x_test, data.y_test,
                                              seed=seed, max_iters=100, lr=0.1)
        errors.append(error)
    print(f'Error for delta rule avg: {np.mean(errors)}, variance: {np.var(errors)}')
def __init__(self):
    # Build one entry per spatial dimension. A list comprehension (rather than
    # [obj] * 3) keeps the three entries distinct objects, which matters when
    # set_higher_order_params mutates them element-wise in the non-orthogonal case.
    if ORTHOGONAL:
        if RBF_GAUSSIAN:
            self.second_order = [RBF([0]) for _ in range(3)]
        else:
            self.second_order = [Polynomial([0]) for _ in range(3)]
    else:
        self.second_order = [np.zeros((ORDER, ORDER, ORDER)) for _ in range(3)]
    self.centre = np.zeros(3)
    self.transform = np.identity(3)
    self.gaussians = None
def test_reg(self):
    # sinusoidal function
    def f(x):
        return 0.5 * np.sin(4 * np.pi * x) + 0.5

    # train on data noisily following f
    n = 80
    X = np.random.rand(n).reshape(-1, 1)
    noise = 0.05
    T = f(X) + np.random.normal(size=n, scale=noise).reshape(-1, 1)
    rbf = RBF(n_centers=20, activation='gaussian', sigma=0.05, lambdaReg=20.)
    rbf.fit(X, T)
    xl = np.linspace(0, 1, 1000).reshape(-1, 1)
    yl = rbf.predict(xl)
    # plt.scatter(X, T)       # training data
    # plt.plot(xl, f(xl))     # true curve
    # plt.plot(xl, yl)        # learned curve
    # plt.show()
    epsilon = 0.01
    true_error = RMSE(yl, f(xl))
    self.assertLess(true_error, noise + epsilon)
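# The lambdaReg argument above suggests a ridge-regularized fit. A hedged
# sketch of that solve, assuming a design matrix Phi of RBF activations and
# targets T (the names and the closed form below are illustrative assumptions,
# not the tested class's internals):
#
#     w = (Phi^T Phi + lambda * I)^{-1} Phi^T T
import numpy as np

def ridge_rbf_weights(Phi, T, lam):
    # Regularized normal equations; lam * I keeps the system well conditioned
    # and shrinks the weights, trading a little bias for less variance.
    return np.linalg.solve(Phi.T @ Phi + lam * np.eye(Phi.shape[1]), Phi.T @ T)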
def load_model(self, xml_file):
    """Load the models and the modeling configurations that were used.

    Args:
        xml_file: (str) xml file path.

    Examples:
        >>> load_model('xml_path.xml')
    """
    path = dirname(dirname(dirname(xml_file)))
    try:
        xml = ET.parse(open(xml_file))
    except IOError:
        raise IOError('Model could not be loaded. Call method BladeModeling::make_model')

    if xml.find('iter_surface').find('type').text != 'None':
        self.model_iter_surface = getattr(mathtools, xml.find('iter_surface').find('type').text)(
            float(xml.find('iter_surface').find('Rn').text), 0, 0)
        self.models_index = ast.literal_eval(xml.find('iter_surface').find('switching_parameters').text)
    else:
        self.model_iter_surface = None

    self.intersection_between_divisions = float(xml.find('intersection_between_divisions').text)
    self.number_of_points_per_model = int(xml.find('number_of_points_per_model').text)
    self.models = []
    for xml_model in xml.findall('interpolation'):
        if xml_model.find('type').text == 'RBF':
            points = loadtxt(join(path, xml_model.find('points').text), delimiter=',')
            eps = float(xml_model.find('eps').text)
            kernel = xml_model.find('kernel').text
            gausse = None
            if xml_model.find("gauss_parameter") is not None:
                gausse = float(xml_model.find("gauss_parameter").text)
            model = RBF(kernel, points, eps, gausse)
            model._w = loadtxt(join(path, xml_model.find('w').text), delimiter=',')
            self.models.append(model)
        else:
            raise TypeError('This type of interpolation has not been implemented yet')
    return
def plot_estimate(data, centers_sampling=CentersSampling.LINEAR, learning_type='batch',
                  n_nodes=20, delta_max_iters=100, sigma=0.5, delta_lr=0.1, weight=1):
    rbf_net = RBF(centers_sampling, n_nodes=n_nodes, sigma=sigma, weight=weight)
    if learning_type == 'batch':
        y_hat, error = rbf_net.batch_learning(data.x, data.y, data.x_test, data.y_test)
    else:
        y_hat, error = rbf_net.delta_learning(data.x, data.y, data.x_test, data.y_test,
                                              max_iters=delta_max_iters, lr=delta_lr)
    centers, n_nodes = rbf_net.centers, rbf_net.n_nodes
    plt.plot(data.x_test, data.y_test, label="Target")
    plt.plot(data.x_test, y_hat, label="Estimate")
    plt.scatter(centers, [0] * n_nodes, c="r", label="RBF Centers")
    plt.xlabel("x")
    plt.ylabel("y")
    if sigma != 0.5:
        plt.title(f'{learning_type}, {n_nodes} units, {sigma} width, error= {round(error, 5)}')
    else:
        plt.title(f'{learning_type} learning, {n_nodes} RBF units, error= {round(error, 5)}')
    plt.legend()
    plt.grid(True)
    plt.show()
def test_sin_redundancy(self):
    n = 1000
    X1 = np.random.rand(n).reshape(-1, 1)
    X2 = np.random.rand(n).reshape(-1, 1)  # redundant dimension
    X = np.concatenate([X1, X2], axis=1)
    noise = 0.05
    T = 0.5 * np.sin(4 * np.pi * X1) + 0.5 + np.random.normal(size=n, scale=noise).reshape(-1, 1)
    # rbf train
    rbf = RBF(n_centers=150, activation='gaussian', sigma=0.3, lambdaReg=1e-6)
    rbf.fit(X, T)
    # predict
    Tp = rbf.predict(X)
    error = RMSE(Tp, T)
    # Xp1 = np.linspace(0, 1, 1000).reshape(-1, 1)
    # Xp2 = np.random.rand(1000).reshape(-1, 1)  # random 2nd co-ordinate
    # Xp = np.concatenate([Xp1, Xp2], axis=1)
    # Tp = rbf.predict(Xp)
    # plt.scatter(X1, T)
    # plt.plot(Xp1.reshape(-1, 1), Tp, c='y')
    # plt.show()
    epsilon = 0.01
    self.assertTrue(error < noise + epsilon)
def Train():
    global MLPObj, PrepareObj, RBFObj, PCAObj, sc_x
    MLPObj = MLP()
    PrepareObj = Preparation()
    RBFObj = RBF()
    PCAObj = pca()
    x_train, y_train, x_test, y_test, Original_x_train, Original_x_test = PrepareObj.GetDataset(
        "C:\\Users\\Lenovo-PC\\Desktop\\neural-network-course\\Project\\Data set\\Training",
        "C:\\Users\\Lenovo-PC\\Desktop\\neural-network-course\\Project\\Data set\\Testing"
    )
    if NNPCAVar.get():
        PCAObj.LoadWeights()
        x_train = PCAObj.transform(Original_x_train)
        x_test = PCAObj.transform(Original_x_test)

    from sklearn.preprocessing import StandardScaler
    sc_x = StandardScaler()
    x_train = sc_x.fit_transform(x_train)
    x_test = sc_x.transform(x_test)

    if LoadTrainVar.get():
        if AlgoVar.get():
            MLPObj.TrainTheModel(Hidden_Entry.get(), epochs_Entry.get(), LearningRate_Entry.get(),
                                 Neurons_Entry.get(), Activation_Entry.get(), MSE_Entry.get(),
                                 var.get(), x_train, y_train, x_test, y_test)
        else:
            RBFObj.TrainTheModel_rbf(Neurons_Entry.get(), LearningRate_Entry.get(), MSE_Entry.get(),
                                     epochs_Entry.get(), 5, x_train, y_train, x_test, y_test)
    else:
        if AlgoVar.get():
            MLPObj.LoadWeights(Hidden_Entry.get(), epochs_Entry.get(), LearningRate_Entry.get(),
                               Neurons_Entry.get(), Activation_Entry.get(), MSE_Entry.get(),
                               var.get(), x_train, y_train, x_test, y_test)
        else:
            RBFObj.LoadWeights(Neurons_Entry.get(), LearningRate_Entry.get(), MSE_Entry.get(),
                               epochs_Entry.get(), 5, x_train, y_train, x_test, y_test)
def square_LS(x_test, x_train, mu, sigma):
    dim = mu.shape[0]
    rbf = RBF(dim)
    _, square = rbf.generateData(x_train, noise=True)
    _, square_test = rbf.generateData(x_test, noise=True)
    ## Init and train.
    weights = rbf.initWeights()
    weights, error = rbf.train_LS(x_train, square, weights, mu, sigma)
    ## Evaluation
    y_test = rbf.evaluation_LS(x_test, weights, mu, sigma)
    residual_error = np.sum(abs(y_test - square_test)) / y_test.shape[0]
    return residual_error, y_test, square_test
def sinus_delta(x_test, x_train, mu, sigma):
    dim = mu.shape[0]
    rbf = RBF(dim)
    ## Generate data
    sinus, _ = rbf.generateData(x_train, noise=True)
    sinus_test, _ = rbf.generateData(x_test, noise=True)
    sinus_test = sinus_test.reshape((sinus_test.shape[0], 1))
    ## Init and train.
    weights = rbf.initWeights()
    weights, _, _ = rbf.train_DELTA(x_train, weights, mu, sigma)
    ## Evaluation
    y_test = rbf.evaluation_DELTA(x_test, weights, mu, sigma)
    residual_error = np.sum(abs(y_test - sinus_test)) / y_test.shape[0]
    return residual_error, y_test, sinus_test
def square_delta(x_test, x_train, mu, sigma):
    dim = mu.shape[0]
    rbf = RBF(dim)
    _, square = rbf.generateData(x_train, noise=True)
    _, square_test = rbf.generateData(x_test, noise=True)
    square_test = square_test.reshape((square_test.shape[0], 1))
    ## Init and train.
    weights = rbf.initWeights()
    weights, _, _ = rbf.train_DELTA(x_train, weights, mu, sigma, sinus_type=False)
    ## Evaluation
    y_test = rbf.evaluation_DELTA(x_test, weights, mu, sigma)
    residual_error = np.sum(abs(y_test - square_test)) / y_test.shape[0]
    return residual_error, y_test, square_test
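# train_DELTA is not shown in this section; a minimal sketch of the sequential
# delta-rule (LMS) update it presumably performs, where phi_fn maps one sample
# to its vector of RBF activations (phi_fn, eta, and the epoch loop are
# illustrative assumptions, not the actual training code):
#
#     w <- w + eta * (t - phi(x)^T w) * phi(x)
import numpy as np

def delta_rule_sketch(X, T, phi_fn, n_weights, eta=0.01, epochs=20, seed=0):
    rng = np.random.default_rng(seed)
    w = rng.normal(scale=0.1, size=n_weights)
    for _ in range(epochs):
        for x, t in zip(X, T):
            phi = phi_fn(x)                  # RBF activations for this sample
            w += eta * (t - phi @ w) * phi   # step along the per-sample error gradient
    return w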
class Particle:

    def __init__(self, positions, strategy_parameters):
        self.current_position = positions
        self.strategy_parameters = strategy_parameters
        self.best_particle = self.current_position
        self.fitness = 1000000000.0
        self.rbf = RBF(10)

    def aply_function_on_current_position(self, hidden_neurons_number=10):
        # Slice the flat position vector into the RBF's centers, radii, and weights.
        self.rbf.centers = self.current_position[0:hidden_neurons_number]
        self.rbf.radius = self.current_position[hidden_neurons_number:hidden_neurons_number * 2]
        self.rbf.weights = self.current_position[hidden_neurons_number * 2:hidden_neurons_number * 3]
        self.fitness = self.rbf.calculate_fitness()

    def clone_particle(self):
        clone_object = copy.copy(self)
        clone_object.current_position = copy.deepcopy(self.current_position)
        clone_object.strategy_parameters = copy.deepcopy(self.strategy_parameters)
        clone_object.fitness = copy.deepcopy(self.fitness)
        clone_object.rbf = copy.deepcopy(self.rbf)
        return clone_object
def initialize_population(self):
    for i in range(num_of_individuos):
        solution = ES(RBF())
        self.population(solution)
with open('rabbit_test.csv', newline='') as f:
    r = csv.reader(f)
    for row in r:
        if row:
            test.append(row)

c = list(zip(input, target))
shuffle(c)
input = [x[0] for x in c]
target = [x[1] for x in c]
input = np.array(input, np.float64)
target = np.array(target, np.float64)
test = np.transpose(np.array(test, np.float64))
print(len(input))

net = RBF(len(input), args.n, args.sigma)
net.train(input, target, args.reg)

x_list = [x[0] for x in oinput]
y_list = [net.predict(x) for x in oinput]
yo_list = [x for x in otarget]
f = lambda x: 233.846 * (1 - np.exp(-0.00604 * x))
yf_list = [f(x) for x in x_list]
error = sum([(y_list[i] - yo_list[i]) ** 2 for i in range(len(y_list))]) / len(y_list)
print("Error: ", error)

plt.plot(x_list, y_list, label='Prediction', c='b')
plt.plot(x_list, yo_list, label='Data', c='r')
plt.plot(x_list, yf_list, label='Provided model', c='y')
plt.scatter(test[0], test[1], label='Test set', c='g', marker='.')
plt.xlabel("Age")
plt.ylabel("Weight")
import numpy as np
import matplotlib.pyplot as plt

from rbf import RBF

# create test data
x = np.linspace(0, 10, 100)
y = np.sin(x)

# predict with the RBF network
model = RBF(hidden_shape=10, sigma=1.)
model.fit(x, y)
y_pred = model.predict(x)

# plot the results
plt.plot(x, y, 'b-', label='test')
plt.plot(x, y_pred, 'r-', label='RBF')
plt.legend(loc='upper right')
plt.title('Interpolation using an RBF network')
plt.show()
discount = 0.999  # using a high discount factor
valueFunction = ValueFunction(alpha, numOfTilings)

## feature map
features = []
# centers = np.array([[0.0, 0.0], [0.0, 0.2], [0.0, 0.4], [0.0, 0.6], [0.0, 0.8], [0.0, 1.0],
#                     [0.25, 0.0], [0.25, 0.25], [0.25, 0.5], [0.25, 0.75], [0.25, 1.0],
#                     [0.5, 0.0], [0.5, 0.25], [0.5, 0.5], [0.5, 0.75], [0.5, 1.0],
#                     [0.75, 0.0], [0.75, 0.25], [0.75, 0.5], [0.75, 0.75], [0.75, 1.0],
#                     [1.0, 0.0], [1.0, 0.25], [1.0, 0.5], [1.0, 0.75], [1.0, 1.0]])
centers = generate_grid_centers(rbf_grid_size)
print(centers)
widths = 0.15 * np.ones(len(centers))
rbfun = RBF(centers, widths, env.action_space.n)
fMap = Rbf_2D_Feature_Map(rbfun)

# generate plot of rbf activations
# x = np.linspace(0, 1)
# y = np.ones(len(x))
# activations = []
# for i, x_i in enumerate(x):
#     activations.append(fMap.map_features([x_i, y[i]]))
# print(activations)
# plt.plot(x, activations)
# plt.show()

writer = open("data/mcar_mwal_seed" + str(seed) + "_demos" + str(reps), "w")
    plot_segments_true(histograms, args.image, num_bins, args.output)
else:
    f_params = {
        'sigma': args.variance,
        'basis_size': args.size,
        'epsilon': 0.1,
        'mu': [],
        'alpha': [0, 0],
        'y': histograms,
        'tau': 1.2
    }
    f_params['mu'].append(np.linspace(0, 255, f_params['basis_size']))
    f_params['mu'].append(np.linspace(0, 255, f_params['basis_size']))
    optim_params = {
        'method': args.method,
        'mxitr': 10,
        'tol_g': 1e-8,
        'tol_x': 1e-8,
        'tol_f': 1e-8
    }
    f1 = RBF(f_params['basis_size'], f_params['sigma'], f_params['y'][0].flatten())
    f2 = RBF(f_params['basis_size'], f_params['sigma'], f_params['y'][1].flatten())
    train(optim_params, f_params, 100, f1, f2)
    plot_segments(f1, f2, f_params, args.image, num_bins, args.output)
def gerar_rbf(qtd_lags, numCent, beta):
    from rbf import RBF
    rbf = RBF(qtd_lags, numCent, beta)
    return rbf
test = form.getvalue('test')

# decode the incoming data
datosEntrenamiento = json.loads(entrenamiento)
datosTest = json.loads(test)
datosRed = json.loads(datos)
entradasEntrenamiento = datosEntrenamiento['entradas']
salidasEntrenamiento = datosEntrenamiento['salidas']
entradasTest = datosTest['entradas']
numNeuronasCapaOculta = int(datosRed['NumCapasOcultas'])
numEntradas = len(entradasEntrenamiento[0])

# create the RBF network
rbf = RBF(numEntradas, numNeuronasCapaOculta, entradasEntrenamiento)

# training
rbf.entrenar(entradasEntrenamiento, salidasEntrenamiento)

# training response
respuestaEntrenamiento = rbf.sim(entradasEntrenamiento)

# test response
respuestaTest = rbf.sim(entradasTest)

# encode the responses
print(json.dumps({'respuestaentrenamiento': respuestaEntrenamiento, 'respuestatest': respuestaTest}))
def error_estimate_batch(data, n_nodes=20, sigma=0.5):
    # pass the sigma argument through rather than a hard-coded 0.5
    rbf_net = RBF(CentersSampling.LINEAR, n_nodes=n_nodes, sigma=sigma)
    y_hat, error = rbf_net.batch_learning(data.x, data.y, data.x_test, data.y_test)
    print(f'Error for batch learning: {error}')
verts2, faces2 = read("test2.obj")
verts2_index = [265, 995, 1247, 1248, 100, 591,
                792, 671, 1252, 1250,
                574, 610, 785, 578, 32, 812,
                92, 1228, 800, 1236, 1242, 620,
                619, 607]

writeWithColor(verts1, faces1, verts1_index, "out1.obj")
writeWithColor(verts2, faces2, verts2_index, "out2.obj")

# RBF deformation
original_control_points = verts1[verts1_index, ...]
deformed_control_points = verts2[verts2_index, ...]
# RBF function
func_name = "gaussian_spline"
# RBF radius
radius = getRadius(verts1)
print("mesh1:{0},mesh2:{1}\n".format(radius, getRadius(verts2)))
rbf = RBF(original_control_points, deformed_control_points, func_name, 2)  # radius
new_verts = rbf(verts1)
writeWithColor(new_verts, faces1, verts1_index, "deformed.obj")
print("new radius:", getRadius(new_verts))

''' plot 3D '''
# X = np.zeros((3, faces.shape[0]))
# Y = np.zeros((3, faces.shape[0]))
# Z = np.zeros((3, faces.shape[0]))
# for i in range(faces.shape[0]):
#     X[0, i] = verts[faces[i, 0] - 1, 0]
#     X[1, i] = verts[faces[i, 1] - 1, 0]
#     X[2, i] = verts[faces[i, 2] - 1, 0]
#     Y[0, i] = verts[faces[i, 0] - 1, 1]
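# For reference, the deformation above can be reproduced with plain RBF
# interpolation of the control-point displacements. A self-contained sketch
# with a Gaussian kernel (rbf_deform and its radius handling are assumptions;
# the RBF class used above may weight and normalize differently):
import numpy as np

def rbf_deform(verts, src_ctrl, dst_ctrl, radius=1.0):
    def kernel(a, b):
        # Gaussian of pairwise squared distances between two point sets.
        d2 = ((a[:, None, :] - b[None, :, :]) ** 2).sum(-1)
        return np.exp(-d2 / (radius ** 2))
    G = kernel(src_ctrl, src_ctrl)               # Gram matrix over control points
    W = np.linalg.solve(G, dst_ctrl - src_ctrl)  # per-axis displacement weights
    return verts + kernel(verts, src_ctrl) @ W   # apply interpolated displacements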
hit_rates = []
no_of_attributes = dataset.shape[1] - 1
no_of_classes = len(dataset[0, no_of_attributes])

# insert bias
no_rows = dataset.shape[0]
dataset = np.c_[-1 * np.ones(no_rows), dataset]

# perceptron = Perceptron(no_of_classes, no_of_attributes, 5, 'logistic')
for j in range(0, 20):
    print("realization %d" % j)
    train_X, train_y, test_X, test_y = Classifier.train_test_split(dataset)
    train_X = np.array(train_X, dtype=float)
    test_X = np.array(test_X, dtype=float)
    sigma, center = RBF.model_training(no_of_classes, no_of_attributes, train_X, train_y)
    rbf = RBF(no_of_classes, no_of_attributes, center, sigma)
    rbf.train(train_X, train_y)
    predictions = rbf.predict(test_X)
    hit_rates.append(rbf.evaluate(test_y, predictions))
    print(rbf.confusion_matrix(test_y, predictions))
    # rbf.plot_decision_boundaries(train_X, train_y, test_X, test_y, rbf, j)

print('hit rates: {}'.format(hit_rates))
print('accuracy: {}'.format(np.mean(hit_rates)))
print('std: {}'.format(np.std(hit_rates)))
# RBF.show_plot_decision_boundaries()
""" Get Central points from SOM network """ som = SOM(2, 2, 2) print('Running SOM network...') som.train(input_list, num_epochs=200, init_learning_rate=0.3) som_result = [np.array(som.output[i]) for i in range(len(som.output))] """ Create RBF objects and run RBF network """ print('RBF with random c, updated by LMS: \n') rbf_1 = RBF(input_list, output_list, 9, 'random', som_c_list=None, update='lms') rbf_1.run() print("-------------------------------------") print('RBF with random c, updated by SGA: \n') rbf_2 = RBF(input_list, output_list, 9, 'random', som_c_list=None, update='sga') rbf_2.run() print("-------------------------------------") print('RBF with som c, updated by LMS: \n') rbf_3 = RBF(input_list, output_list, 9, 'som', som_c_list=som_result, update='lms') rbf_3.run() # Extra test """ Smaller number of central points (4)