def compare_to_ann():
    """Regression problem on two functions (sin, square) with added noise,
    using batch learning with a 2-layer neural network."""
    epochs = 5000
    hidden_neurons = 63  # same number as RBF nodes
    output_neurons = 1

    sin, square = generate_data.sin_square(verbose=verbose)
    sin = add_noise_to_data(sin)
    square = add_noise_to_data(square)

    data = square  # use which dataset to train and test
    # batch learning: a single batch holding every training sample
    batch_size = data.train_X.shape[0]

    ann = ANN(epochs, batch_size, hidden_neurons, output_neurons)
    y_pred = ann.solve(data.train_X, data.train_Y, data.test_X, data.test_Y)

    # mean absolute error over the test set
    abs_residuals = 0.
    for target, prediction in zip(data.test_Y, y_pred):
        abs_residuals += np.abs(target - prediction)
    test_error = abs_residuals / data.test_Y.shape[0]

    print('Test error: ', test_error)
    plotter.plot_2d_function(data.test_X, data.test_Y, y_pred=y_pred)
def train_noisy_test_clean():
    """Regression problem on two functions (sin, square), trained on noisy
    data but tested on clean data.

    Fixes: the plotting section referenced an undefined name ``data``
    (NameError at runtime), and the test/plot calls used the *noisy* test
    set even though ``clean_data`` was generated for exactly this purpose
    and the stated goal is to evaluate on clean data.
    """
    method = 'least_squares'  # delta_rule , least_squares
    learning_rate = 0.001  # optimal value: 0.01 bigger overshoots, smaller underperforms

    sin, square = generate_data.sin_square(verbose=verbose)
    clean_data, _ = generate_data.sin_square(verbose=verbose)  # untouched copy for testing
    noisy_data = add_noise_to_data(sin)

    random_mu = False
    network_size = 63
    sigma = 0.5

    # TRAIN ON NOISY DATA
    rbf_net = RBF_Net(network_size, noisy_data.train_X, random_mu=random_mu, sigma=sigma)
    y_train_pred, train_error = rbf_net.train(noisy_data.train_X, noisy_data.train_Y,
                                              method, lr=learning_rate)

    # TEST ON CLEAN DATA (was noisy_data.test_*, contradicting the docstring)
    y_pred, test_error = rbf_net.test(clean_data.test_X, clean_data.test_Y)

    print('# Nodes: ', network_size, ' sigma: ', sigma)
    print('Train error: ', train_error)
    print('Test error: ', test_error)

    # plotter.plot_errors(sigmas, errors, title='Test error')
    plotter.plot_2d_function(noisy_data.train_X, noisy_data.train_Y,
                             y_pred=y_train_pred, title='Train')
    plotter.plot_2d_function(clean_data.test_X, clean_data.test_Y,
                             y_pred=y_pred, title='Test')
def part_3_2():
    """Regression problem on two functions (sin, square) with added noise,
    using online (sequential) learning with the delta rule.

    Fixes: ``errors`` and ``y_preds`` were created once before the
    ``iterations`` loop, so every ``iter_results[i]`` entry aliased the
    same two ever-growing lists; they are now reset per iteration so each
    entry holds only that iteration's results. Also removed a dead
    ``network_size = 63`` assignment that was immediately shadowed by the
    inner loop variable.
    """
    method = 'delta_rule'  # delta_rule , least_squares
    learning_rate = 0.001  # optimal value: 0.01 bigger overshoots, smaller underperforms

    sin, square = generate_data.sin_square(verbose=verbose)
    data = sin  # use which dataset to train and test
    noisy_data = add_noise_to_data(data)
    data = noisy_data

    random_mu = False
    iterations = 1
    iter_results = {}

    # number of RBF nodes in the network
    network_size_list = [63]  # [2, 5, 10, 20, 30, 40, 50, 63]  # NOTE: larger than sample size
    sigmas = [0.25]  # , 0.5, 0.6, 0.75, 0.8, 0.9, 1.]
    # sigmas = [0.001, 0.1, 1., 10.]

    for i in range(iterations):
        # fresh accumulators each iteration (avoids aliasing across entries)
        errors = []
        y_preds = []
        for sigma in sigmas:
            for network_size in network_size_list:
                rbf_net = RBF_Net(network_size, data.train_X,
                                  random_mu=random_mu, sigma=sigma)
                y_train_pred, train_error = rbf_net.train(data.train_X, data.train_Y,
                                                          method, lr=learning_rate)
                y_pred, test_error = rbf_net.test(data.test_X, data.test_Y)

                print('# Nodes: ', network_size, ' sigma: ', sigma)
                print('Train error: ', train_error)
                print('Test error: ', test_error)

                errors.append(test_error)
                y_preds.append(y_pred)

                # plotter.plot_2d_function(data.train_X, data.train_Y, y_pred=y_train_pred, title='Train')
                plotter.plot_2d_function(data.test_X, data.test_Y,
                                         y_pred=y_pred, title='Test')

        iter_results[i] = [errors, y_preds]
def part_3_1():
    """Regression problem on two functions (sin, square) using batch
    learning with least squares.

    Fixes: the filtering condition compared data objects with ``==``
    where object identity was intended — on array-backed data ``==`` is
    elementwise/ambiguous. Now uses ``is`` and short-circuits on the
    cheap ``filter_output`` flag first.
    """
    method = 'least_squares'

    sin, square = generate_data.sin_square(verbose=verbose)
    data = sin  # use which dataset to train and test
    filter_output = False
    errors = []

    # number of RBF nodes in the network
    network_size_list = [2, 5, 10, 20, 30, 40, 50, 63]  # NOTE: smaller than sample size

    for network_size in network_size_list:
        rbf_net = RBF_Net(network_size, data.train_X)
        y_train_pred, train_error = rbf_net.train(data.train_X, data.train_Y, method)
        y_pred, test_error = rbf_net.test(data.test_X, data.test_Y)

        # if you want to get perfect results for square, filter the output
        # in the same manner as the data
        if filter_output and data is square:
            y_pred = np.where(y_pred >= 0, 1, -1)
            test_error = rbf_net.calc_abs_res_error(data.test_Y, y_pred)

        print('# Nodes: ', network_size)
        print('Train error: ', train_error)
        print('Test error: ', test_error)
        errors.append(test_error)

    plotter.plot_errors(network_size_list, errors, title='Test error')
    plotter.plot_2d_function(data.train_X, data.train_Y, y_pred=y_train_pred, title='Train')
    plotter.plot_2d_function(data.test_X, data.test_Y, y_pred=y_pred, title='Test')
def part_3_3():
    """Regression problem on two functions (sin, square) using batch
    learning with least squares."""
    method = 'least_squares'
    network_size = 10  # number of RBF nodes in the network  # NOTE: larger than sample size

    sin, square = generate_data.sin_square(verbose=verbose)
    data = sin  # use which dataset to train and test

    # one plain RBF network, and a second whose RBF centres are repositioned
    # by vector quantization before training
    baseline_net = RBF_Net(network_size, data.train_X)
    vq_net = RBF_Net(network_size, data.train_X)
    quantizer = VecQuantization(vq_net.RBF_Layer, iterations=1000,
                                step_size=0.2, neighbor_bool=True)
    vq_net.RBF_Layer = quantizer.move_RBF(data.train_X)

    report_error(data, method, baseline_net, "normal")
    report_error(data, method, vq_net, "with VQ")