def init_LWPR(self):
    """Create one single-input/single-output LWPR model per joint.

    Populates ``self.lwpr`` with ``self.joints_num`` independent
    ``LWPR(1, 1)`` models, each configured with the same hyper-parameters:
    a very wide initial distance metric that is adapted online, a fixed
    learning rate, no meta (second-order) learning, and a near-zero
    smoothness penalty.

    Side effects:
        Rebinds ``self.lwpr`` to a fresh list; any previous models are
        discarded.
    """
    self.lwpr = []
    for _ in range(self.joints_num):
        lwpr = LWPR(1, 1)                 # 1 input dim -> 1 output dim
        lwpr.init_D = 10000 * np.eye(1)   # initial receptive-field distance metric
        lwpr.update_D = True              # adapt the metric during training
        lwpr.init_alpha = 10 * np.eye(1)  # learning rate for metric updates
        lwpr.meta = False                 # no second-order (meta) learning
        lwpr.penalty = 0.000000001        # near-zero smoothness penalty
        self.lwpr.append(lwpr)
t = np.arange(0, time, dt) Ytr = np.sin(t) Xtr = np.exp(-ax * t / time) * (Ytr[-1] - Ytr[0]) # print Xtr # print Ytr Xtr = Xtr.reshape((Ntr, 1)) Ytr = Ytr.reshape((Ntr, 1)) # initialize the LWPR model model = LWPR(1, 1) model.init_D = 10000 * np.eye(1) model.update_D = True model.init_alpha = 40 * np.eye(1) model.meta = True model.penalty = 0.00000000001 print model # train the model # for k in range(20): # ind = np.random.permutation(Ntr) # mse = 0 # for i in range(Ntr): # yp = model.update(Xtr[ind[i]], Ytr[ind[i]]) # mse = mse + (Ytr[ind[i], :] - yp)**2 # nMSE = mse / Ntr / np.var(Ytr) # print "#Data: %5i #RFs: %3i nMSE=%5.3f" % (model.n_data, model.num_rfs, nMSE) for i in range(Ntr):
def train_lwpr(datafile, resultfolder, max_num_train, patience_list, improvement_threshold, init_lwpr_setting, hist_window, start_epoch=0, cmd_scaler=1.0, modelfile='lwpr_model'):
    """Train two 1-output LWPR models (left / right wheel) on wheel data.

    Loads train/validation arrays from a .mat file, incrementally trains one
    LWPR model per wheel, evaluates on the validation set after every epoch,
    checkpoints models and results to ``resultfolder``, and applies an
    early-stopping "patience" rule.

    Parameters:
        datafile: path to a .mat file with keys 'train_data_x',
            'train_data_y', 'valid_data_x', 'valid_data_y'.
        resultfolder: subfolder (of the CWD) for checkpoints/results;
            created if missing.
        max_num_train: maximum number of training epochs.
        patience_list: [initial_patience, patience_growth_factor].
        improvement_threshold: relative RMSE improvement needed to count an
            epoch as a new best model.
        init_lwpr_setting: indexable of LWPR hyper-parameters
            [init_D, init_alpha, penalty, w_gen, w_prune, cmd_scale_factor].
        hist_window: [speed_history_window, cmd_history_window]; input
            dimension is 2*(speed_hw + cmd_hw).
        start_epoch: if >= 1, resume from the epoch-(start_epoch-1)
            checkpoint files instead of initializing fresh models.
        cmd_scaler: multiplier applied to the command columns of the inputs.
        modelfile: base name for the saved .bin model files.

    Returns:
        best_model_epoch: epoch index of the best (lowest RMSE) model.
    """
    curr_path = os.getcwd()
    # Make sure the output folder exists before any checkpoint is written.
    if resultfolder in os.listdir(curr_path):
        print "subfolder exists"
    else:
        print "Not Exist, so make subfolder"
        os.mkdir(resultfolder)
    # Load Data
    dataset = loadmat(datafile)
    train_data_x, train_data_y = dataset['train_data_x'], dataset['train_data_y']
    valid_data_x, valid_data_y = dataset['valid_data_x'], dataset['valid_data_y']
    num_data, num_valid = train_data_x.shape[0], valid_data_x.shape[0]
    # Input layout: first 2*speed_hw columns are speed history, the rest are
    # command history (presumably one pair per wheel — TODO confirm).
    speed_hw, cmd_hw = hist_window[0], hist_window[1]
    input_dim = 2*(speed_hw+cmd_hw)
    # normalize command part (scale only the command columns, in place)
    train_data_x[:, 2*speed_hw:] = train_data_x[:, 2*speed_hw:] * cmd_scaler
    valid_data_x[:, 2*speed_hw:] = valid_data_x[:, 2*speed_hw:] * cmd_scaler
    # Set-up Parameters/Model for Training Procedure
    max_num_trials = max_num_train
    improvement_threshold = improvement_threshold
    error_hist, best_model_error, prev_train_time = [], np.inf, 0
    initD, initA, penalty = init_lwpr_setting[0], init_lwpr_setting[1], init_lwpr_setting[2]
    w_gen, w_prune = init_lwpr_setting[3], init_lwpr_setting[4]
    best_model_epoch = 0
    if start_epoch < 1:
        # Initialize Two 1-Dimensional Models
        LWPR_model_left = LWPR(input_dim, 1)
        #LWPR_model_left.init_D = initD * np.eye(input_dim)
        # Diagonal metric: command dims get a different scale
        # (init_lwpr_setting[5]) than speed dims.
        tmp_arr = np.ones(input_dim)
        tmp_arr[input_dim-2*cmd_hw:input_dim] = init_lwpr_setting[5]
        LWPR_model_left.init_D = initD * np.diag(tmp_arr)
        LWPR_model_left.update_D = False # True
        #LWPR_model_left.init_alpha = initA * np.eye(input_dim)
        tmp_arr = np.ones(input_dim)
        tmp_arr[input_dim-2*cmd_hw:input_dim] = init_lwpr_setting[5]
        LWPR_model_left.init_alpha = initA * np.diag(tmp_arr)
        LWPR_model_left.penalty = penalty
        LWPR_model_left.meta = True
        LWPR_model_left.meta_rate = 20
        LWPR_model_left.w_gen = w_gen
        LWPR_model_left.w_prune = w_prune
        # Right-wheel model: identical configuration.
        LWPR_model_right = LWPR(input_dim, 1)
        #LWPR_model_right.init_D = initD * np.eye(input_dim)
        tmp_arr = np.ones(input_dim)
        tmp_arr[input_dim-2*cmd_hw:input_dim] = init_lwpr_setting[5]
        LWPR_model_right.init_D = initD * np.diag(tmp_arr)
        LWPR_model_right.update_D = False # True
        #LWPR_model_right.init_alpha = initA * np.eye(input_dim)
        tmp_arr = np.ones(input_dim)
        tmp_arr[input_dim-2*cmd_hw:input_dim] = init_lwpr_setting[5]
        LWPR_model_right.init_alpha = initA * np.diag(tmp_arr)
        LWPR_model_right.penalty = penalty
        LWPR_model_right.meta = True
        LWPR_model_right.meta_rate = 20
        LWPR_model_right.w_gen = w_gen
        LWPR_model_right.w_prune = w_prune
        patience = patience_list[0]
    else:
        # Resume: reload both models and the previous epoch's result file.
        modelfile_name = './' + resultfolder + '/' + modelfile + '_left_epoch' + str(start_epoch-1) + '.bin'
        LWPR_model_left = LWPR(modelfile_name)
        print '\tRead LWPR model for left wheel(%d)' % (LWPR_model_left.num_rfs[0])
        modelfile_name = './' + resultfolder + '/' + modelfile + '_right_epoch' + str(start_epoch-1) + '.bin'
        LWPR_model_right = LWPR(modelfile_name)
        print '\tRead LWPR model for right wheel(%d)' % (LWPR_model_right.num_rfs[0])
        result_file_name = './' + resultfolder + '/Result_of_training_epoch' + str(start_epoch-1) + '.mat'
        result_file = loadmat(result_file_name)
        prev_train_time = result_file['train_time']
        patience = result_file['patience']
        best_model_error = result_file['best_model_error']
        # Rebuild the validation-error history up to the resume point.
        for cnt in range(start_epoch):
            error_hist.append([result_file['history_validation_error'][cnt][0], result_file['history_validation_error'][cnt][1], result_file['history_validation_error'][cnt][2]])
    # Training Part
    model_prediction = np.zeros(valid_data_y.shape)
    # Reused column-vector buffers for a single input/target sample.
    tmp_x, tmp_y = np.zeros((input_dim, 1)), np.zeros((1,1))
    print 'start training'
    start_train_time = timeit.default_timer()
    for train_cnt in range(start_epoch, max_num_trials):
        # Early stopping: quit once the epoch counter passes the patience.
        if patience < train_cnt:
            break
        # One epoch = one shuffled pass over the training set, updating both
        # models online with the same input and per-wheel targets.
        rand_ind = np.random.permutation(num_data)
        for data_cnt in range(num_data):
            tmp_x[:,0] = train_data_x[rand_ind[data_cnt], 0:input_dim]
            tmp_y[0,0] = train_data_y[rand_ind[data_cnt], 0]
            _ = LWPR_model_left.update(tmp_x, tmp_y)
            tmp_y[0,0] = train_data_y[rand_ind[data_cnt], 1]
            _ = LWPR_model_right.update(tmp_x, tmp_y)
            if data_cnt % 5000 == 0:
                print '\ttrain epoch %d, data index %d, #rfs=%d/%d' % (train_cnt, data_cnt, LWPR_model_left.num_rfs, LWPR_model_right.num_rfs)
        # Validation pass: predict both wheels for every validation sample.
        for data_cnt in range(num_valid):
            tmp_x[:,0] = valid_data_x[data_cnt, 0:input_dim]
            model_prediction[data_cnt, 0], _ = LWPR_model_left.predict_conf(tmp_x)
            model_prediction[data_cnt, 1], _ = LWPR_model_right.predict_conf(tmp_x)
        diff = abs(valid_data_y - model_prediction)
        # new_error = [mean-abs error, RMSE, max-abs error] over both wheels.
        new_error = np.asarray([np.sum(diff)/float(num_valid), np.sqrt(np.sum(diff**2)/float(num_valid)), np.max(diff)])
        error_hist.append([new_error[0], new_error[1], new_error[2]])
        # save result of one training epoch
        modelfile_name = './' + resultfolder + '/' + modelfile + '_left_epoch' + str(train_cnt) + '.bin'
        LWPR_model_left.write_binary(modelfile_name)
        modelfile_name = './' + resultfolder + '/' + modelfile + '_right_epoch' + str(train_cnt) + '.bin'
        LWPR_model_right.write_binary(modelfile_name)
        # New best model (by RMSE, with relative improvement threshold):
        # extend patience and save an extra "best" checkpoint pair.
        if new_error[1] < best_model_error * improvement_threshold:
            best_model_epoch = train_cnt
            best_model_error = new_error[1]
            patience = max(patience, min(train_cnt+10, int(train_cnt * patience_list[1])) )
            modelfile_name = './' + resultfolder + '/' + modelfile + '_best_left_epoch' + str(train_cnt) + '.bin'
            LWPR_model_left.write_binary(modelfile_name)
            modelfile_name = './' + resultfolder + '/' + modelfile + '_best_right_epoch' + str(train_cnt) + '.bin'
            LWPR_model_right.write_binary(modelfile_name)
        # Persist the full training state for this epoch (enables resume).
        result_file_name = './' + resultfolder + '/Result_of_training_epoch' + str(train_cnt) + '.mat'
        result = {}
        result['train_time'] = timeit.default_timer() - start_train_time + prev_train_time
        result['best_model_error'] = best_model_error
        result['history_validation_error'] = error_hist
        result['patience'] = patience
        result['improvement_threshold'] = improvement_threshold
        result['init_D'] = initD
        result['init_alpha'] = initA
        result['penalty'] = penalty
        result['w_generate_criterion'] = w_gen
        result['w_prune_criterion'] = w_prune
        result['number_speed_in_input'] = 2*speed_hw
        result['number_cmd_in_input'] = 2*cmd_hw
        savemat(result_file_name, result)
        print '\n\tSave Intermediate Result Successfully'
        # NOTE(review): error_hist[train_cnt] assumes the history list is
        # aligned one-entry-per-epoch from epoch 0 — holds for both the
        # fresh and the resumed path above.
        print '\t%d-th learning : #Data=%d/%d, #rfs=%d/%d, error=%f\n' %(train_cnt, LWPR_model_left.n_data, LWPR_model_right.n_data, LWPR_model_left.num_rfs, LWPR_model_right.num_rfs, error_hist[train_cnt][1])
    print 'end training'
    return best_model_epoch
def testfunc(x): return 10 * sin(7.8 * log(1 + x)) / (1 + 0.1 * x**2) Ntr = 500 Xtr = 10 * random.random((Ntr, 1)) Ytr = 5 + testfunc(Xtr) + 0.1 * random.normal(0, 1, (Ntr, 1)) * Xtr # initialize the LWPR model model = LWPR(1, 1) model.init_D = 20 * eye(1) model.update_D = True model.init_alpha = 40 * eye(1) model.meta = False model.penalty = 1e-4 model.diag_only = True # train the model for k in range(20): ind = random.permutation(Ntr) mse = 0 for i in range(Ntr): yp = model.update(Xtr[ind[i]], Ytr[ind[i]]) mse = mse + (Ytr[ind[i], :] - yp)**2 nMSE = mse / Ntr / var(Ytr) print "#Data: %5i #RFs: %3i nMSE=%5.3f" % (model.n_data, model.num_rfs, nMSE)