import collections

import numpy as np
import tensorflow as tf

# ut, th and sh are project-local helper modules (general utilities,
# batch/feed preparation and loss helpers). They, together with Model,
# tracker, gpu_config, mode and the dataset/index globals used below, are
# defined elsewhere in the repo.


def test_data(sess, params, X, Y, index_list, S_list, R_L_list, F_list, e,
              pre_test, n_batches):
    """Evaluate the Kalman-LSTM variant: reports filter, prediction and
    measurement losses, count-weighted over all valid frames."""
    is_test = 1
    dic_state = ut.get_state_list(params)
    batch_size = params["batch_size"]  # was an undefined global
    I = np.asarray([np.diag([1.0] * params['n_output'])
                    for i in range(batch_size)], dtype=np.float32)
    params["reset_state"] = -1  # never reset the state during testing
    state_reset_counter_lst = [0 for i in range(batch_size)]
    total_loss = 0.0
    total_pred_loss = 0.0
    total_meas_loss = 0.0
    total_n_count = 0.0
    for minibatch_index in xrange(n_batches):
        state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
        # print state_reset_counter_lst
        (dic_state, x, y, r, f, _, state_reset_counter_lst, _) = \
            th.prepare_batch(is_test, index_list, minibatch_index, batch_size,
                             S_list, dic_state, params, Y, X, R_L_list, F_list,
                             state_reset_counter_lst)
        feed = th.get_feed(Model, params, r, x, y, I, dic_state, is_training=0)
        states, final_output, final_pred_output, final_meas_output, y = \
            sess.run([Model.states, Model.final_output, Model.final_pred_output,
                      Model.final_meas_output, Model.y], feed)
        # Carry the recurrent state over to the next minibatch.
        for k in states.keys():
            dic_state[k] = states[k]
        if params["normalise_data"] == 3 or params["normalise_data"] == 2:
            final_output = ut.unNormalizeData(final_output, params["y_men"], params["y_std"])
            final_pred_output = ut.unNormalizeData(final_pred_output, params["y_men"], params["y_std"])
            final_meas_output = ut.unNormalizeData(final_meas_output, params["x_men"], params["x_std"])
            y = ut.unNormalizeData(y, params["y_men"], params["y_std"])
        if params["normalise_data"] == 4:
            final_output = ut.unNormalizeData(final_output, params["x_men"], params["x_std"])
            final_pred_output = ut.unNormalizeData(final_pred_output, params["x_men"], params["x_std"])
            final_meas_output = ut.unNormalizeData(final_meas_output, params["x_men"], params["x_std"])
            y = ut.unNormalizeData(y, params["x_men"], params["x_std"])
        test_loss, n_count = ut.get_loss(params, gt=y, est=final_output, r=r)
        test_pred_loss, n_count = ut.get_loss(params, gt=y, est=final_pred_output, r=r)
        test_meas_loss, n_count = ut.get_loss(params, gt=y, est=final_meas_output, r=r)
        # Weight each batch loss by its valid-frame count.
        total_loss += test_loss * n_count
        total_pred_loss += test_pred_loss * n_count
        total_meas_loss += test_meas_loss * n_count
        total_n_count += n_count
        # if (minibatch_index % show_every == 0):
        #     print pre_test + " test batch loss: (%i / %i / %i) %f" % (e, minibatch_index, n_train_batches, test_loss)
    total_loss = total_loss / total_n_count
    total_pred_loss = total_pred_loss / total_n_count
    total_meas_loss = total_meas_loss / total_n_count
    s = pre_test + ' Loss --> epoch %i | error %f, %f, %f' % (e, total_loss, total_pred_loss, total_meas_loss)
    ut.log_write(s, params)
    return total_loss
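# --- Illustration only (not called by the pipeline above) -------------------
# test_data() averages per-batch losses weighted by the valid-frame count
# n_count, so padded batch entries do not dilute the error. A minimal sketch
# of that count-weighted mean, with hypothetical inputs:
def _weighted_mean_loss(batch_losses, batch_counts):
    total = sum(l * n for l, n in zip(batch_losses, batch_counts))
    total_n = sum(batch_counts)
    return total / total_n if total_n > 0 else 0.0

# Example: batches with 10 and 2 valid frames and mean losses 1.0 and 4.0
# give (1.0*10 + 4.0*2) / 12 = 1.5, not the unweighted mean 2.5.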
def train(Model, params):
    I = np.asarray([np.diag([1.0] * params['n_output'])
                    for i in range(params["batch_size"])], dtype=np.float32)
    batch_size = params["batch_size"]
    num_epochs = 100000
    decay_rate = 0.9
    show_every = 100
    deca_start = 3
    pre_best_loss = 10000
    with tf.Session() as sess:  # config=gpu_config
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()  # needed below for checkpointing
        # if params["model"] == "kfl_QRf":
        #     ckpt = tf.train.get_checkpoint_state(params["mfile"])
        #     if ckpt and ckpt.model_checkpoint_path:
        #         saver.restore(sess, ckpt.model_checkpoint_path)
        #         mfile = ckpt.model_checkpoint_path
        #         params["est_file"] = params["est_file"] + mfile.split('/')[-1].replace('.ckpt', '') + '/'
        #         print "Loaded Model: %s" % ckpt.model_checkpoint_path
        # if params["model"] == "kfl_QRf":
        #     for var in Model.tvars:
        #         path = '/mnt/Data1/hc/tt/cp/weights/' + var.name.replace('transitionF/', '')
        #         if os.path.exists(path + '.npy'):
        #             val = np.load(path + '.npy')
        #             sess.run(tf.assign(var, val))
        #     print 'PreTrained LSTM model loaded...'
        # sess.run(Model.predict())
        print ('Training model:' + params["model"])
        noise_std = params['noise_std']
        new_noise_std = 0.0
        for e in range(num_epochs):
            # Constant learning rate for the first deca_start epochs,
            # exponential decay afterwards.
            if e > (deca_start - 1):
                sess.run(tf.assign(Model.lr, params['lr'] * (decay_rate ** e)))
            else:
                sess.run(tf.assign(Model.lr, params['lr']))
            total_train_loss = 0
            state_reset_counter_lst = [0 for i in range(batch_size)]
            index_train_list_s = index_train_list
            dic_state = ut.get_state_list(params)
            # total_loss = test_data(sess, params, X_test, Y_test, index_test_list, S_Test_list,
            #                        R_L_Test_list, F_list_test, e, 'Test Check', n_test_batches)
            if params["shufle_data"] == 1 and params['reset_state'] == 1:
                index_train_list_s = ut.shufle_data(index_train_list)
            for minibatch_index in xrange(n_train_batches):
                is_test = 0
                state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
                (dic_state, x, y, r, f, _, state_reset_counter_lst, _) = \
                    th.prepare_batch(is_test, index_train_list_s, minibatch_index, batch_size,
                                     S_Train_list, dic_state, params, Y_train, X_train,
                                     R_L_Train_list, F_list_train, state_reset_counter_lst)
                if noise_std > 0.0:
                    u_cnt = e * n_train_batches + minibatch_index
                    if u_cnt in params['noise_schedule']:
                        if u_cnt == params['noise_schedule'][0]:
                            new_noise_std = noise_std
                        else:
                            # float() avoids Python 2 integer division.
                            new_noise_std = noise_std * (float(u_cnt) / params['noise_schedule'][1])
                        s = 'NOISE --> u_cnt %i | error %f' % (u_cnt, new_noise_std)
                        ut.log_write(s, params)
                    if new_noise_std > 0.0:
                        noise = np.random.normal(0.0, new_noise_std, x.shape)
                        x = noise + x
                feed = th.get_feed(Model, params, r, x, y, I, dic_state, is_training=1)
                train_loss, states, _ = sess.run([Model.cost, Model.states, Model.train_op], feed)
                for k in states.keys():
                    dic_state[k] = states[k]
                total_train_loss += train_loss
                if minibatch_index % show_every == 0:
                    print "Training batch loss: (%i / %i / %i) %f" % (e, minibatch_index, n_train_batches, train_loss)
            total_train_loss = total_train_loss / n_train_batches
            s = 'TRAIN --> epoch %i | error %f' % (e, total_train_loss)
            ut.log_write(s, params)
            pre_test = "TRAINING_Data"
            total_loss = test_data(sess, params, X_train, Y_train, index_train_list, S_Train_list,
                                   R_L_Train_list, F_list_train, e, pre_test, n_train_batches)
            pre_test = "TEST_Data"
            total_loss = test_data(sess, params, X_test, Y_test, index_test_list, S_Test_list,
                                   R_L_Test_list, F_list_test, e, pre_test, n_test_batches)
            # Checkpointing: always save a new best model; otherwise save
            # every third epoch under the running-loss file name.
            base_cp_path = params["cp_file"] + "/"
            lss_str = '%.5f' % total_loss
            model_name = lss_str + "_" + str(e) + "_" + str(params["rn_id"]) + params["model"] + "_model.ckpt"
            save_path = base_cp_path + model_name
            saved_path = ""  # was False, but is compared against "" below
            if pre_best_loss > total_loss:
                pre_best_loss = total_loss
                model_name = lss_str + "_" + str(e) + "_" + str(params["rn_id"]) + params["model"] + "_best_model.ckpt"
                save_path = base_cp_path + model_name
                saved_path = saver.save(sess, save_path)
            else:
                if e % 3 == 0:
                    saved_path = saver.save(sess, save_path)
            if saved_path != "":
                s = 'MODEL_Saved --> epoch %i | error %f path %s' % (e, total_loss, saved_path)
                ut.log_write(s, params)
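# --- Illustration only -------------------------------------------------------
# train() keeps the base learning rate for the first deca_start epochs and
# then assigns lr * decay_rate**e to Model.lr. The schedule in isolation,
# using the constants defined above (deca_start=3, decay_rate=0.9):
def _lr_at_epoch(base_lr, e, deca_start=3, decay_rate=0.9):
    if e > deca_start - 1:
        return base_lr * (decay_rate ** e)
    return base_lr

# e = 0, 1, 2 -> base_lr; e = 3 -> base_lr * 0.9**3 = 0.729 * base_lr.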
def train(tracker, params):
    """Second training variant: tracks median/mean pose errors per epoch
    instead of checkpointing on the scalar loss."""
    I = np.asarray([np.diag([1.0] * params['n_output'])
                    for i in range(params["batch_size"])], dtype=np.float32)
    batch_size = params["batch_size"]
    decay_rate = 0.95
    # show_every = 100
    deca_start = 10
    # pre_best_loss = 10000
    with tf.Session(config=gpu_config) as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()
        # sess.run(tracker.predict())
        print 'Training model:' + params["model"]
        noise_std = params['noise_std']
        new_noise_std = 0.0
        median_result_lst = []
        mean_result_lst = []
        for e in range(num_epochs):
            if e > (deca_start - 1):
                sess.run(tf.assign(tracker.lr, params['lr'] * (decay_rate ** e)))
            else:
                sess.run(tf.assign(tracker.lr, params['lr']))
            total_train_loss = 0
            state_reset_counter_lst = [0 for i in range(batch_size)]
            index_train_list_s = index_train_list
            dic_state = ut.get_state_list(params)
            if params["shufle_data"] == 1 and params['reset_state'] == 1:
                index_train_list_s = ut.shufle_data(index_train_list)
            for minibatch_index in xrange(n_train_batches):
                is_test = 0
                state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
                (dic_state, x, y, r, f, _, state_reset_counter_lst, _) = \
                    th.prepare_batch(is_test, index_train_list_s, minibatch_index, batch_size,
                                     S_Train_list, dic_state, params, Y_train, X_train,
                                     R_L_Train_list, F_list_train, state_reset_counter_lst)
                if noise_std > 0.0:
                    u_cnt = e * n_train_batches + minibatch_index
                    if u_cnt in params['noise_schedule']:
                        # float() avoids Python 2 integer division.
                        new_noise_std = noise_std * (float(u_cnt) / params['noise_schedule'][0])
                        s = 'NOISE --> u_cnt %i | error %f' % (u_cnt, new_noise_std)
                        ut.log_write(s, params)
                    if new_noise_std > 0.0:
                        noise = np.random.normal(0.0, new_noise_std, x.shape)
                        x = noise + x
                feed = th.get_feed(tracker, params, r, x, y, I, dic_state, is_training=1)
                train_loss, states, _ = sess.run(
                    [tracker.cost, tracker.states, tracker.train_op], feed)
                for k in states.keys():
                    dic_state[k] = states[k]
                total_train_loss += train_loss
            pre_test = "TEST_Data"
            total_loss, median_result, mean_result, final_output_lst, file_lst, noise_lst = \
                test_data(sess, params, X_test, Y_test, index_test_list, S_Test_list,
                          R_L_Test_list, F_list_test, e, pre_test, n_test_batches)
            # Write estimations whenever the median error beats every epoch so far.
            if len(full_median_result_lst) > 1:
                if median_result[0] < np.min(full_median_result_lst, axis=0)[0]:
                    # ut.write_slam_est(est_file=params["est_file"], est=final_output_lst, file_names=file_lst)
                    # ut.write_slam_est(est_file=params["noise_file"], est=noise_lst, file_names=file_lst)
                    # save_path = params["cp_file"] + params['msg']
                    # saver.save(sess, save_path)
                    print 'Writing estimations....'
            full_median_result_lst.append(median_result)
            median_result_lst.append(median_result)
            mean_result_lst.append(mean_result)
            # base_cp_path = params["cp_file"] + "/"
            # lss_str = '%.5f' % total_loss
            # model_name = lss_str + "_" + str(e) + "_" + str(params["rn_id"]) + params["model"] + "_model.ckpt"
            # save_path = base_cp_path + model_name
            # saved_path = False
            # if pre_best_loss > total_loss:
            #     pre_best_loss = total_loss
            #     model_name = lss_str + "_" + str(e) + "_" + str(params["rn_id"]) + params["model"] + "_best_model.ckpt"
            #     save_path = base_cp_path + model_name
            #     saved_path = saver.save(sess, save_path)
            # else:
            #     if e % 3.0 == 0:
            #         saved_path = saver.save(sess, save_path)
            # if saved_path != "":
            #     s = 'MODEL_Saved --> epoch %i | error %f path %s' % (e, total_loss, saved_path)
            #     ut.log_write(s, params)
    return median_result_lst, mean_result_lst
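# --- Illustration only -------------------------------------------------------
# The noise curriculum in this train() variant adds zero-mean Gaussian noise
# to the inputs once the global update counter hits an entry of
# params['noise_schedule']; the std grows with u_cnt relative to the first
# schedule entry. A sketch of one step of that logic:
def _apply_noise_schedule(x, u_cnt, schedule, base_std, new_std):
    if base_std > 0.0 and u_cnt in schedule:
        new_std = base_std * (float(u_cnt) / schedule[0])
    if new_std > 0.0:
        x = x + np.random.normal(0.0, new_std, x.shape)
    return x, new_std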
def test_data(sess, params, X, Y, index_list, S_list, R_L_list, F_list, e,
              pre_test, n_batches):
    """SLAM-style evaluation: de-duplicates overlapping batch outputs and
    reports median/mean translation/rotation errors per sequence."""
    dic_state = ut.get_state_list(params)
    batch_size = params["batch_size"]  # was an undefined global
    I = np.asarray([np.diag([1.0] * params['n_output'])
                    for i in range(batch_size)], dtype=np.float32)
    dict_err = {}
    dict_name = {}
    uniq_lst = [item for item in collections.Counter(S_list)]
    is_test = 1
    file_lst = []
    # Map each sequence id to its folder name and an (initially empty)
    # per-sequence error list.
    for u in uniq_lst:
        idx = np.where(S_list == u)
        sname = F_list[idx][0][0][0].split('/')[-2]
        dict_name[u] = sname
        dict_err[u] = []
    state_reset_counter_lst = [0 for i in range(batch_size)]
    total_loss = 0.0
    total_n_count = 0.0
    full_curr_id_lst = []
    full_noise_lst = []
    full_r_lst = []
    full_y_lst = []
    full_final_output_lst = []
    for minibatch_index in xrange(n_batches):
        state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
        (dic_state, x_sel, y, r, f, curr_sid, state_reset_counter_lst, curr_id_lst) = \
            th.prepare_batch(is_test, index_list, minibatch_index, batch_size, S_list,
                             dic_state, params, Y, X, R_L_list, F_list,
                             state_reset_counter_lst)
        feed = th.get_feed(tracker, params, r, x_sel, y, I, dic_state, is_training=0)
        if params["model"] == "lstm":
            states, final_output, sel_y = sess.run(
                [tracker.states, tracker.final_output, tracker.y], feed)
            # Assumption: the plain LSTM emits one output per time step, so
            # its selected output already covers the full sequence.
            full_final_output = np.asarray(final_output).reshape(
                (batch_size, params['seq_length'], params['n_output']))
        else:
            states, final_output, full_final_output, sel_y, x, qnoise_lst = \
                sess.run([tracker.states, tracker.final_output, tracker.full_final_output,
                          tracker.y, tracker.x, tracker.qnoise_lst], feed)
            full_final_output = np.asarray(full_final_output).reshape(
                (batch_size, params['seq_length'], params['n_output']))
        for k in states.keys():
            dic_state[k] = states[k]
        full_curr_id_lst.extend(curr_id_lst)
        full_r_lst.extend(r)
        file_lst.extend(f)
        full_final_output_lst.extend(full_final_output)
        full_y_lst.extend(y)
        if params["model"] != "lstm":
            full_noise_lst.extend(qnoise_lst)
    # total_loss = total_loss / total_n_count
    # Keep one output per unique frame id, then keep only frames whose
    # repeat flag r equals 1.
    index_lst = sh.get_nondublicate_lst(full_curr_id_lst)
    full_r_lst = np.asarray(full_r_lst)[index_lst]
    # if params["model"] != "lstm":
    #     full_noise_lst = np.asarray(full_noise_lst)[index_lst]
    #     full_noise_lst = full_noise_lst[full_r_lst == 1]
    full_final_output_lst = np.asarray(full_final_output_lst)[index_lst]
    full_y_lst = np.asarray(full_y_lst)[index_lst]
    file_lst = np.asarray(file_lst)[index_lst]
    file_lst = file_lst[full_r_lst == 1]
    full_final_output_lst = full_final_output_lst[full_r_lst == 1]
    full_y_lst = full_y_lst[full_r_lst == 1]
    dict_err = {}
    if params["normalise_data"] == 3 or params["normalise_data"] == 2:
        full_final_output_lst = ut.unNormalizeData(full_final_output_lst, params["y_men"], params["y_std"])
        full_y_lst = ut.unNormalizeData(full_y_lst, params["y_men"], params["y_std"])
    if params["normalise_data"] == 4:
        full_final_output_lst = ut.unNormalizeData(full_final_output_lst, params["x_men"], params["x_std"])
        full_y_lst = ut.unNormalizeData(full_y_lst, params["x_men"], params["x_std"])
    full_loss, dict_err = sh.get_loss(file_lst, gt=full_y_lst, est=full_final_output_lst)
    # np.savetxt('trials/garb/x', np.asarray(x_lst))
    if params["sequence"] == "David":
        for u in dict_err.keys():
            seq_err = dict_err[u]
            median_result = np.median(seq_err, axis=0)
            mean_result = np.mean(seq_err, axis=0)
            print 'Epoch:', e, ' full ', u, ' median/mean error ', \
                median_result[0], '/', mean_result[0], 'm and ', \
                median_result[1], '/', mean_result[1], 'degrees.'
    else:
        median_result = np.median(full_loss, axis=0)
        mean_result = np.mean(full_loss, axis=0)
        if params["data_mode"] == "xyx":
            print 'Epoch:', e, ' full sequence median/mean error ', \
                median_result[0], '/', mean_result[0], ''
        elif params["data_mode"] == "q":
            print 'Epoch:', e, ' full sequence median/mean error ', \
                median_result[0], '/', mean_result[0], 'degrees.'
        else:
            print 'Epoch:', e, ' full sequence median/mean error ', \
                median_result[0], '/', mean_result[0], 'm and ', \
                median_result[1], '/', mean_result[1], 'degrees.'
    # s = pre_test + ' Loss --> epoch %i | error %f' % (e, total_loss)
    # ut.log_write(s, params)
    # total_loss is returned for API compatibility; this variant does not
    # accumulate per-batch losses.
    return total_loss, median_result, mean_result, full_final_output_lst, file_lst, full_noise_lst
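# --- Illustration only -------------------------------------------------------
# Overlapping test batches can emit the same frame id more than once, so the
# evaluation above first keeps one row per unique id (sh.get_nondublicate_lst)
# and then drops rows whose repeat flag r != 1. The same pattern in isolation,
# with np.unique as a plain-numpy stand-in for the repo helper (note: np.unique
# orders rows by id value, which may differ from the helper's ordering):
def _dedup_and_mask(ids, values, r):
    _, keep_idx = np.unique(np.asarray(ids), return_index=True)
    values = np.asarray(values)[keep_idx]
    r = np.asarray(r)[keep_idx]
    return values[r == 1]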
def test_data(sess, params, X, Y, index_list, S_list, R_L_list, F_list, e,
              pre_test, n_batches):
    """Evaluation variant that also writes per-frame estimates (and, in
    'klstm' mode, the Kalman internals Q, R and K) to disk."""
    dic_state = ut.get_state_list(params)
    batch_size = params["batch_size"]  # was an undefined global
    I = np.asarray([np.diag([1.0] * params['n_output'])
                    for i in range(batch_size)], dtype=np.float32)
    is_test = 1
    state_reset_counter_lst = [0 for i in range(batch_size)]
    total_loss = 0.0
    total_n_count = 0.0
    for minibatch_index in xrange(n_batches):
        state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
        (dic_state, x, y, r, f, _, state_reset_counter_lst, _) = \
            th.prepare_batch(is_test, index_list, minibatch_index, batch_size, S_list,
                             dic_state, params, Y, X, R_L_list, F_list,
                             state_reset_counter_lst)
        feed = th.get_feed(tracker, params, r, x, y, I, dic_state, is_training=0)
        if mode == 'klstm':
            states, final_output, final_pred_output, final_meas_output, q_mat, r_mat, k_mat, y = \
                sess.run([tracker.states, tracker.final_output, tracker.final_pred_output,
                          tracker.final_meas_output, tracker.final_q_output,
                          tracker.final_r_output, tracker.final_k_output, tracker.y], feed)
        else:
            states, final_output, y = \
                sess.run([tracker.states, tracker.final_output, tracker.y], feed)
        for k in states.keys():
            dic_state[k] = states[k]
        if params["normalise_data"] == 3 or params["normalise_data"] == 2:
            final_output = ut.unNormalizeData(final_output, params["y_men"], params["y_std"])
            y = ut.unNormalizeData(y, params["y_men"], params["y_std"])
        if params["normalise_data"] == 4:
            final_output = ut.unNormalizeData(final_output, params["x_men"], params["x_std"])
            y = ut.unNormalizeData(y, params["x_men"], params["x_std"])
            if mode == 'klstm':
                final_pred_output = ut.unNormalizeData(final_pred_output, params["x_men"], params["x_std"])
                final_meas_output = ut.unNormalizeData(final_meas_output, params["x_men"], params["x_std"])
        test_loss, n_count = ut.get_loss(params, gt=y, est=final_output, r=None)
        # Keep one file name per valid (r != 0) time step, aligned with the
        # estimate rows.
        f = f.reshape((-1, 2))
        y_f = y.reshape(final_output.shape)
        r = r.flatten()
        fnames = f[np.nonzero(r)]
        # e = final_output[np.nonzero(r)]
        if mode == 'klstm':
            ut.write_est(est_file=params["est_file"] + "/kal_est/", est=final_output, file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/kal_est_dif/", est=np.abs(final_output - y_f), file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/kal_pred/", est=final_pred_output, file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/kal_pred_dif/", est=np.abs(final_pred_output - y_f), file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/meas/", est=final_meas_output, file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/q_mat/", est=q_mat, file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/r_mat/", est=r_mat, file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/k_mat/", est=k_mat, file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/y_f/", est=y_f, file_names=fnames)
        else:
            ut.write_est(est_file=params["est_file"], est=final_output, file_names=fnames)
        # print test_loss
        total_loss += test_loss * n_count
        total_n_count += n_count
        print total_loss / total_n_count  # running weighted-mean loss
        # if (minibatch_index % show_every == 0):
        #     print pre_test + " test batch loss: (%i / %i / %i) %f" % (e, minibatch_index, n_train_batches, test_loss)
    total_loss = total_loss / total_n_count
    s = pre_test + ' Loss --> epoch %i | error %f' % (e, total_loss)
    ut.log_write(s, params)
    return total_loss
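# --- Illustration only -------------------------------------------------------
# Before writing estimates, the loop above flattens the file-name array f to
# (N, 2) and the validity mask r to (N,) so that one name row survives per
# valid time step, matching the estimate row order. The same step in
# isolation:
def _valid_frame_names(f, r):
    f = np.asarray(f).reshape((-1, 2))
    r = np.asarray(r).flatten()
    return f[np.nonzero(r)]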