def test_data(sess, params, X, Y, index_list, S_list, R_L_list, F_list, e, pre_test, n_batches):
    is_test = 1
    batch_size = params["batch_size"]
    dic_state = ut.get_state_list(params)
    I = np.asarray([np.diag([1.0] * params['n_output']) for i in range(batch_size)], dtype=np.float32)
    params["reset_state"] = -1  # never reset the state during testing
    state_reset_counter_lst = [0 for i in range(batch_size)]
    total_loss = 0.0
    total_pred_loss = 0.0
    total_meas_loss = 0.0
    total_n_count = 0.0
    for minibatch_index in xrange(n_batches):
        state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
        (dic_state, x, y, r, f, _, state_reset_counter_lst, _) = \
            th.prepare_batch(is_test, index_list, minibatch_index, batch_size,
                             S_list, dic_state, params, Y, X, R_L_list, F_list,
                             state_reset_counter_lst)
        feed = th.get_feed(Model, params, r, x, y, I, dic_state, is_training=0)
        states, final_output, final_pred_output, final_meas_output, y = \
            sess.run([Model.states, Model.final_output, Model.final_pred_output,
                      Model.final_meas_output, Model.y], feed)
        for k in states.keys():
            dic_state[k] = states[k]
        if params["normalise_data"] == 3 or params["normalise_data"] == 2:
            final_output = ut.unNormalizeData(final_output, params["y_men"], params["y_std"])
            final_pred_output = ut.unNormalizeData(final_pred_output, params["y_men"], params["y_std"])
            final_meas_output = ut.unNormalizeData(final_meas_output, params["x_men"], params["x_std"])
            y = ut.unNormalizeData(y, params["y_men"], params["y_std"])
        if params["normalise_data"] == 4:
            final_output = ut.unNormalizeData(final_output, params["x_men"], params["x_std"])
            final_pred_output = ut.unNormalizeData(final_pred_output, params["x_men"], params["x_std"])
            final_meas_output = ut.unNormalizeData(final_meas_output, params["x_men"], params["x_std"])
            y = ut.unNormalizeData(y, params["x_men"], params["x_std"])
        test_loss, n_count = ut.get_loss(params, gt=y, est=final_output, r=r)
        test_pred_loss, n_count = ut.get_loss(params, gt=y, est=final_pred_output, r=r)
        test_meas_loss, n_count = ut.get_loss(params, gt=y, est=final_meas_output, r=r)
        total_loss += test_loss * n_count
        total_pred_loss += test_pred_loss * n_count
        total_meas_loss += test_meas_loss * n_count
        total_n_count += n_count
    total_loss /= total_n_count
    total_pred_loss /= total_n_count
    total_meas_loss /= total_n_count
    s = pre_test + ' Loss --> epoch %i | error %f, %f, %f' % (e, total_loss, total_pred_loss, total_meas_loss)
    ut.log_write(s, params)
    return total_loss
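# All of the test loops in this section aggregate per-batch errors the same
# way: ut.get_loss is assumed to return a mean over the valid (non-masked)
# frames together with the number of frames in that mean, so batch means are
# re-weighted by their counts before the global average is taken. A minimal
# self-contained sketch of that accumulation (masked_euclidean_loss is a
# stand-in for ut.get_loss, not the actual implementation):
import numpy as np

def masked_euclidean_loss(gt, est, r):
    """Mean per-frame Euclidean distance over frames where the mask r is non-zero."""
    mask = r.flatten() > 0
    diff = (gt - est).reshape(len(mask), -1)
    dists = np.sqrt((diff ** 2).sum(axis=1))
    return dists[mask].mean(), mask.sum()

total_loss, total_n_count = 0.0, 0.0
for _ in range(3):                                   # three dummy "batches"
    gt = np.random.rand(8, 6)                        # 8 frames, 6-dim targets
    est = gt + 0.1 * np.random.randn(8, 6)
    r = np.array([1, 1, 1, 1, 0, 0, 1, 1])           # 0 marks padded frames
    loss, n_count = masked_euclidean_loss(gt, est, r)
    total_loss += loss * n_count                     # undo the per-batch mean...
    total_n_count += n_count
print(total_loss / total_n_count)                    # ...so every frame weighs equally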
def test_data(sess, X, Y, index_list, S_list, R_L_list, F_list, e, pre_test, n_batches):
    LStateList_F_t = ut.get_zero_state(params)
    LStateList_F_pre = ut.get_zero_state(params)
    LStateList_K_t = ut.get_zero_state(params, t='K')
    LStateList_K_pre = ut.get_zero_state(params, t='K')
    state_reset_counter_lst = [0 for i in range(batch_size)]
    total_loss = 0.0
    total_n_count = 0.0
    for minibatch_index in xrange(n_batches):
        state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
        (LStateList_F_pre, LStateList_K_pre, _, x, y, r, f, state_reset_counter_lst) = \
            dut.prepare_kfl_QRFf_batch(index_list, minibatch_index, batch_size,
                                       S_list, LStateList_F_t, LStateList_F_pre,
                                       LStateList_K_t, LStateList_K_pre, None, None,
                                       params, Y, X, R_L_list, F_list,
                                       state_reset_counter_lst)
        gt = y
        mes = x
        feed = {tracker._z: mes,
                tracker.target_data: gt,
                tracker.repeat_data: r,
                tracker.initial_state: LStateList_F_pre,
                tracker.initial_state_Q_noise: LStateList_K_pre,
                tracker.output_keep_prob: 1}
        LStateList_F_t, LStateList_K_t, final_output, y = \
            sess.run([tracker.final_state_F, tracker.final_state_K,
                      tracker.final_output, tracker.y], feed)
        # flatten the LSTMStateTuple lists into interleaved [c, h, ...] lists
        tmp_lst = []
        for item in LStateList_F_t:
            tmp_lst.append(item.c)
            tmp_lst.append(item.h)
        LStateList_F_t = tmp_lst
        tmp_lst = []
        for item in LStateList_K_t:
            tmp_lst.append(item.c)
            tmp_lst.append(item.h)
        LStateList_K_t = tmp_lst
        if params["normalise_data"] == 3 or params["normalise_data"] == 2:
            final_output = ut.unNormalizeData(final_output, params["y_men"], params["y_std"])
            y = ut.unNormalizeData(y, params["y_men"], params["y_std"])
        test_loss, n_count = ut.get_loss(params, gt=y, est=final_output)
        total_loss += test_loss * n_count
        total_n_count += n_count
    total_loss /= total_n_count
    s = pre_test + ' Loss --> epoch %i | error %f' % (e, total_loss)
    ut.log_write(s, params)
    return total_loss
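# Both the function above and the train() loop near the end of this section
# flatten TensorFlow LSTM states into an interleaved [c0, h0, c1, h1, ...]
# list before feeding them back as the next initial state. A small helper pair
# makes the round-trip explicit (sketch; assumes each element exposes .c and
# .h like tf.nn.rnn_cell.LSTMStateTuple):
def flatten_lstm_states(state_tuples):
    """[LSTMStateTuple(c, h), ...] -> [c0, h0, c1, h1, ...]"""
    flat = []
    for st in state_tuples:
        flat.append(st.c)  # cell state
        flat.append(st.h)  # hidden (output) state
    return flat

def pair_lstm_states(flat):
    """Inverse: [c0, h0, c1, h1, ...] -> [(c0, h0), (c1, h1), ...]"""
    return list(zip(flat[0::2], flat[1::2]))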
def eval(params):
    batch_size = params['batch_size']
    num_examples = len(params['test_files'][0])
    with tf.Graph().as_default():
        batch = dut.distorted_inputs(params, is_training=is_training)
        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, end_points = vgg.vgg_19(batch[0], num_classes=params['n_output'], is_training=is_training)
        init_fn = ut.get_init_fn(slim, params)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = params['per_process_gpu_memory_fraction']
        with tf.Session(config=config) as sess:
            sess.run(tf.initialize_local_variables())
            coord = tf.train.Coordinator()
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))
            init_fn(sess)
            # float division so the last partial batch is not dropped by integer /
            num_iter = int(math.ceil(num_examples / float(batch_size)))
            print('%s: Testing started.' % (datetime.now()))
            step = 0
            loss_lst = []
            run_lst = [logits]
            run_lst.extend(batch[1:])
            while step < num_iter and not coord.should_stop():
                try:
                    batch_res = sess.run(run_lst)
                except tf.errors.OutOfRangeError:
                    print('Testing finished....%d' % step)
                    break
                if params['write_est'] == True:
                    ut.write_est(params, batch_res)
                est = batch_res[0]
                gt = batch_res[1]
                loss = ut.get_loss(params, gt, est)
                loss_lst.append(loss)
                s = 'VAL --> batch %i/%i | error %f' % (step, num_iter, loss)
                ut.log_write(s, params)
                step += 1
            coord.request_stop()
            coord.join(threads)
    return np.mean(loss_lst)
def test_data(sess, X, Y, index_list, S_list, R_L_list, F_list, e, pre_test, n_batches):
    state_reset_counter_lst = [0 for i in range(batch_size)]
    total_loss = 0.0
    total_meas_loss = 0.0
    total_n_count = 0.0
    total_meas_n_count = 0.0
    for minibatch_index in xrange(n_batches):
        state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
        x = X[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]  # 60*20*1024
        y = Y[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]  # 60*20*1024
        feed = {model.input_data: x,
                model.target_data: y,
                model.is_training: False,
                model.output_keep_prob: 1.0}
        final_output = sess.run([model.final_output], feed)
        final_output = final_output[0]
        test_loss, n_count = ut.get_loss(params, gt=y, est=final_output)
        # error of the raw input against ground truth, as a measurement baseline
        test_meas_loss, meas_n_count = ut.get_loss(params, gt=y, est=x)
        total_loss += test_loss * n_count
        total_meas_loss += test_meas_loss * meas_n_count
        total_n_count += n_count
        total_meas_n_count += meas_n_count
        if minibatch_index % show_every == 0:
            print pre_test + " test batch loss: (%i / %i / %i) %f" % (e, minibatch_index, n_batches, test_loss)
    total_loss /= total_n_count
    total_meas_loss /= total_meas_n_count
    s = pre_test + ' Loss --> epoch %i | error %f, %f' % (e, total_loss, total_meas_loss)
    ut.log_write(s, params)
    return total_loss
def train_rnn(params):
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train,
     X_test, Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    F_list_train, G_list_train = du.shuffle_in_unison_inplace(F_list_train, G_list_train)
    params["len_train"] = len(F_list_train)
    params["len_test"] = len(F_list_test)
    u.start_log(params)
    batch_size = params['batch_size']
    n_train_batches = len(F_list_train) // batch_size
    n_test_batches = len(F_list_test) // batch_size
    nb_epochs = params['n_epochs']
    print("Batch size: %i, train batch size: %i, test batch size: %i" % (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        for minibatch_index in range(n_train_batches):
            x_lst = F_list_train[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]
            y_lst = G_list_train[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]
            x, y = du.load_batch(params, x_lst, y_lst)
            is_train = 1
            if params["model"] == "blstmnp":
                x_b = np.asarray(map(np.flipud, x))
                loss = model.train(x, x_b, y)
            else:
                loss = model.train(x, y, is_train)
            batch_loss += loss
        if params['shufle_data'] == 1:
            F_list_train, G_list_train = du.shuffle_in_unison_inplace(F_list_train, G_list_train)
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        s = 'TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss)
        u.log_write(s, params)
        if epoch_counter % 1 == 0:
            print("Model testing")
            batch_loss3d = []
            H = C = np.zeros(shape=(batch_size, params['n_hidden']), dtype=dtype)  # reset state: sequences change
            for minibatch_index in range(n_test_batches):
                x_lst = F_list_test[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]
                y_lst = G_list_test[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]
                x, y = du.load_batch(params, x_lst, y_lst)
                is_train = 0
                if params["model"] == "blstmnp":
                    x_b = np.asarray(map(np.flipud, x))
                    pred = model.predictions(x, x_b)
                else:
                    pred = model.predictions(x, is_train)
                loss3d = u.get_loss(params, y, pred)
                batch_loss3d.append(loss3d)
            batch_loss3d = np.nanmean(batch_loss3d)
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)
            val_counter += 1
            s = 'VAL--> epoch %i | error %f, %f' % (val_counter, batch_loss3d, n_test_batches)
            u.log_write(s, params)
def train_rnn(params):
    data = []
    for sindex in range(0, params['seq_length'], 5):
        (X_train, Y_train, X_test, Y_test) = du.load_pose(params, sindex=sindex)
        data.append((X_train, Y_train, X_test, Y_test))
    (X_train, Y_train, X_test, Y_test) = data[0]
    params["len_train"] = X_train.shape[0] * X_train.shape[1]
    params["len_test"] = X_test.shape[0] * X_test.shape[1]
    u.start_log(params)
    batch_size = params['batch_size']
    n_train_batches = len(X_train) // batch_size
    n_test_batches = len(X_test) // batch_size
    nb_epochs = params['n_epochs']
    print("Batch size: %i, train batch size: %i, test batch size: %i" % (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['resume'] == 1:
        model = model_provider.get_model_pretrained(params)
    else:
        model = model_provider.get_model(params)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 10000
    for epoch_counter in range(nb_epochs):
        (X_train, Y_train, X_test, Y_test) = data[np.mod(epoch_counter, len(data))]
        if params['shufle_data'] == 1:
            X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
        n_train_batches = len(X_train) // batch_size
        n_test_batches = len(X_test) // batch_size
        batch_loss = 0.
        for minibatch_index in range(n_train_batches):
            x = X_train[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]  # 60*20*1024
            y = Y_train[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]  # 60*20*54
            if params["model"] == "blstmnp":
                x_b = np.asarray(map(np.flipud, x))
                loss = model.train(x, x_b, y)
            else:
                loss = model.train(x, y)
            batch_loss += loss
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        s = 'TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss)
        u.log_write(s, params)
        if epoch_counter % 10 == 0:
            print("Model testing")
            batch_loss = 0.
            batch_loss3d = 0.
            for minibatch_index in range(n_test_batches):
                x = X_test[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]
                y = Y_test[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]
                if params["model"] == "blstmnp":
                    x_b = np.asarray(map(np.flipud, x))
                    pred = model.predictions(x, x_b)
                else:
                    pred = model.predictions(x)
                loss = np.nanmean(np.abs(pred - y) ** 2)
                loss3d = u.get_loss(y, pred)
                batch_loss += loss
                batch_loss3d += loss3d
            batch_loss /= n_test_batches
            batch_loss3d /= n_test_batches
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)
            val_counter += 1
            s = 'VAL--> epoch %i | error %f, %f %f' % (val_counter, batch_loss, batch_loss3d, n_test_batches)
            u.log_write(s, params)
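# The function above cycles through several copies of the dataset, each cut
# with a different sequence-start offset (0, 5, 10, ... frames), so successive
# epochs see differently aligned subsequences of the same videos. A sketch of
# that windowing (du.load_pose does the real slicing; this helper is
# illustrative only):
import numpy as np

def offset_windows(frames, seq_length, offset):
    """Cut frames into non-overlapping seq_length windows starting at offset."""
    usable = frames[offset:]
    n = len(usable) // seq_length
    return usable[:n * seq_length].reshape((n, seq_length) + frames.shape[1:])

frames = np.arange(23).reshape(23, 1)        # 23 dummy single-feature frames
print(offset_windows(frames, 5, 0).shape)    # (4, 5, 1)
print(offset_windows(frames, 5, 3).shape)    # (4, 5, 1): offset shifts alignment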
def train_rnn(params):
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train,
     X_test, Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    params["len_train"] = len(F_list_train)
    params["len_test"] = len(F_list_test)
    u.start_log(params)
    batch_size = params['batch_size']
    n_train_batches = len(F_list_train) // batch_size
    n_test_batches = len(F_list_test) // batch_size
    nb_epochs = params['n_epochs']
    print("Batch size: %i, train batch size: %i, test batch size: %i" % (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        for minibatch_index in range(n_train_batches):
            x_lst = F_list_train[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]
            y_lst = G_list_train[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]
            x, y = du.load_batch(params, x_lst, y_lst)
            is_train = 1
            if params["model"] == "blstmnp":
                x_b = np.asarray(map(np.flipud, x))
                loss = model.train(x, x_b, y)
            else:
                loss = model.train(x, y, is_train)
            batch_loss += loss
        if params['shufle_data'] == 1:
            # shuffle the file lists actually used for batching, keeping x/y in sync
            F_list_train, G_list_train = du.shuffle_in_unison_inplace(F_list_train, G_list_train)
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        s = 'TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss)
        u.log_write(s, params)
        if epoch_counter % 3 == 0:
            print("Model testing")
            batch_loss3d = []
            H = C = np.zeros(shape=(batch_size, params['n_hidden']), dtype=dtype)  # reset state: sequences change
            for minibatch_index in range(n_test_batches):
                x_lst = F_list_test[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]
                y_lst = G_list_test[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]
                x, y = du.load_batch(params, x_lst, y_lst)
                is_train = 0
                if params["model"] == "blstmnp":
                    x_b = np.asarray(map(np.flipud, x))
                    pred = model.predictions(x, x_b)
                else:
                    pred = model.predictions(x, is_train)
                loss3d = u.get_loss(params, y, pred)
                batch_loss3d.append(loss3d)
            batch_loss3d = np.nanmean(batch_loss3d)
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)
            val_counter += 1
            s = 'VAL--> epoch %i | error %f, %f' % (val_counter, batch_loss3d, n_test_batches)
            u.log_write(s, params)
batch_loss3d = []
batch_loss = []
loss_list = []
last_index = 0
first_index = 0
sq_loss_lst = []
for minibatch_index in range(n_batches):
    if minibatch_index == 0:
        # prime the pipeline with the first batch
        x, y = du.prepare_cnn_batch(minibatch_index, batch_size, F_list, Y)
    pool = ThreadPool(processes=2)
    # score the current batch while the next one loads in parallel
    async_t = pool.apply_async(model.predictions, (x, is_train))
    async_b = pool.apply_async(du.prepare_cnn_batch, (minibatch_index, batch_size, F_list, Y))
    pool.close()
    pool.join()
    pred = async_t.get()
    loss3d = u.get_loss(params, y, pred)
    loss = np.nanmean(np.abs(pred - y))
    batch_loss3d.append(loss3d)
    batch_loss.append(loss)
    (x, y) = async_b.get()
    if minibatch_index == n_batches - 1:
        # last batch: drop the padded residual frames before scoring
        pred = model.predictions(x, is_train)
        pred = pred[0:(len(pred) - residual)]
        y = y[0:(len(y) - residual)]
        loss3d = u.get_loss(params, y, pred)
        batch_loss3d.append(loss3d)
        # du.write_predictions(params, pred, n_list)
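# The fragment above overlaps GPU inference with CPU batch loading by running
# both on a two-worker ThreadPool: while the model scores batch i, the next
# batch is prepared in parallel. A self-contained sketch of the pattern
# (load_batch and run_model stand in for du.prepare_cnn_batch and
# model.predictions):
from multiprocessing.pool import ThreadPool

def pipelined_eval(n_batches, load_batch, run_model):
    results = []
    batch = load_batch(0)                    # prime the pipeline
    for i in range(n_batches):
        pool = ThreadPool(processes=2)
        async_run = pool.apply_async(run_model, (batch,))
        async_load = pool.apply_async(load_batch, (i + 1,)) if i + 1 < n_batches else None
        pool.close()
        pool.join()
        results.append(async_run.get())
        if async_load is not None:
            batch = async_load.get()         # becomes the next iteration's input
    return results

print(pipelined_eval(3, lambda i: i, lambda b: b * 10))  # [0, 10, 20]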
def train_rnn(params):
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train,
     X_test, Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    params["len_train"] = len(X_train)
    params["len_test"] = len(X_test)
    u.start_log(params)
    batch_size = params['batch_size']
    n_train_batches = params["len_train"] // batch_size
    n_test_batches = params["len_test"] // batch_size
    nb_epochs = params['n_epochs']
    print("Batch size: %i, train batch size: %i, test batch size: %i" % (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        is_train = 1
        for minibatch_index in range(n_train_batches):
            x = X_train[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]  # 60*20*1024
            y = Y_train[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]  # 60*20*1024
            if params["model"] == "blstmnp":
                x_b = np.asarray(map(np.flipud, x))
                loss = model.train(x, x_b, y)
            else:
                loss = model.train(x, y, is_train)
            batch_loss += loss
        if params['shufle_data'] == 1:
            X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        s = 'TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss)
        u.log_write(s, params)
        if epoch_counter % 3 == 0:
            print("Model testing")
            batch_loss3d = []
            is_train = 0
            for minibatch_index in range(n_test_batches):
                x = X_test[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]
                y = Y_test[minibatch_index * batch_size:(minibatch_index + 1) * batch_size]
                pred = model.predictions(x, is_train)
                loss3d = u.get_loss(params, y, pred)
                batch_loss3d.append(loss3d)
            batch_loss3d = np.nanmean(batch_loss3d)
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)
            val_counter += 1
            s = 'VAL--> epoch %i | error %f, %f' % (val_counter, batch_loss3d, n_test_batches)
            u.log_write(s, params)
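# Every training loop in this section uses the same checkpointing scheme: a
# validation that beats best_loss is saved under a descriptive
# "<epoch>_<loss>_best.p" name, while all other validations alternate between
# two rotating slots ("0.p" / "1.p") so disk usage stays bounded. The naming
# logic in isolation:
def checkpoint_name(epoch, loss, best_loss, val_counter):
    if loss < best_loss:
        return str(epoch) + "_" + str(loss) + "_best.p", loss
    return str(val_counter % 2) + ".p", best_loss

print(checkpoint_name(7, 0.42, 1000, 0))   # ('7_0.42_best.p', 0.42)
print(checkpoint_name(8, 0.55, 0.42, 1))   # ('1.p', 0.42)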
def train_rnn(params):
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train,
     X_test, Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    params["len_train"] = Y_train.shape[0] * Y_train.shape[1]
    params["len_test"] = Y_test.shape[0] * Y_test.shape[1]
    u.start_log(params)
    index_train_list, S_Train_list = du.get_seq_indexes(params, S_Train_list)
    index_test_list, S_Test_list = du.get_seq_indexes(params, S_Test_list)
    batch_size = params['batch_size']
    n_train_batches = len(index_train_list) // batch_size
    n_test_batches = len(index_test_list) // batch_size
    nb_epochs = params['n_epochs']
    print("Batch size: %i, train batch size: %i, test batch size: %i" % (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        # one (cell, hidden) pair per layer, carried across minibatches
        LStateList_t = [np.zeros(shape=(batch_size, params['n_hidden']), dtype=dtype) for i in range(params['nlayer'] * 2)]
        LStateList_pre = [np.zeros(shape=(batch_size, params['n_hidden']), dtype=dtype) for i in range(params['nlayer'] * 2)]
        state_reset_counter_lst = [0 for i in range(batch_size)]
        is_train = 1
        for minibatch_index in range(n_train_batches):
            state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
            (LStateList_b, x, y, state_reset_counter_lst) = du.prepare_lstm_batch(
                index_train_list, minibatch_index, batch_size, S_Train_list,
                LStateList_t, LStateList_pre, F_list_train, params, Y_train,
                X_train, state_reset_counter_lst)
            LStateList_pre = LStateList_b
            args = (x, y, is_train) + tuple(LStateList_b)
            result = model.train(*args)
            loss = result[0]
            LStateList_t = result[1:len(result)]
            batch_loss += loss
        if params['shufle_data'] == 1:
            X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        s = 'TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss)
        u.log_write(s, params)
        if epoch_counter % 1 == 0:
            is_train = 0
            print("Model testing")
            batch_loss3d = []
            LStateList_t = [np.zeros(shape=(batch_size, params['n_hidden']), dtype=dtype) for i in range(params['nlayer'] * 2)]
            LStateList_pre = [np.zeros(shape=(batch_size, params['n_hidden']), dtype=dtype) for i in range(params['nlayer'] * 2)]
            state_reset_counter_lst = [0 for i in range(batch_size)]
            for minibatch_index in range(n_test_batches):
                state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
                (LStateList_b, x, y, state_reset_counter_lst) = du.prepare_lstm_batch(
                    index_test_list, minibatch_index, batch_size, S_Test_list,
                    LStateList_t, LStateList_pre, F_list_test, params, Y_test,
                    X_test, state_reset_counter_lst)
                LStateList_pre = LStateList_b
                args = (x, is_train) + tuple(LStateList_b)
                result = model.predictions(*args)
                pred = result[0]
                LStateList_t = result[1:len(result)]
                loss3d = u.get_loss(params, y, pred)
                batch_loss3d.append(loss3d)
            batch_loss3d = np.nanmean(batch_loss3d)
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)
            val_counter += 1
            s = 'VAL--> epoch %i | error %f, %f' % (val_counter, batch_loss3d, n_test_batches)
            u.log_write(s, params)
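# du.prepare_lstm_batch (not shown here) decides, per batch row, whether the
# carried LSTM state is still valid or belongs to a sequence that just ended.
# A sketch of that reset logic, under the assumption that reset_state gives
# the carry-over horizon in batches and -1 means "never reset" (as set in the
# first test_data above); the helper name and exact rule are assumptions:
import numpy as np

def maybe_reset_states(states, state_reset_counter_lst, reset_every):
    for b, counter in enumerate(state_reset_counter_lst):
        if reset_every > 0 and counter >= reset_every:
            for layer_state in states:
                layer_state[b, :] = 0.0          # new sequence: start from zeros
            state_reset_counter_lst[b] = 0
    return states, state_reset_counter_lst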
            S_list, LStateList_t, LStateList_pre, params, Y, X, R_L_list,
            F_list, state_reset_counter_lst)
        LStateList_pre = LStateList_b
        feed = {model.input_data: x,
                model.target_data: y,
                model.initial_state: LStateList_b,
                model.repeat_data: r,
                model.is_training: False,
                model.output_keep_prob: 1.0}
        LStateList_t, final_output, y = sess.run(
            [model.final_state, model.final_output, model.y], feed)
        test_loss, n_count = ut.get_loss(params, gt=y, est=final_output)
        total_loss += test_loss * n_count
        total_n_count += n_count
    total_loss = total_loss / total_n_count
    s = '%s Loss --> %f' % (action, total_loss)
    print(s)
def test_data(sess, params, X, Y, index_list, S_list, R_L_list, F_list, e, pre_test, n_batches):
    batch_size = params["batch_size"]
    dic_state = ut.get_state_list(params)
    I = np.asarray([np.diag([1.0] * params['n_output']) for i in range(batch_size)], dtype=np.float32)
    is_test = 1
    state_reset_counter_lst = [0 for i in range(batch_size)]
    total_loss = 0.0
    total_n_count = 0.0
    for minibatch_index in xrange(n_batches):
        state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
        (dic_state, x, y, r, f, _, state_reset_counter_lst, _) = \
            th.prepare_batch(is_test, index_list, minibatch_index, batch_size,
                             S_list, dic_state, params, Y, X, R_L_list, F_list,
                             state_reset_counter_lst)
        feed = th.get_feed(tracker, params, r, x, y, I, dic_state, is_training=0)
        if mode == 'klstm':
            states, final_output, final_pred_output, final_meas_output, q_mat, r_mat, k_mat, y = \
                sess.run([tracker.states, tracker.final_output, tracker.final_pred_output,
                          tracker.final_meas_output, tracker.final_q_output,
                          tracker.final_r_output, tracker.final_k_output, tracker.y], feed)
        else:
            states, final_output, y = \
                sess.run([tracker.states, tracker.final_output, tracker.y], feed)
        for k in states.keys():
            dic_state[k] = states[k]
        if params["normalise_data"] == 3 or params["normalise_data"] == 2:
            final_output = ut.unNormalizeData(final_output, params["y_men"], params["y_std"])
            y = ut.unNormalizeData(y, params["y_men"], params["y_std"])
        if params["normalise_data"] == 4:
            final_output = ut.unNormalizeData(final_output, params["x_men"], params["x_std"])
            y = ut.unNormalizeData(y, params["x_men"], params["x_std"])
        if mode == 'klstm':
            final_pred_output = ut.unNormalizeData(final_pred_output, params["x_men"], params["x_std"])
            final_meas_output = ut.unNormalizeData(final_meas_output, params["x_men"], params["x_std"])
        test_loss, n_count = ut.get_loss(params, gt=y, est=final_output, r=None)
        # keep only real (non-repeated) frames when writing estimates
        f = f.reshape((-1, 2))
        y_f = y.reshape(final_output.shape)
        r = r.flatten()
        fnames = f[np.nonzero(r)]
        if mode == 'klstm':
            ut.write_est(est_file=params["est_file"] + "/kal_est/", est=final_output, file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/kal_est_dif/", est=np.abs(final_output - y_f), file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/kal_pred/", est=final_pred_output, file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/kal_pred_dif/", est=np.abs(final_pred_output - y_f), file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/meas/", est=final_meas_output, file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/q_mat/", est=q_mat, file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/r_mat/", est=r_mat, file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/k_mat/", est=k_mat, file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/y_f/", est=y_f, file_names=fnames)
        else:
            ut.write_est(est_file=params["est_file"], est=final_output, file_names=fnames)
        total_loss += test_loss * n_count
        total_n_count += n_count
        print total_loss / total_n_count
    total_loss = total_loss / total_n_count
    s = pre_test + ' Loss --> epoch %i | error %f' % (e, total_loss)
    ut.log_write(s, params)
    return total_loss
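# The repeat mask r above selects which rows are real frames (as opposed to
# padding that fills out the fixed batch shape) before estimates are written
# to disk. The filtering in isolation, with dummy values:
import numpy as np

r = np.array([1, 0, 1, 1])                               # 0 = padded repeat
f = np.array([["s1", "f1"], ["s1", "f2"], ["s1", "f3"], ["s1", "f4"]])
print(f[np.nonzero(r)])                                  # rows 0, 2 and 3 survive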
def train_rnn(params):
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train,
     X_test, Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    params["len_train"] = Y_train.shape[0] * Y_train.shape[1]
    params["len_test"] = Y_test.shape[0] * Y_test.shape[1]
    u.start_log(params)
    index_train_list, S_Train_list = du.get_batch_indexes(S_Train_list)  # prepare batch indexes
    index_test_list, S_Test_list = du.get_batch_indexes(S_Test_list)
    batch_size = params['batch_size']
    n_train_batches = len(index_train_list) // batch_size
    n_test_batches = len(index_test_list) // batch_size
    nb_epochs = params['n_epochs']
    print("Batch size: %i, train batch size: %i, test batch size: %i" % (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        H = C = np.zeros(shape=(batch_size, params['n_hidden']), dtype=dtype)  # initial hidden state
        sid = 0
        is_train = 1
        x = []
        y = []
        for minibatch_index in range(n_train_batches):
            if minibatch_index == 0:
                # prime the pipeline with the first batch
                (sid, H, C, x, y) = du.prepare_cnn_lstm_batch(
                    index_train_list, minibatch_index, batch_size, S_Train_list,
                    sid, H, C, F_list_train, params, Y_train, X_train)
            pool = ThreadPool(processes=2)
            # train on the current batch while the next one is prepared
            async_t = pool.apply_async(model.train, (x, y, is_train, H, C))
            async_b = pool.apply_async(du.prepare_cnn_lstm_batch,
                                       (index_train_list, minibatch_index, batch_size,
                                        S_Train_list, sid, H, C, F_list_train, params,
                                        Y_train, X_train))
            pool.close()
            pool.join()
            (loss, H, C) = async_t.get()
            (sid, H, C, x, y) = async_b.get()
            if minibatch_index == n_train_batches - 1:
                loss, H, C = model.train(x, y, is_train, H, C)
            batch_loss += loss
        if params['shufle_data'] == 1:
            X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        s = 'TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss)
        u.log_write(s, params)
        if epoch_counter % 1 == 0:
            print("Model testing")
            batch_loss3d = []
            H = C = np.zeros(shape=(batch_size, params['n_hidden']), dtype=dtype)  # reset state: sequences change
            sid = 0
            is_train = 0
            for minibatch_index in range(n_test_batches):
                if minibatch_index == 0:
                    (sid, H, C, x, y) = du.prepare_cnn_lstm_batch(
                        index_test_list, minibatch_index, batch_size, S_Test_list,
                        sid, H, C, F_list_test, params, Y_test, X_test)
                pool = ThreadPool(processes=2)
                async_t = pool.apply_async(model.predictions, (x, is_train, H, C))
                async_b = pool.apply_async(du.prepare_cnn_lstm_batch,
                                           (index_test_list, minibatch_index, batch_size,
                                            S_Test_list, sid, H, C, F_list_test, params,
                                            Y_test, X_test))
                pool.close()
                pool.join()
                (pred, H, C) = async_t.get()
                loss3d = u.get_loss(params, y, pred)
                batch_loss3d.append(loss3d)
                (sid, H, C, x, y) = async_b.get()
                if minibatch_index == n_test_batches - 1:
                    pred, H, C = model.predictions(x, is_train, H, C)
                    loss3d = u.get_loss(params, y, pred)
                    batch_loss3d.append(loss3d)
            batch_loss3d = np.nanmean(batch_loss3d)
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)
            val_counter += 1
            s = 'VAL--> epoch %i | error %f, %f' % (val_counter, batch_loss3d, n_test_batches)
            u.log_write(s, params)
def train():
    model = Model(params)
    num_epochs = 1000
    decay_rate = 0.4
    show_every = 100
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = params['per_process_gpu_memory_fraction']
    with tf.Session(config=config) as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
        merged = tf.summary.merge_all()
        summary_writer = tf.train.SummaryWriter(params["sm"], sess.graph)
        for e in xrange(num_epochs):
            # exponentially decayed learning rate: lr * decay_rate**epoch
            sess.run(tf.assign(model.lr, params['lr'] * (decay_rate ** e)))
            LStateList_t = [np.zeros(shape=(batch_size, params['n_hidden']), dtype=np.float32)
                            for i in range(params['nlayer'] * 2)]  # initial hidden state
            LStateList_pre = [np.zeros(shape=(batch_size, params['n_hidden']), dtype=np.float32)
                              for i in range(params['nlayer'] * 2)]
            state_reset_counter_lst = [0 for i in range(batch_size)]
            total_train_loss = 0
            for minibatch_index in xrange(n_train_batches):
                state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
                (LStateList_b, x, y, state_reset_counter_lst) = dut.prepare_lstm_batch_joints(
                    index_train_list, minibatch_index, batch_size, S_Train_list,
                    LStateList_t, LStateList_pre, params, F_names_training,
                    state_reset_counter_lst)
                LStateList_pre = LStateList_b
                y = y.reshape(batch_size * params["seq_length"], params["n_output"])
                feed = {model.input_data: x,
                        model.input_zero: np.ceil(x),
                        model.target_data: y,
                        model.initial_state: LStateList_b,
                        model.is_training: True,
                        model.output_keep_prob: 0.5}
                summary, train_loss, LStateList_t, _ = \
                    sess.run([merged, model.cost, model.final_state, model.train_op], feed)
                summary_writer.add_summary(summary, minibatch_index)
                # flatten LSTMStateTuples into an interleaved [c, h, ...] list
                tmp_lst = []
                for item in LStateList_t:
                    tmp_lst.append(item.c)
                    tmp_lst.append(item.h)
                LStateList_t = tmp_lst
                total_train_loss += train_loss
                if minibatch_index % show_every == 0:
                    print "Training batch loss: (%i / %i / %i) %f" % (e, minibatch_index, n_train_batches, train_loss)
            total_train_loss = total_train_loss / n_train_batches
            s = 'TRAIN --> epoch %i | error %f' % (e, total_train_loss)
            ut.log_write(s, params)
            LStateList_t = [np.zeros(shape=(batch_size, params['n_hidden']), dtype=np.float32)
                            for i in range(params['nlayer'] * 2)]
            LStateList_pre = [np.zeros(shape=(batch_size, params['n_hidden']), dtype=np.float32)
                              for i in range(params['nlayer'] * 2)]
            state_reset_counter_lst = [0 for i in range(batch_size)]
            total_test_loss = 0
            for minibatch_index in xrange(n_test_batches):
                state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
                (LStateList_b, x, y, state_reset_counter_lst) = dut.prepare_lstm_batch(
                    index_test_list, minibatch_index, batch_size, S_Test_list,
                    LStateList_t, LStateList_pre, params, Y_test, X_test,
                    state_reset_counter_lst)
                LStateList_pre = LStateList_b
                y = y.reshape(batch_size * params["seq_length"], params["n_output"])
                feed = {model.input_data: x,
                        model.target_data: y,
                        model.initial_state: LStateList_b,
                        model.is_training: False,
                        model.output_keep_prob: 1.0}
                LStateList_t, final_output = sess.run([model.final_state, model.final_output], feed)
                test_loss = ut.get_loss(params, gt=y, est=final_output)
                tmp_lst = []
                for item in LStateList_t:
                    tmp_lst.append(item.c)
                    tmp_lst.append(item.h)
                LStateList_t = tmp_lst
                total_test_loss += test_loss
                if minibatch_index % show_every == 0:
                    print "Test batch loss: (%i / %i / %i) %f" % (e, minibatch_index, n_test_batches, test_loss)
            total_test_loss = total_test_loss / n_test_batches
            print "Total test loss %f" % total_test_loss
            s = 'VAL --> epoch %i | error %f' % (e, total_test_loss)
            ut.log_write(s, params)
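# The schedule above reassigns the learning rate every epoch as
# lr * decay_rate**epoch, i.e. plain exponential decay. The same computation
# standalone (the base rate here is illustrative, not taken from params):
base_lr, decay_rate = 1e-3, 0.4
for e in range(4):
    print("epoch %d lr %.6f" % (e, base_lr * decay_rate ** e))
# epoch 0 lr 0.001000
# epoch 1 lr 0.000400
# epoch 2 lr 0.000160
# epoch 3 lr 0.000064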
def train_rnn(params):
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train,
     X_test, Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    params["len_train"] = Y_train.shape[0] * Y_train.shape[1]
    params["len_test"] = Y_test.shape[0] * Y_test.shape[1]
    u.start_log(params)
    index_train_list, S_Train_list = du.get_batch_indexes(params, S_Train_list)
    index_test_list, S_Test_list = du.get_batch_indexes(params, S_Test_list)
    batch_size = params['batch_size']
    n_train_batches = len(index_train_list) // batch_size
    n_test_batches = len(index_test_list) // batch_size
    nb_epochs = params['n_epochs']
    print("Batch size: %i, train batch size: %i, test batch size: %i" % (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        H = C = np.zeros(shape=(batch_size, params['n_hidden']), dtype=dtype)  # initial hidden state
        sid = 0
        is_train = 1
        x = []
        y = []
        for minibatch_index in range(n_train_batches):
            if minibatch_index == 0:
                # prime the pipeline with the first batch
                (sid, H, C, x, y) = du.prepare_cnn_lstm_batch(
                    index_train_list, minibatch_index, batch_size, S_Train_list,
                    sid, H, C, F_list_train, params, Y_train, X_train)
            pool = ThreadPool(processes=2)
            # train on the current batch while the next one is prepared
            async_t = pool.apply_async(model.train, (x, y, is_train, H, C))
            async_b = pool.apply_async(du.prepare_cnn_lstm_batch,
                                       (index_train_list, minibatch_index, batch_size,
                                        S_Train_list, sid, H, C, F_list_train, params,
                                        Y_train, X_train))
            pool.close()
            pool.join()
            (loss, H, C) = async_t.get()
            (sid, H, C, x, y) = async_b.get()
            if minibatch_index == n_train_batches - 1:
                loss, H, C = model.train(x, y, is_train, H, C)
            batch_loss += loss
        if params['shufle_data'] == 1:
            X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        s = 'TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss)
        u.log_write(s, params)
        if epoch_counter % 1 == 0:
            print("Model testing")
            batch_loss3d = []
            H = C = np.zeros(shape=(batch_size, params['n_hidden']), dtype=dtype)  # reset state: sequences change
            sid = 0
            is_train = 0
            for minibatch_index in range(n_test_batches):
                if minibatch_index == 0:
                    (sid, H, C, x, y) = du.prepare_cnn_lstm_batch(
                        index_test_list, minibatch_index, batch_size, S_Test_list,
                        sid, H, C, F_list_test, params, Y_test, X_test)
                pool = ThreadPool(processes=2)
                async_t = pool.apply_async(model.predictions, (x, is_train, H, C))
                async_b = pool.apply_async(du.prepare_cnn_lstm_batch,
                                           (index_test_list, minibatch_index, batch_size,
                                            S_Test_list, sid, H, C, F_list_test, params,
                                            Y_test, X_test))
                pool.close()
                pool.join()
                (pred, H, C) = async_t.get()
                loss3d = u.get_loss(params, y, pred)
                batch_loss3d.append(loss3d)
                (sid, H, C, x, y) = async_b.get()
                if minibatch_index == n_test_batches - 1:
                    pred, H, C = model.predictions(x, is_train, H, C)
                    loss3d = u.get_loss(params, y, pred)
                    batch_loss3d.append(loss3d)
            batch_loss3d = np.nanmean(batch_loss3d)
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)
            val_counter += 1
            s = 'VAL--> epoch %i | error %f, %f' % (val_counter, batch_loss3d, n_test_batches)
            u.log_write(s, params)
def eval(params):
    batch_size = params['batch_size']
    params['write_est'] = False
    num_examples = len(params['test_files'][0])
    with tf.Graph().as_default():
        batch = dut.distorted_inputs(params, is_training=is_training)
        with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
            logits, aux, end_points = inception_resnet_v2.inception_resnet_v2(
                batch[0], num_classes=params['n_output'], is_training=is_training)
        init_fn = ut.get_init_fn(slim, params)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = params['per_process_gpu_memory_fraction']
        with tf.Session(config=config) as sess:
            init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
            sess.run(init_op)
            init_fn(sess)
            coord = tf.train.Coordinator()
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))
            # float division so the last partial batch is not dropped by integer /
            num_iter = int(math.ceil(num_examples / float(batch_size)))
            print('%s: Testing started.' % (datetime.now()))
            step = 0
            loss_lst = []
            run_lst = [logits]
            # run_lst.append(end_points['PreLogitsFlatten'])
            # run_lst.append(end_points['PrePool'])
            run_lst.extend(batch[1:])
            while step < num_iter and not coord.should_stop():
                try:
                    batch_res = sess.run(run_lst)
                except tf.errors.OutOfRangeError:
                    print('Testing finished....%d' % step)
                    break
                if params['write_est'] == True:
                    ut.write_mid_est(params, batch_res)
                est = batch_res[0]
                gt = batch_res[1]
                loss, _ = ut.get_loss(params, gt, est)
                loss_lst.append(loss)
                s = 'VAL --> batch %i/%i | error %f' % (step, num_iter, loss)
                if step % 10 == 0:
                    ut.log_write(s, params, screen_print=True)
                    print "Current number of examples / mean err: %i / %f" % (step * gt.shape[0], np.mean(loss_lst))
                else:
                    ut.log_write(s, params, screen_print=False)
                step += 1
            coord.request_stop()
            coord.join(threads)
    return np.mean(loss_lst)