def main(_):
    """Restore a trained pose model from its checkpoint and print the test error.

    Args:
        _: unused positional argument (tf.app.run-style entry points pass argv here).

    Side effects:
        Restores variables from ``params["wd"] + "/cp/" + params['mfile']`` and
        prints the evaluation error to stdout.
    """
    params = cf.get_params()
    # Checkpoint file id to restore -- hard-coded; TODO(review): make configurable.
    params['mfile'] = "-11"
    model_path = params["wd"] + "/cp/" + params['mfile']

    # load_pose returns (train_x, train_y, test_x, test_y) -- presumably; confirm
    # against helper.dt_utils.  Only the test split is used in this entry point.
    data = du.load_pose(params)
    data_test = (data[2], data[3])

    with tf.Graph().as_default(), tf.Session() as session:
        # Evaluation-only graph: no training model is built and no variable
        # initializer is run -- all variables come from the restored checkpoint.
        # (The original also built an unused tf.random_uniform_initializer and
        # kept commented-out training-model code; both removed as dead code.)
        mtest = mp.get_model(is_training=False, params=params)
        saver = tf.train.Saver()
        saver.restore(sess=session, save_path=model_path)

        test_err = run_epoch(session, mtest, tf.no_op(), params,
                             data_test, is_training=False)
        print("Test Err: %.5f" % test_err)
# NOTE(review): this span is a fragment recovered from a collapsed line; the
# enclosing function's `def` is not visible and the original indentation was
# lost.  Nesting below is reconstructed -- verify against the full file.
# First part: tail of a validation loop that accumulates 3D pose loss per
# minibatch; second part: the script-level argparse driver.

# Per-minibatch 3D loss -- units/semantics depend on u.get_loss; confirm.
loss3d =u.get_loss(params,y,pred)
batch_loss3d.append(loss3d)
x=[]
y=[]
# Fetch the next prefetched minibatch (async worker result):
# sid = sequence/subject id (presumably), H/C = recurrent hidden/cell state.
(sid,H,C,x,y) = async_b.get() # get the return value from your function.
if(minibatch_index==n_train_batches-1):
    # Last batch of the epoch: evaluate it too so it is counted in the mean.
    pred,H,C= model.predictions(x,is_train,H,C)
    loss3d =u.get_loss(params,y,pred)
    batch_loss3d.append(loss3d)

# Epoch-level validation error; nanmean skips batches whose loss was NaN.
batch_loss3d=np.nanmean(batch_loss3d)
if(batch_loss3d<best_loss):
    # New best model: snapshot weights tagged with epoch and error.
    best_loss=batch_loss3d
    ext=str(epoch_counter)+"_"+str(batch_loss3d)+"_best.p"
    u.write_params(model.params,params,ext)
else:
    # Otherwise keep a rolling pair of checkpoints (alternates between 0.p / 1.p).
    ext=str(val_counter%2)+".p"
    u.write_params(model.params,params,ext)
val_counter+=1#0.08
s ='VAL--> epoch %i | error %f, %f'%(val_counter,batch_loss3d,n_test_batches)
u.log_write(s,params)

# ---- script entry: parse the model choice and launch training ----
params= config.get_params()
parser = argparse.ArgumentParser(description='Training the module')
# Choices are informational only (not enforced): lstm, lstm2, erd.
parser.add_argument('-m','--model',help='Model: lstm, lstm2, erd current('+params["model"]+')',default=params["model"])
args = vars(parser.parse_args())
params["model"]=args["model"]
params=config.update_params(params)
train_rnn(params)
import math
import numpy as np
import tensorflow as tf
from nets import inception
import urllib2  # Python 2 only -- this module targets Python 2.
from datetime import datetime
from helper import config
from helper import utils as ut
from helper import dt_utils as dut
from helper.preprocessing import human36m_preprocessing
from PIL import Image
import numpy as np  # NOTE(review): duplicate of the numpy import above; harmless but redundant.

# Module-level evaluation configuration.
params = config.get_params()
slim = tf.contrib.slim
num_examples = 100        # number of examples to evaluate (unused in the visible code)
subset = 'validation'     # dataset split to evaluate against
is_training = False       # evaluation mode flag

# NOTE(review): `eval` shadows the Python builtin, and the `params` argument
# shadows the module-level `params` above -- consider renaming both.
# The function body continues past the end of this chunk; only its opening
# (queue/reader setup for a single hard-coded image) is visible here.
def eval(params):
    # Evaluate the pose network on a single hard-coded Human3.6M frame.
    # batch_size = params['batch_size']
    # num_examples = len(params['test_files'][0])
    with tf.Graph().as_default() as g:
        # Hard-coded absolute path to one validation frame (S9, Discussion).
        url = '/home/coskun/PycharmProjects/data/pose/mv_val/img/S9/Discussion 1.54138969/frame_00010.png'
        filename_queue = tf.train.string_input_producer(
            [url]) # list of files to read
        # WholeFileReader yields (filename, raw file contents) pairs.
        reader = tf.WholeFileReader()
# NOTE(review): fragment recovered from a collapsed line; it begins mid-way
# through an if/else inside an unseen training function (hence the dangling
# `else:`), then continues with top-level hyperparameter/script code.
# Indentation below is reconstructed -- verify against the full file.
# Python 2 syntax (`print` statements) is preserved.
save_path = base_cp_path + model_name  # checkpoint target: base dir + model name
saved_path = saver.save(sess, save_path)
else:
    # Periodic (every 3rd epoch) checkpoint when the "best model" branch above
    # this fragment did not fire.  `e % 3.0` -- float modulus; presumably e is
    # the epoch counter.
    if e % 3.0 == 0:
        saved_path = saver.save(sess, save_path)
if saved_path != "":
    s = 'MODEL_Saved --> epoch %i | error %f path %s' % (e, total_loss, saved_path)
    ut.log_write(s, params)

# ---- hyperparameter sweep candidates (only the first value of each list is
# actually used below; the lists look like leftovers from a grid search) ----
rnn_keep_prob_lst=[0.8]
rnn_input_prob_lst=[1.0]
seq_lst=[50]
reset_state=[5,100,20]
normalise_data_lst=[3]
# To get current status of params#
params = config.get_params()
# To get current status of params#
###############################
# Checkpoint directory to restore/continue from.
params["mfile"]='/mnt/Data1/hc/tt/cp/lstm_nostate1/cp/'
# adding more values to params#
rnn_keep_prob=0.8          # dropout keep-probability on recurrent connections
input_keep_prob=1.0        # dropout keep-probability on inputs (1.0 = no dropout)
params['rnn_keep_prob']=rnn_keep_prob
params['input_keep_prob']=input_keep_prob
seq=50 #what does this value signify?  -- presumably the RNN sequence length; confirm
res=5 #what does this value signify?  -- presumably epochs between state resets; confirm
with tf.Graph().as_default():
    print "seq: ============== %s ============" % seq
    print "reset_state: ============== %s ============" % res
    print "rnn_keep_prob: ============== %s ============" % rnn_keep_prob
    # NOTE(review): normalise_data is set to 4 here even though
    # normalise_data_lst above is [3] -- confirm which is intended.
    params['normalise_data'] = 4 # adding more values to params, what does this value signify? #
    params['reset_state']=res # adding more values to params, what does this value signify? #
    params['seq_length']=seq # adding more values to params, what does this value signify? #