Code example #1
def test_data(sess, X, Y, index_list, S_list, R_L_list, F_list, e, pre_test,
              n_batches):
    LStateList_F_t = ut.get_zero_state(params)
    LStateList_F_pre = ut.get_zero_state(params)
    LStateList_K_t = ut.get_zero_state(params, t='K')
    LStateList_K_pre = ut.get_zero_state(params, t='K')
    state_reset_counter_lst = [0 for i in range(batch_size)]
    total_loss = 0.0
    total_n_count = 0.0
    for minibatch_index in xrange(n_batches):
        state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
        (LStateList_F_pre,LStateList_K_pre,_,x,y,r,f,state_reset_counter_lst)=\
            dut.prepare_kfl_QRFf_batch(index_list, minibatch_index, batch_size,
                                       S_list, LStateList_F_t, LStateList_F_pre, LStateList_K_t, LStateList_K_pre,
                                       None, None, params, Y, X, R_L_list,F_list,state_reset_counter_lst)
        gt = y
        mes = x
        # print(r)
        feed = {
            tracker._z: mes,
            tracker.target_data: gt,
            tracker.repeat_data: r,
            tracker.initial_state: LStateList_F_pre,
            tracker.initial_state_Q_noise: LStateList_K_pre,
            tracker.output_keep_prob: 1
        }
        # feed = {tracker._z: mes, tracker.target_data: gt, tracker.initial_state: LStateList_F_pre
        #        , tracker._P_inp: P, tracker._I: I}
        LStateList_F_t,LStateList_K_t,final_output,y = \
            sess.run([tracker.final_state_F,tracker.final_state_K,
                      tracker.final_output,tracker.y], feed)

        tmp_lst = []
        for item in LStateList_F_t:
            tmp_lst.append(item.c)
            tmp_lst.append(item.h)
        LStateList_F_t = tmp_lst

        tmp_lst = []
        for item in LStateList_K_t:
            tmp_lst.append(item.c)
            tmp_lst.append(item.h)
        LStateList_K_t = tmp_lst

        # print(y)
        # print(y.shape)
        # print(final_output.shape)
        if params["normalise_data"] == 3 or params["normalise_data"] == 2:
            final_output = ut.unNormalizeData(final_output, params["y_men"],
                                              params["y_std"])
            y = ut.unNormalizeData(y, params["y_men"], params["y_std"])
        test_loss, n_count = ut.get_loss(params, gt=y, est=final_output)
        total_loss += test_loss * n_count
        total_n_count += n_count
        # if (minibatch_index%show_every==0):
        #     print pre_test+" test batch loss: (%i / %i / %i)  %f"%(e,minibatch_index,n_train_batches,test_loss)
    total_loss = total_loss / total_n_count
    s = pre_test + ' Loss --> epoch %i | error %f' % (e, total_loss)
    ut.log_write(s, params)
    return total_loss
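
The bookkeeping above is a weighted running mean: ut.get_loss returns a per-batch mean together with the number of samples it covered, so each batch is re-weighted by its count before the final division. A minimal self-contained sketch of that aggregation (the names here are illustrative, not from the project):

def aggregate_mean_loss(batch_results):
    # batch_results: iterable of (mean_loss, n_count) pairs
    total_loss, total_n = 0.0, 0.0
    for mean_loss, n_count in batch_results:
        total_loss += mean_loss * n_count  # undo the per-batch mean
        total_n += n_count
    return total_loss / total_n

print(aggregate_mean_loss([(0.5, 10), (0.2, 30)]))  # 0.275, not the naive 0.35
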
Code example #2
def eval(params):
    batch_size = params['batch_size']
    num_examples = len(params['test_files'][0])
    with tf.Graph().as_default():
        batch = dut.distorted_inputs(params,is_training=is_training)

        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, end_points = vgg.vgg_19(batch[0], num_classes=params['n_output'], is_training=is_training)

        init_fn=ut.get_init_fn(slim,params)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = params['per_process_gpu_memory_fraction']

        with tf.Session(config=config) as sess:
            # sess.run(tf.initialize_all_variables())
            sess.run(tf.initialize_local_variables())
            coord = tf.train.Coordinator()
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))

            init_fn(sess)
            num_iter = int(math.ceil(num_examples / float(batch_size)))  # float() so the ceil covers the final partial batch under Python 2 division
            print('%s: Testing started.' % (datetime.now()))

            step = 0
            loss_lst=[]
            run_lst=[]
            run_lst.append(logits)
            run_lst.extend(batch[1:])

            while step < num_iter and not coord.should_stop():
                try:
                    batch_res= sess.run(run_lst)
                except tf.errors.OutOfRangeError:
                    print ('Testing finished....%d'%step)
                    break
                if(params['write_est']==True):
                    ut.write_est(params,batch_res)
                est=batch_res[0]
                gt=batch_res[1]
                loss= ut.get_loss(params,gt,est)
                loss_lst.append(loss)
                s ='VAL --> batch %i/%i | error %f'%(step,num_iter,loss)
                ut.log_write(s,params)
                # joint_list=['/'.join(p1.split('/')[0:-1]).replace('joints','img').replace('.cdf','')+'/frame_'+(p1.split('/')[-1].replace('.txt','')).zfill(5)+'.png' for p1 in image_names]
                # print ('List equality check:')
                # print len(label_names) == len(set(label_names))
                # print sum(joint_list==label_names)==(len(est))
                # print(len(label_names))
                step += 1
            coord.request_stop()
            coord.join(threads)
            return np.mean(loss_lst)
Code example #3
def test_data(sess,params,X,Y,index_list,S_list,R_L_list,F_list,e, pre_test,n_batches):
    is_test=1
    dic_state=ut.get_state_list(params)
    I= np.asarray([np.diag([1.0]*params['n_output']) for i in range(params["batch_size"])],dtype=np.float32)
    params["reset_state"]=-1 #Never reset

    state_reset_counter_lst=[0 for i in range(batch_size)]
    total_loss=0.0
    total_pred_loss=0.0
    total_meas_loss=0.0
    total_n_count=0.0
    for minibatch_index in xrange(n_batches):
        state_reset_counter_lst=[s+1 for s in state_reset_counter_lst]
        # print state_reset_counter_lst
        (dic_state,x,y,r,f,_,state_reset_counter_lst,_)= \
            th.prepare_batch(is_test,index_list, minibatch_index, batch_size,
                                       S_list, dic_state, params, Y, X, R_L_list,F_list,state_reset_counter_lst)
        feed=th.get_feed(Model,params,r,x,y,I,dic_state, is_training=0)

        states,final_output,final_pred_output,final_meas_output,y =sess.run([Model.states,Model.final_output,Model.final_pred_output,Model.final_meas_output,Model.y], feed)

        for k in states.keys():
            dic_state[k] = states[k]

        if params["normalise_data"]==3 or params["normalise_data"]==2:
            final_output=ut.unNormalizeData(final_output,params["y_men"],params["y_std"])
            final_pred_output=ut.unNormalizeData(final_pred_output,params["y_men"],params["y_std"])
            final_meas_output=ut.unNormalizeData(final_meas_output,params["x_men"],params["x_std"])
            y=ut.unNormalizeData(y,params["y_men"],params["y_std"])
        if params["normalise_data"]==4:
            final_output=ut.unNormalizeData(final_output,params["x_men"],params["x_std"])
            final_pred_output=ut.unNormalizeData(final_pred_output,params["x_men"],params["x_std"])
            final_meas_output=ut.unNormalizeData(final_meas_output,params["x_men"],params["x_std"])
            y=ut.unNormalizeData(y,params["x_men"],params["x_std"])

        test_loss,n_count=ut.get_loss(params,gt=y,est=final_output,r=r)
        test_pred_loss,n_count=ut.get_loss(params,gt=y,est=final_pred_output,r=r)
        test_meas_loss,n_count=ut.get_loss(params,gt=y,est=final_meas_output,r=r)
        total_loss+=test_loss*n_count
        total_pred_loss+=test_pred_loss*n_count
        total_meas_loss+=test_meas_loss*n_count
        total_n_count+=n_count
        # if (minibatch_index%show_every==0):
        #     print pre_test+" test batch loss: (%i / %i / %i)  %f"%(e,minibatch_index,n_train_batches,test_loss)
    total_loss=total_loss/total_n_count
    total_pred_loss=total_pred_loss/total_n_count
    total_meas_loss=total_meas_loss/total_n_count
    s =pre_test+' Loss --> epoch %i | error %f, %f, %f'%(e,total_loss,total_pred_loss,total_meas_loss)
    ut.log_write(s,params)
    return total_loss
Code example #4
File: train_mlp.py  Project: zhengzh/lstmkf_ICCV2017
def test_data(sess, X, Y, index_list, S_list, R_L_list, F_list, e, pre_test,
              n_batches):
    state_reset_counter_lst = [0 for i in range(batch_size)]
    total_loss = 0.0
    total_losss = 0.0
    total_n_count = 0.0
    total_n_countt = 0.0
    for minibatch_index in xrange(n_batches):
        state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
        x = X[minibatch_index * batch_size:(minibatch_index + 1) *
              batch_size]  #60*20*1024
        y = Y[minibatch_index * batch_size:(minibatch_index + 1) *
              batch_size]  #60*20*1024
        feed = {
            model.input_data: x,
            model.target_data: y,
            model.is_training: False,
            model.output_keep_prob: 1.0
        }
        final_output = sess.run([model.final_output], feed)
        final_output = final_output[0]
        test_loss, n_count = ut.get_loss(params, gt=y, est=final_output)
        test_losss, n_countt = ut.get_loss(params, gt=y, est=x)  # baseline: score the raw input as the estimate
        total_loss += test_loss * n_count
        total_losss += test_losss * n_countt
        total_n_count += n_count
        total_n_countt += n_countt
        if (minibatch_index % show_every == 0):
            print pre_test + " test batch loss: (%i / %i / %i)  %f" % (
                e, minibatch_index, n_batches, test_loss)
    total_loss = total_loss / total_n_count
    total_losss = total_losss / total_n_countt
    s = pre_test + ' Loss --> epoch %i | error %f, %f' % (e, total_loss,
                                                          total_losss)
    ut.log_write(s, params)
    return total_loss
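
The doubled accumulators (total_losss, total_n_countt) track a second score: the same loss computed with est=x, i.e. passing the raw input through untouched. Comparing the model against that pass-through baseline shows whether the network actually improves on its input. A minimal sketch under the assumption of a mean-squared loss (all names are illustrative):

import numpy as np

def mse(gt, est):
    return float(np.mean((gt - est) ** 2))

def eval_with_baseline(batches, predict_fn):
    model_sum, baseline_sum, n = 0.0, 0.0, 0
    for x, y in batches:
        model_sum += mse(y, predict_fn(x)) * len(x)
        baseline_sum += mse(y, x) * len(x)  # est = x: no model at all
        n += len(x)
    return model_sum / n, baseline_sum / n
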
Code example #5
def train_rnn(params):
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train, X_test,
     Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    params["len_train"] = len(X_train)
    params["len_test"] = len(X_test)
    u.start_log(params)
    batch_size = params['batch_size']

    n_train_batches = params["len_train"]
    n_train_batches /= batch_size

    n_test_batches = params["len_test"]
    n_test_batches /= batch_size

    nb_epochs = params['n_epochs']

    print("Batch size: %i, train batch size: %i, test batch size: %i" %
          (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        sid = 0
        is_train = 1
        for minibatch_index in range(n_train_batches):
            x = X_train[minibatch_index * batch_size:(minibatch_index + 1) *
                        batch_size]  #60*20*1024
            y = Y_train[minibatch_index * batch_size:(minibatch_index + 1) *
                        batch_size]  #60*20*1024
            if (params["model"] == "blstmnp"):
                x_b = np.asarray(map(np.flipud, x))
                loss = model.train(x, x_b, y)
            else:
                loss = model.train(x, y, is_train)
            batch_loss += loss
        if params['shufle_data'] == 1:
            X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        s = 'TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss)
        u.log_write(s, params)
        if (epoch_counter % 3 == 0):
            print("Model testing")
            batch_loss3d = []
            is_train = 0
            for minibatch_index in range(n_test_batches):
                x = X_test[minibatch_index * batch_size:(minibatch_index + 1) *
                           batch_size]  #60*20*1024
                y = Y_test[minibatch_index * batch_size:(minibatch_index + 1) *
                           batch_size]  #60*20*1024
                pred = model.predictions(x, is_train)
                loss3d = u.get_loss(params, y, pred)
                batch_loss3d.append(loss3d)
            batch_loss3d = np.nanmean(batch_loss3d)
            if (batch_loss3d < best_loss):
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)

            val_counter += 1  #0.08
            s = 'VAL--> epoch %i | error %f, %f' % (val_counter, batch_loss3d,
                                                    n_test_batches)
            u.log_write(s, params)
Code example #6
def train():
    model = Model(params)
    num_epochs = 1000
    decay_rate = 0.4
    show_every = 100

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = params[
        'per_process_gpu_memory_fraction']
    with tf.Session(config=config) as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
        merged = tf.summary.merge_all()
        summary_writer = tf.train.SummaryWriter(params["sm"], sess.graph)

        for e in xrange(num_epochs):
            sess.run(tf.assign(model.lr, params['lr'] * (decay_rate**e)))
            LStateList_t = [
                np.zeros(shape=(batch_size, params['n_hidden']),
                         dtype=np.float32) for i in range(params['nlayer'] * 2)
            ]  # initial hidden state
            LStateList_pre = [
                np.zeros(shape=(batch_size, params['n_hidden']),
                         dtype=np.float32) for i in range(params['nlayer'] * 2)
            ]  # initial hidden state
            state_reset_counter_lst = [0 for i in range(batch_size)]
            total_train_loss = 0
            for minibatch_index in xrange(n_train_batches):
                start = time.time()
                state_reset_counter_lst = [
                    s + 1 for s in state_reset_counter_lst
                ]
                (LStateList_b, x, y,
                 state_reset_counter_lst) = dut.prepare_lstm_batch_joints(
                     index_train_list, minibatch_index, batch_size,
                     S_Train_list, LStateList_t, LStateList_pre, params,
                     F_names_training, state_reset_counter_lst)
                LStateList_pre = LStateList_b

                y = y.reshape(batch_size * params["seq_length"],
                              params["n_output"])
                feed = {
                    model.input_data: x,
                    model.input_zero: np.ceil(x),
                    model.target_data: y,
                    model.initial_state: LStateList_b,
                    model.is_training: True,
                    model.output_keep_prob: 0.5
                }
                summary,train_loss, LStateList_t,_ =\
                    sess.run([merged,model.cost, model.final_state, model.train_op], feed)
                summary_writer.add_summary(summary, minibatch_index)
                tmp_lst = []
                for item in LStateList_t:
                    tmp_lst.append(item.c)
                    tmp_lst.append(item.h)
                LStateList_t = tmp_lst
                total_train_loss += train_loss
                if (minibatch_index % show_every == 0):
                    print "Training batch loss: (%i / %i / %i)  %f" % (
                        e, minibatch_index, n_train_batches, train_loss)

            total_train_loss = total_train_loss / n_train_batches
            s = 'TRAIN --> epoch %i | error %f' % (e, total_train_loss)
            ut.log_write(s, params)

            LStateList_t = [
                np.zeros(shape=(batch_size, params['n_hidden']),
                         dtype=np.float32) for i in range(params['nlayer'] * 2)
            ]  # initial hidden state
            LStateList_pre = [
                np.zeros(shape=(batch_size, params['n_hidden']),
                         dtype=np.float32) for i in range(params['nlayer'] * 2)
            ]  # initial hidden state
            state_reset_counter_lst = [0 for i in range(batch_size)]
            total_test_loss = 0
            for minibatch_index in xrange(n_test_batches):
                state_reset_counter_lst = [
                    s + 1 for s in state_reset_counter_lst
                ]
                (LStateList_b, x, y,
                 state_reset_counter_lst) = dut.prepare_lstm_batch(
                     index_test_list, minibatch_index, batch_size, S_Test_list,
                     LStateList_t, LStateList_pre, params, Y_test, X_test,
                     state_reset_counter_lst)
                LStateList_pre = LStateList_b
                y = y.reshape(batch_size * params["seq_length"],
                              params["n_output"])
                feed = {
                    model.input_data: x,
                    model.target_data: y,
                    model.initial_state: LStateList_b,
                    model.is_training: False,
                    model.output_keep_prob: 1.0
                }
                LStateList_t, final_output = sess.run(
                    [model.final_state, model.final_output], feed)
                test_loss = ut.get_loss(params, gt=y, est=final_output)
                tmp_lst = []
                for item in LStateList_t:
                    tmp_lst.append(item.c)
                    tmp_lst.append(item.h)
                LStateList_t = tmp_lst
                total_test_loss += test_loss
                if (minibatch_index % show_every == 0):
                    print "Test batch loss: (%i / %i / %i)  %f" % (
                        e, minibatch_index, n_test_batches, test_loss)
            total_test_loss = total_test_loss / n_test_batches
            print "Total test loss %f" % total_test_loss
            s = 'VAL --> epoch %i | error %f' % (e, total_test_loss)
            ut.log_write(s, params)
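
Both loops above repeat the same unpacking step: sess.run on the final state returns one LSTMStateTuple(c, h) per layer, while the initial_state feed expects the flat list [c0, h0, c1, h1, ...] that the zero-state initializers build. A minimal sketch of that conversion, with a namedtuple standing in for TensorFlow's LSTMStateTuple:

from collections import namedtuple

LSTMStateTuple = namedtuple('LSTMStateTuple', ['c', 'h'])

def flatten_state(state_tuples):
    # [LSTMStateTuple(c0, h0), LSTMStateTuple(c1, h1)] -> [c0, h0, c1, h1]
    flat = []
    for item in state_tuples:
        flat.append(item.c)
        flat.append(item.h)
    return flat
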
Code example #7
(F_names_training, S_Train_list, F_names_test,
 S_Test_list) = dut.prepare_training_set_fnames(params)
index_train_list, S_Train_list = dut.get_seq_indexes(params, S_Train_list)
index_test_list, S_Test_list = dut.get_seq_indexes(params, S_Test_list)

batch_size = params['batch_size']
n_train_batches = len(index_train_list)
n_train_batches /= batch_size

n_test_batches = len(index_test_list)
n_test_batches /= batch_size

params['training_size'] = len(F_names_training) * params['seq_length']
params['test_size'] = len(F_names_test) * params['seq_length']
ut.start_log(params)
ut.log_write("Model training started", params)
# summary_writer = tf.train.SummaryWriter(params["sm"])


def train():
    model = Model(params)
    num_epochs = 1000
    decay_rate = 0.4
    show_every = 100

    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = params[
        'per_process_gpu_memory_fraction']
    with tf.Session(config=config) as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
Code example #8
File: train_cnn.py  Project: Seleucia/v3d
def train_rnn(params):
   rng = RandomStreams(seed=1234)
   (X_train,Y_train,S_Train_list,F_list_train,G_list_train,X_test,Y_test,S_Test_list,F_list_test,G_list_test)=du.load_pose(params)
   F_list_train,Y_train=du.shuffle_in_unison_inplace(F_list_train,Y_train)
   params["len_train"]=len(F_list_train)
   params["len_test"]=len(F_list_test)
   u.start_log(params)
   batch_size=params['batch_size']

   n_train_batches = len(F_list_train)
   n_train_batches /= batch_size

   n_test_batches = len(F_list_test)
   n_test_batches /= batch_size

   nb_epochs=params['n_epochs']

   print("Batch size: %i, train batch size: %i, test batch size: %i"%(batch_size,n_train_batches,n_test_batches))
   u.log_write("Model build started",params)
   if params['run_mode']==1:
      model= model_provider.get_model_pretrained(params,rng)
      u.log_write("Pretrained loaded: %s"%(params['mfile']),params)
   else:
     model= model_provider.get_model(params,rng)
   u.log_write("Number of parameters: %s"%(model.n_param),params)
   train_errors = np.ndarray(nb_epochs)
   u.log_write("Training started",params)
   val_counter=0
   best_loss=1000
   for epoch_counter in range(nb_epochs):
      batch_loss = 0.
      is_train=1
      for minibatch_index in range(n_train_batches):
          if(minibatch_index==0):
              x,y=du.prepare_cnn_batch(minibatch_index, batch_size, F_list_train, Y_train)
          pool = ThreadPool(processes=2)
          async_t = pool.apply_async(model.train, (x, y,is_train))
          async_b = pool.apply_async(du.prepare_cnn_batch, (minibatch_index, batch_size, F_list_train, Y_train))
          pool.close()
          pool.join()
          loss = async_t.get()  # get the return value from your function.
          x=[]
          y=[]
          (x,y) = async_b.get()  # get the return value from your function.

          if(minibatch_index==n_train_batches-1):
              loss= model.train(x, y,is_train)

          batch_loss += loss
      if params['shufle_data']==1:
         F_list_train,Y_train=du.shuffle_in_unison_inplace(F_list_train,Y_train)
      train_errors[epoch_counter] = batch_loss
      batch_loss/=n_train_batches
      s='TRAIN--> epoch %i | error %f'%(epoch_counter, batch_loss)
      u.log_write(s,params)
      if(epoch_counter%1==0):
          print("Model testing")
          batch_loss3d = []
          is_train=0
          x=[]
          y=[]
          for minibatch_index in range(n_test_batches):
              if(minibatch_index==0):
                  x,y=du.prepare_cnn_batch(minibatch_index, batch_size, F_list_test, Y_test)
              pool = ThreadPool(processes=2)
              async_t = pool.apply_async(model.predictions, (x,is_train))
              async_b = pool.apply_async(du.prepare_cnn_batch, (minibatch_index, batch_size, F_list_test, Y_test))
              pool.close()
              pool.join()
              pred = async_t.get()  # get the return value from your function.
              loss3d =np.mean(np.linalg.norm((np.asarray(pred) - y)))
              x=[]
              y=[]
              batch_loss3d.append(loss3d)
              (x,y) = async_b.get()  # get the return value from your function.

              if(minibatch_index==n_test_batches-1):
                  pred= model.predictions(x,is_train)
                  loss3d =np.mean(np.linalg.norm((np.asarray(pred) - y)))
                  batch_loss3d.append(loss3d)

          batch_loss3d=np.nanmean(batch_loss3d)
          if(batch_loss3d<best_loss):
             best_loss=batch_loss3d
             ext=str(epoch_counter)+"_"+str(batch_loss3d)+"_best.p"
             u.write_params(model.params,params,ext)
          else:
              ext=str(val_counter%2)+".p"
              u.write_params(model.params,params,ext)

          val_counter+=1#0.08
          s ='VAL--> epoch %i | error %f, %f'%(val_counter,batch_loss3d,n_test_batches)
          u.log_write(s,params)
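
The epoch loop above overlaps GPU work with disk I/O: while model.train consumes the current batch, a second worker thread already loads the next one, and both results are collected after pool.join(). A minimal sketch of that double-buffering pattern (train_fn and load_fn are illustrative stand-ins):

from multiprocessing.pool import ThreadPool

def pipelined_epoch(train_fn, load_fn, n_batches):
    x, y = load_fn(0)  # prime the pipeline with the first batch
    losses = []
    for i in range(n_batches):
        pool = ThreadPool(processes=2)
        async_t = pool.apply_async(train_fn, (x, y))                 # consume batch i
        async_b = pool.apply_async(load_fn, ((i + 1) % n_batches,))  # prefetch batch i+1
        pool.close()
        pool.join()
        losses.append(async_t.get())
        x, y = async_b.get()
    return losses
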
Code example #9
def train():
    batch_size = params["batch_size"]
    num_epochs = 1000
    decay_rate = 0.5
    show_every = 100
    deca_start = 2
    with tf.Session(config=gpu_config) as sess:
        tf.global_variables_initializer().run()
        # sess.run(tracker.predict())
        print 'Training Noise KLSTM'
        noise_std = params['noise_std']
        new_noise_std = 0.0
        for e in range(num_epochs):
            if e > (deca_start - 1):
                sess.run(
                    tf.assign(
                        tracker.lr,
                        params['lr'] * (decay_rate**((e - deca_start) / 2))))
            else:
                sess.run(tf.assign(tracker.lr, params['lr']))
            total_train_loss = 0
            LStateList_F_t = ut.get_zero_state(params)
            LStateList_F_pre = ut.get_zero_state(params)
            LStateList_K_t = ut.get_zero_state(params, t='K')
            LStateList_K_pre = ut.get_zero_state(params, t='K')
            state_reset_counter_lst = [0 for i in range(batch_size)]
            index_train_list_s = index_train_list
            if params["shufle_data"] == 1 and params['reset_state'] == 1:
                index_train_list_s = ut.shufle_data(index_train_list)

            for minibatch_index in xrange(n_train_batches):
                state_reset_counter_lst = [
                    s + 1 for s in state_reset_counter_lst
                ]
                (LStateList_F_pre,LStateList_K_pre,_,x,y,r,f,state_reset_counter_lst)=\
                    dut.prepare_kfl_QRFf_batch(index_train_list_s, minibatch_index, batch_size,
                                               S_Train_list, LStateList_F_t, LStateList_F_pre, LStateList_K_t, LStateList_K_pre,
                                               None, None, params, Y_train, X_train, R_L_Train_list,F_list_train,state_reset_counter_lst)
                if noise_std > 0.0:
                    u_cnt = e * n_train_batches + minibatch_index
                    if u_cnt in params['noise_schedule']:
                        new_noise_std = noise_std * (
                            u_cnt / (params['noise_schedule'][0]))
                        s = 'NOISE --> u_cnt %i | error %f' % (u_cnt,
                                                               new_noise_std)
                        ut.log_write(s, params)
                    if new_noise_std > 0.0:
                        noise = np.random.normal(0.0, new_noise_std, x.shape)
                        x = noise + x

                gt = y
                mes = x
                feed = {
                    tracker._z: mes,
                    tracker.target_data: gt,
                    tracker.repeat_data: r,
                    tracker.initial_state: LStateList_F_pre,
                    tracker.initial_state_K: LStateList_K_pre,
                    tracker.output_keep_prob: params['rnn_keep_prob']
                }
                # feed = {tracker._z: mes, tracker.target_data: gt, tracker.initial_state: LStateList_F_pre
                #        , tracker._P_inp: P, tracker._I: I}
                train_loss,LStateList_F_t,LStateList_K_t,_ = \
                    sess.run([tracker.cost,tracker.final_state_F,tracker.final_state_Q,
                              tracker.train_op], feed)

                tmp_lst = []
                for item in LStateList_F_t:
                    tmp_lst.append(item.c)
                    tmp_lst.append(item.h)
                LStateList_F_t = tmp_lst

                tmp_lst = []
                for item in LStateList_K_t:
                    tmp_lst.append(item.c)
                    tmp_lst.append(item.h)
                LStateList_K_t = tmp_lst

                total_train_loss += train_loss
                if (minibatch_index % show_every == 0):
                    print "Training batch loss: (%i / %i / %i)  %f" % (
                        e, minibatch_index, n_train_batches, train_loss)

            total_train_loss = total_train_loss / n_train_batches
            s = 'TRAIN --> epoch %i | error %f' % (e, total_train_loss)
            ut.log_write(s, params)
            pre_test = "TEST_Data"
            total_loss = test_data(sess, X_test, Y_test, index_test_list,
                                   S_Test_list, R_L_Test_list, F_list_test, e,
                                   pre_test, n_test_batches)
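
The noise block above implements a schedule: whenever the global update count u_cnt hits an entry of params['noise_schedule'], the standard deviation is rescaled relative to the first scheduled step, and from then on Gaussian noise of that magnitude is added to the inputs. A minimal sketch of the same logic (names are illustrative; the floor division mirrors the Python 2 integer division in the original):

import numpy as np

def apply_noise_schedule(x, u_cnt, base_std, schedule, current_std):
    if u_cnt in schedule:
        current_std = base_std * (u_cnt // schedule[0])  # grows stepwise
    if current_std > 0.0:
        x = x + np.random.normal(0.0, current_std, x.shape)
    return x, current_std
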
Code example #10
File: train.py  Project: Seleucia/CNNRNet
def train_model(params):
  rn_id=params["rn_id"]
  im_type=params["im_type"]
  batch_size =params["batch_size"]
  n_epochs =params["n_epochs"]

  datasets = data_loader.load_data(params)
  utils.start_log(datasets,params)

  X_train, y_train,overlaps_train = datasets[0]
  X_val, y_val,overlaps_val = datasets[1]
  X_test, y_test,overlaps_test = datasets[2]

  # compute number of minibatches for training, validation and testing
  n_train_batches = len(X_train)
  n_valid_batches = len(X_val)
  n_test_batches = len(X_test)
  n_train_batches /= batch_size
  n_valid_batches /= batch_size
  n_test_batches /= batch_size

  y_val_mean=np.mean(y_val)
  y_val_abs_mean=np.mean(np.abs(y_val))

  utils.log_write("Model build started",params)
  model=  model_provider.get_model(params)
  utils.log_write("Number of parameters: %s"%(model.count_params()),params)
  run_mode=params["run_mode"]
  utils.log_write("Model build ended",params)
  utils.log_write("Training started",params)
  best_validation_loss=np.inf
  epoch_counter = 0
  n_patch=params["n_patch"]
  n_repeat=params["n_repeat"]  # instead of extracting many batches per epoch, we repeat the epoch so each pass samples a different patch
  while (epoch_counter < n_epochs):
      epoch_counter = epoch_counter + 1
      print("Training model...")
      map_list=range(n_train_batches*n_repeat)
      random.shuffle(map_list)
      for index in xrange(n_train_batches*n_repeat):
          minibatch_index=index%n_train_batches
          map_index=map_list[index]%n_repeat
          # shuffle here because a full pass over the dataset has just finished; the remaining repeats only extract different patches
          if(index>0 and minibatch_index==0):  # checks whether we have finished the whole dataset
             ext=params["model_file"]+params["model"]+"_"+im_type+"_m_"+str(index%5)+".hdf5"
             model.save_weights(ext, overwrite=True)
             X_train,y_train=dt_utils.shuffle_in_unison_inplace(X_train,y_train)

          iter = (epoch_counter - 1) * n_train_batches + index
          if iter % 100 == 0:
              print 'training @ iter = ', iter
          batch_loss=0
          Fx = X_train[minibatch_index * batch_size: (minibatch_index + 1) * batch_size]
          data_y = y_train[minibatch_index * batch_size: (minibatch_index + 1) * batch_size]
          for patch_index in xrange(n_patch):
             patch_loc=utils.get_patch_loc(params)
             argu= [(params,"F", Fx,patch_loc,map_index),(params,"S", Fx,patch_loc,map_index)]
             results = dt_utils.asyn_load_batch_images(argu)
             data_Fx = results[0]
             data_Sx = results[1]
             if(params["model_type"]==4):
                data=data_Sx-data_Fx
                loss =model.train_on_batch(data, data_y)
             else:
                loss =model.train_on_batch([data_Fx, data_Sx], data_y)
             if isinstance(loss,list):
                batch_loss+=loss[0]
             else:
                batch_loss+=loss

          batch_loss/=n_patch
          s='TRAIN--> epoch %i | batch_index %i/%i | error %f'%(epoch_counter, index + 1, n_train_batches*n_repeat,  batch_loss)
          utils.log_write(s,params)
          if(run_mode==1):
              break
      # shuffle once more to be safe
      X_train,y_train=dt_utils.shuffle_in_unison_inplace(X_train,y_train)
      ext=params["model_file"]+params["model"]+"_"+im_type+"_e_"+str(rn_id)+"_"+str(epoch_counter % 10)+".hdf5"
      model.save_weights(ext, overwrite=True)
      if params['validate']==0:
         print("Validation skipped...")
         if(run_mode==1):
              break
         continue
      print("Validating model...")
      this_validation_loss = 0
      map_list=range(n_valid_batches*n_repeat)
      random.shuffle(map_list)
      for index in xrange(n_valid_batches*n_repeat):
         i = index%n_valid_batches
         map_index=map_list[index]%n_repeat
         epoch_loss=0
         Fx = X_val[i * batch_size: (i + 1) * batch_size]
         data_y = y_val[i * batch_size: (i + 1) * batch_size]
         for patch_index in xrange(n_patch):
            patch_loc=utils.get_patch_loc(params)
            argu= [(params,"F", Fx,patch_loc,map_index),(params,"S", Fx,patch_loc,map_index)]
            results = dt_utils.asyn_load_batch_images(argu)
            data_Fx = results[0]
            data_Sx = results[1]
            if(params["model_type"]==4):
                data=data_Sx-data_Fx
                loss =model.test_on_batch(data, data_y)
            else:
                loss= model.test_on_batch([data_Fx, data_Sx],data_y)

            if isinstance(loss,list):
                epoch_loss+=loss[0]
            else:
               epoch_loss+=loss
         epoch_loss/=n_patch
         this_validation_loss +=epoch_loss
         if(run_mode==1):
              break
      this_validation_loss /= (n_valid_batches*n_repeat)
      s ='VAL--> epoch %i | error %f | data mean/abs %f/%f'%(epoch_counter, this_validation_loss,y_val_mean,y_val_abs_mean)
      utils.log_write(s,params)
      if this_validation_loss < best_validation_loss:
          best_validation_loss = this_validation_loss
          ext=params["model_file"]+params["model"]+"_"+im_type+"_"+"_best_"+str(rn_id)+"_"+str(epoch_counter)+".hdf5"
          model.save_weights(ext, overwrite=True)
      if(run_mode==1):
              break
  utils.log_write("Training ended",params)
Code example #11
File: trainV2.py  Project: nagyist/RNNPose
def train_rnn(params):
    data = []
    for sindex in range(0, params['seq_length'], 5):
        (X_train, Y_train, X_test, Y_test) = du.load_pose(params,
                                                          sindex=sindex)
        data.append((X_train, Y_train, X_test, Y_test))
    (X_train, Y_train, X_test, Y_test) = data[0]
    params["len_train"] = X_train.shape[0] * X_train.shape[1]
    params["len_test"] = X_test.shape[0] * X_test.shape[1]
    u.start_log(params)
    batch_size = params['batch_size']
    n_train_batches = len(X_train)
    n_train_batches /= batch_size

    n_test_batches = len(X_test)
    n_test_batches /= batch_size

    nb_epochs = params['n_epochs']

    print("Batch size: %i, train batch size: %i, test batch size: %i" %
          (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['resume'] == 1:
        model = model_provider.get_model_pretrained(params)
    else:
        model = model_provider.get_model(params)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 10000
    for epoch_counter in range(nb_epochs):
        (X_train, Y_train, X_test,
         Y_test) = data[np.mod(epoch_counter, len(data))]
        if params['shufle_data'] == 1:
            X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
        n_train_batches = len(X_train)
        n_train_batches /= batch_size

        n_test_batches = len(X_test)
        n_test_batches /= batch_size
        batch_loss = 0.
        for minibatch_index in range(n_train_batches):
            x = X_train[minibatch_index * batch_size:(minibatch_index + 1) *
                        batch_size]  #60*20*1024
            y = Y_train[minibatch_index * batch_size:(minibatch_index + 1) *
                        batch_size]  #60*20*54
            if (params["model"] == "blstmnp"):
                x_b = np.asarray(map(np.flipud, x))
                loss = model.train(x, x_b, y)
            else:
                loss = model.train(x, y)
            batch_loss += loss
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        s = 'TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss)
        u.log_write(s, params)
        if (epoch_counter % 10 == 0):
            print("Model testing")
            batch_loss = 0.
            batch_loss3d = 0.
            for minibatch_index in range(n_test_batches):
                x = X_test[minibatch_index * batch_size:(minibatch_index + 1) *
                           batch_size]
                y = Y_test[minibatch_index * batch_size:(minibatch_index + 1) *
                           batch_size]
                if (params["model"] == "blstmnp"):
                    x_b = np.asarray(map(np.flipud, x))
                    pred = model.predictions(x, x_b)
                else:
                    pred = model.predictions(x)

                loss = np.nanmean(np.abs(pred - y)**2)
                loss3d = u.get_loss(y, pred)
                batch_loss += loss
                batch_loss3d += loss3d
            batch_loss /= n_test_batches
            batch_loss3d /= n_test_batches
            if (batch_loss3d < best_loss):
                best_loss = batch_loss3d
                ext = str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)

            val_counter += 1  #0.08
            s = 'VAL--> epoch %i | error %f, %f %f' % (
                val_counter, batch_loss, batch_loss3d, n_test_batches)
            u.log_write(s, params)
Code example #12
File: test.py  Project: zhengzh/lstmkf_ICCV2017
for tr in lst_bool:
    loss = 0.
    total_cnt = 0.
    for action in lst_action:
        params["action"] = action
        params['test_files'] = dt.load_files(params, is_training=tr)
        test_loss = inception_output.eval(params)
        cnt = len(params['test_files'][0])
        loss = loss + test_loss * cnt
        total_cnt = total_cnt + cnt
        if tr == False:
            s = 'TEST Set --> Action: %s, Frame Count: %i Final error %f' % (
                action, cnt, test_loss)
        else:
            s = 'Train Set --> Action: %s, Frame Count: %i Final error %f' % (
                action, cnt, test_loss)
        ut.log_write(s, params)

    loss = loss / total_cnt
    if tr == False:
        s = 'Total Test Frame Count: %i Final error %f' % (total_cnt, loss)
    else:
        s = 'Total Training Frame Count: %i Final error %f' % (total_cnt, loss)
    ut.log_write(s, params)

# ut.start_log(params)
# ut.log_write("Model testing started",params)
# test_loss=inception_eval.eval(params)
# s ='VAL --> Final error %f'%(test_loss)
# ut.log_write(s,params)
Code example #13
File: train_slam.py  Project: zhengzh/lstmkf_ICCV2017
def train(tracker, params):
    I = np.asarray([
        np.diag([1.0] * params['n_output'])
        for i in range(params["batch_size"])
    ],
                   dtype=np.float32)

    batch_size = params["batch_size"]

    decay_rate = 0.95
    # show_every=100
    deca_start = 10
    # pre_best_loss=10000
    with tf.Session(config=gpu_config) as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()
        # sess.run(tracker.predict())
        print 'Training model:' + params["model"]
        noise_std = params['noise_std']
        new_noise_std = 0.0
        median_result_lst = []
        mean_result_lst = []
        for e in range(num_epochs):
            if e == 2:
                params['lr'] = params['lr']  # no-op
            if e > (deca_start - 1):
                sess.run(
                    tf.assign(tracker.lr, params['lr'] * (decay_rate**(e))))
            else:
                sess.run(tf.assign(tracker.lr, params['lr']))
            total_train_loss = 0

            state_reset_counter_lst = [0 for i in range(batch_size)]
            index_train_list_s = index_train_list
            dic_state = ut.get_state_list(params)
            if params["shufle_data"] == 1 and params['reset_state'] == 1:
                index_train_list_s = ut.shufle_data(index_train_list)

            for minibatch_index in xrange(n_train_batches):
                is_test = 0
                state_reset_counter_lst = [
                    s + 1 for s in state_reset_counter_lst
                ]
                (dic_state,x,y,r,f,_,state_reset_counter_lst,_)= \
                    th.prepare_batch(is_test,index_train_list_s, minibatch_index, batch_size,
                                       S_Train_list, dic_state, params, Y_train, X_train, R_L_Train_list,F_list_train,state_reset_counter_lst)
                if noise_std > 0.0:
                    u_cnt = e * n_train_batches + minibatch_index
                    if u_cnt in params['noise_schedule']:
                        new_noise_std = noise_std * (
                            u_cnt / (params['noise_schedule'][0]))
                        s = 'NOISE --> u_cnt %i | error %f' % (u_cnt,
                                                               new_noise_std)
                        ut.log_write(s, params)
                    if new_noise_std > 0.0:
                        noise = np.random.normal(0.0, new_noise_std, x.shape)
                        x = noise + x

                feed = th.get_feed(tracker,
                                   params,
                                   r,
                                   x,
                                   y,
                                   I,
                                   dic_state,
                                   is_training=1)
                train_loss, states, _ = sess.run(
                    [tracker.cost, tracker.states, tracker.train_op], feed)
                # print last_pred.shape
                # print states.shape

                for k in states.keys():
                    dic_state[k] = states[k]

                total_train_loss += train_loss
            # if e%5==0:
            #         print total_train_loss
            pre_test = "TEST_Data"
            total_loss, median_result, mean_result, final_output_lst, file_lst, noise_lst = test_data(
                sess, params, X_test, Y_test, index_test_list, S_Test_list,
                R_L_Test_list, F_list_test, e, pre_test, n_test_batches)
            if len(full_median_result_lst) > 1:
                if median_result[0] < np.min(full_median_result_lst,
                                             axis=0)[0]:
                    # ut.write_slam_est(est_file=params["est_file"],est=final_output_lst,file_names=file_lst)
                    #     ut.write_slam_est(est_file=params["noise_file"],est=noise_lst,file_names=file_lst)
                    #     save_path=params["cp_file"]+params['msg']
                    # saver.save(sess,save_path)
                    print 'Writing estimations....'

            full_median_result_lst.append(median_result)
            median_result_lst.append(median_result)
            mean_result_lst.append(mean_result)
            # base_cp_path = params["cp_file"] + "/"
            #
            # lss_str = '%.5f' % total_loss
            # model_name = lss_str + "_" + str(e) + "_" + str(params["rn_id"]) + params["model"] + "_model.ckpt"
            # save_path = base_cp_path + model_name
            # saved_path = False
            # if pre_best_loss > total_loss:
            #     pre_best_loss = total_loss
            #     model_name = lss_str + "_" + str(e) + "_" + str(params["rn_id"]) + params["model"] + "_best_model.ckpt"
            #     save_path = base_cp_path + model_name
            #     saved_path = saver.save(sess, save_path)
            # else:
            #     if e % 3.0 == 0:
            #         saved_path = saver.save(sess, save_path)
            # if saved_path != "":
            #     s = 'MODEL_Saved --> epoch %i | error %f path %s' % (e, total_loss, saved_path)
            #     ut.log_write(s, params)
    return median_result_lst, mean_result_lst
Code example #14
File: pred_human.py  Project: zhengzh/lstmkf_ICCV2017
def test_data(sess, params, X, Y, index_list, S_list, R_L_list, F_list, e,
              pre_test, n_batches):
    dic_state = ut.get_state_list(params)
    I = np.asarray([
        np.diag([1.0] * params['n_output'])
        for i in range(params["batch_size"])
    ],
                   dtype=np.float32)
    is_test = 1

    state_reset_counter_lst = [0 for i in range(batch_size)]
    total_loss = 0.0
    total_n_count = 0.0
    for minibatch_index in xrange(n_batches):
        state_reset_counter_lst = [s + 1 for s in state_reset_counter_lst]
        (dic_state,x,y,r,f,_,state_reset_counter_lst,_)= \
            th.prepare_batch(is_test,index_list, minibatch_index, batch_size,
                                       S_list, dic_state, params, Y, X, R_L_list,F_list,state_reset_counter_lst)
        feed = th.get_feed(tracker,
                           params,
                           r,
                           x,
                           y,
                           I,
                           dic_state,
                           is_training=0)

        if mode == 'klstm':
            states,final_output,final_pred_output,final_meas_output,q_mat,r_mat,k_mat,y =\
                sess.run([tracker.states,tracker.final_output,tracker.final_pred_output,tracker.final_meas_output,
                      tracker.final_q_output,tracker.final_r_output,tracker.final_k_output,tracker.y], feed)
        else:
            states, final_output, y = \
                sess.run([tracker.states, tracker.final_output, tracker.y], feed)

        for k in states.keys():
            dic_state[k] = states[k]

        if params["normalise_data"] == 3 or params["normalise_data"] == 2:
            final_output = ut.unNormalizeData(final_output, params["y_men"],
                                              params["y_std"])
            y = ut.unNormalizeData(y, params["y_men"], params["y_std"])

        if params["normalise_data"] == 4:
            final_output = ut.unNormalizeData(final_output, params["x_men"],
                                              params["x_std"])
            y = ut.unNormalizeData(y, params["x_men"], params["x_std"])
            if mode == 'klstm':
                final_pred_output = ut.unNormalizeData(final_pred_output,
                                                       params["x_men"],
                                                       params["x_std"])
                final_meas_output = ut.unNormalizeData(final_meas_output,
                                                       params["x_men"],
                                                       params["x_std"])

        test_loss, n_count = ut.get_loss(params,
                                         gt=y,
                                         est=final_output,
                                         r=None)
        f = f.reshape((-1, 2))
        y_f = y.reshape(final_output.shape)
        r = r.flatten()
        fnames = f[np.nonzero(r)]
        # e=final_output[np.nonzero(r)]
        if mode == 'klstm':
            ut.write_est(est_file=params["est_file"] + "/kal_est/",
                         est=final_output,
                         file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/kal_est_dif/",
                         est=np.abs(final_output - y_f),
                         file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/kal_pred/",
                         est=final_pred_output,
                         file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/kal_pred_dif/",
                         est=np.abs(final_pred_output - y_f),
                         file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/meas/",
                         est=final_meas_output,
                         file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/q_mat/",
                         est=q_mat,
                         file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/r_mat/",
                         est=r_mat,
                         file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/k_mat/",
                         est=k_mat,
                         file_names=fnames)
            ut.write_est(est_file=params["est_file"] + "/y_f/",
                         est=y_f,
                         file_names=fnames)
        else:
            ut.write_est(est_file=params["est_file"],
                         est=final_output,
                         file_names=fnames)
        # print test_loss
        total_loss += test_loss * n_count

        total_n_count += n_count
        print total_loss / total_n_count
        # if (minibatch_index%show_every==0):
        #     print pre_test+" test batch loss: (%i / %i / %i)  %f"%(e,minibatch_index,n_train_batches,test_loss)
    total_loss = total_loss / total_n_count
    s = pre_test + ' Loss --> epoch %i | error %f' % (e, total_loss)
    ut.log_write(s, params)
    return total_loss
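
The reshaping before write_est selects only the real frames: r flags which timesteps are genuine rather than padded repeats, so f[np.nonzero(r)] keeps exactly the file names whose estimates should be written. A minimal sketch of that mask selection:

import numpy as np

f = np.array([['seq1', 'f0'], ['seq1', 'f1'], ['seq1', 'f2']])
r = np.array([1, 0, 1])    # 1 = real frame, 0 = repeated padding
fnames = f[np.nonzero(r)]  # keeps rows 0 and 2 only
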
Code example #15
params['write_est'] = True
params["ds_training"] = "crop350"
params["ds_test"] = "crop350"
(trainer, evaller, params) = model_provider.get_model(params)
#Number of steps per epoch
params['training_files'] = dt.load_files(params, is_training=True)
# params['training_files']=([],[])
params['test_files'] = dt.load_files(params, is_training=False)

# params['run_mode']=0 #Load previously trained model
if args.mode == 3:
    assert params['model_file'] != ""
    params['model_file'] = args.model_file
    params['run_mode'] = 3
    ut.start_log(params)
    ut.log_write("Testing given model", params)
    params["batch_size"] = 100
    params["est_file"] = params["est_file"] + str(
        args.model_file.split('/')[-1]) + '/'
    test_loss = evaller.eval(params)
    s = 'VAL --> Model %s | error %f' % (args.model_file.split('/')[-1],
                                         test_loss)
    ut.log_write(s, params)

elif args.mode == 1:
    ut.start_log(params)
    for epoch_counter in range(args.epoch_counter_start, 100):
        params["sm"] = params["sm"] + '/' + ut.get_time()
        # if epoch_counter > 0:
        params['run_mode'] = 2
        params["batch_size"] = 30
Code example #16
File: train_mlp.py  Project: zhengzh/lstmkf_ICCV2017
def train(X_train, Y_train, X_test, Y_test):
    num_epochs = 1000
    decay_rate = 0.5
    pre_best_loss = 10000.0

    # config = tf.ConfigProto(device_count = {'GPU': 0})
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.95
    with tf.Session(config=config) as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver()
        merged = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(params["sm"], sess.graph)
        for e in xrange(num_epochs):
            X_train, Y_train = ut.unison_shuffled_copies(X_train, Y_train)
            if e > 1:
                sess.run(
                    tf.assign(model.lr, params['lr'] * (decay_rate**(e - 1))))
            else:
                sess.run(tf.assign(model.lr, params['lr']))
            state_reset_counter_lst = [0 for i in range(batch_size)]
            total_train_loss = 0
            for minibatch_index in xrange(n_train_batches):
                start = time.time()
                state_reset_counter_lst = [
                    s + 1 for s in state_reset_counter_lst
                ]
                x = X_train[minibatch_index *
                            batch_size:(minibatch_index + 1) *
                            batch_size]  #60*20*1024
                y = Y_train[minibatch_index *
                            batch_size:(minibatch_index + 1) *
                            batch_size]  #60*20*1024
                feed = {
                    model.input_data: x,
                    model.target_data: y,
                    model.is_training: True,
                    model.output_keep_prob: 0.8
                }
                summary,train_loss,_ =\
                    sess.run([merged,model.cost, model.train_op], feed)
                summary_writer.add_summary(summary, minibatch_index)
                total_train_loss += train_loss
                if (minibatch_index % show_every == 0):
                    print "Training batch loss: (%i / %i / %i)  %f" % (
                        e, minibatch_index, n_train_batches, train_loss)

            total_train_loss = total_train_loss / n_train_batches
            s = 'TRAIN --> epoch %i | error %f' % (e, total_train_loss)
            ut.log_write(s, params)

            pre_test = "TRAINING_Data"
            test_data(sess, X_train, Y_train, index_train_list, S_Train_list,
                      R_L_Train_list, F_list_train, e, pre_test,
                      n_train_batches)

            pre_test = "TEST_Data"
            total_loss = test_data(sess, X_test, Y_test, index_test_list,
                                   S_Test_list, R_L_Test_list, F_list_test, e,
                                   pre_test, n_test_batches)
            base_cp_path = params["cp_file"] + "/"

            lss_str = '%.5f' % total_loss
            model_name = lss_str + "_" + str(e) + "_" + str(
                params["rn_id"]) + params["model"] + "_model.ckpt"
            save_path = base_cp_path + model_name
            saved_path = False
            if pre_best_loss > total_loss:
                pre_best_loss = total_loss
                model_name = lss_str + "_" + str(e) + "_" + str(
                    params["rn_id"]) + params["model"] + "_best_model.ckpt"
                save_path = base_cp_path + model_name
                saved_path = saver.save(sess, save_path)
            else:
                if e % 3.0 == 0:
                    saved_path = saver.save(sess, save_path)
            if saved_path != False:
                s = 'MODEL_Saved --> epoch %i | error %f path %s' % (
                    e, total_loss, saved_path)
                ut.log_write(s, params)
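
The checkpoint logic above keeps two kinds of files: a _best_model.ckpt written whenever the test loss improves, and a rolling snapshot written every third epoch otherwise. A minimal sketch of that keep-best policy (save_fn is an illustrative stand-in for saver.save):

def maybe_checkpoint(e, loss, best_loss, save_fn):
    if loss < best_loss:
        return loss, save_fn('best_e%d' % e)       # new best: always save
    if e % 3 == 0:
        return best_loss, save_fn('snap_e%d' % e)  # periodic fallback
    return best_loss, None
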
Code example #17
File: train_CNNV3.py  Project: Seleucia/RNNPose
def train_rnn(params):
   rng = RandomStreams(seed=1234)
   (X_train,Y_train,S_Train_list,F_list_train,G_list_train,X_test,Y_test,S_Test_list,F_list_test,G_list_test)=du.load_pose(params)
   params["len_train"]=len(F_list_train)
   params["len_test"]=len(F_list_test)
   u.start_log(params)
   batch_size=params['batch_size']

   n_train_batches = len(F_list_train)
   n_train_batches /= batch_size

   n_test_batches = len(F_list_test)
   n_test_batches /= batch_size

   nb_epochs=params['n_epochs']

   print("Batch size: %i, train batch size: %i, test batch size: %i"%(batch_size,n_train_batches,n_test_batches))
   u.log_write("Model build started",params)
   if params['run_mode']==1:
      model= model_provider.get_model_pretrained(params,rng)
      u.log_write("Pretrained loaded: %s"%(params['mfile']),params)
   else:
     model= model_provider.get_model(params,rng)
   u.log_write("Number of parameters: %s"%(model.n_param),params)
   train_errors = np.ndarray(nb_epochs)
   u.log_write("Training started",params)
   val_counter=0
   best_loss=1000
   for epoch_counter in range(nb_epochs):
      batch_loss = 0.
      # H=C=np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) # initial hidden state
      sid=0
      for minibatch_index in range(n_train_batches):
          x_lst=F_list_train[minibatch_index * batch_size: (minibatch_index + 1) * batch_size] #60*20*1024
          y_lst=G_list_train[minibatch_index * batch_size: (minibatch_index + 1) * batch_size] #60*20*1024
          x,y=du.load_batch(params,x_lst,y_lst)
          # x=X_train[id_lst] #60*20*1024
          # y=Y_train[id_lst]#60*20*54
          is_train=1
          if(params["model"]=="blstmnp"):
             x_b=np.asarray(map(np.flipud,x))
             loss = model.train(x,x_b,y)
          else:
             loss= model.train(x, y,is_train)
          batch_loss += loss
      if params['shufle_data']==1:
         F_list_train,G_list_train=du.shuffle_in_unison_inplace(F_list_train,G_list_train)  # shuffle the file lists the training loop actually reads
      train_errors[epoch_counter] = batch_loss
      batch_loss/=n_train_batches
      s='TRAIN--> epoch %i | error %f'%(epoch_counter, batch_loss)
      u.log_write(s,params)
      if(epoch_counter%3==0):
          print("Model testing")
          batch_loss3d = []
          H=C=np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) # resetting initial state, since seq change
          sid=0
          for minibatch_index in range(n_test_batches):
             x_lst=F_list_test[minibatch_index * batch_size: (minibatch_index + 1) * batch_size] #60*20*1024
             y_lst=G_list_test[minibatch_index * batch_size: (minibatch_index + 1) * batch_size] #60*20*1024
             x,y=du.load_batch(params,x_lst,y_lst)
             # tmp_sid=S_Test_list[(minibatch_index + 1) * batch_size-1]
             # if(sid==0):
             #      sid=tmp_sid
             # if(tmp_sid!=sid):
             #      sid=tmp_sid
             #      H=C=np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) # resetting initial state, since seq change
             # x=X_test[id_lst] #60*20*1024
             # y=Y_test[id_lst]#60*20*54
             is_train=0
             if(params["model"]=="blstmnp"):
                x_b=np.asarray(map(np.flipud,x))
                pred = model.predictions(x,x_b)
             else:
                pred= model.predictions(x,is_train)
             loss3d =u.get_loss(params,y,pred)
             batch_loss3d.append(loss3d)
          batch_loss3d=np.nanmean(batch_loss3d)
          if(batch_loss3d<best_loss):
             best_loss=batch_loss3d
             ext=str(epoch_counter)+"_"+str(batch_loss3d)+"_best.p"
             u.write_params(model.params,params,ext)
          else:
              ext=str(val_counter%2)+".p"
              u.write_params(model.params,params,ext)

          val_counter+=1
          s ='VAL--> epoch %i | error %f, %f'%(val_counter,batch_loss3d,n_test_batches)
          u.log_write(s,params)
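A detail shared by the training loops in these examples is the checkpoint retention policy around u.write_params: weights that improve on the best validation loss are written under a descriptive name ending in "_best.p", while non-improving epochs alternate between two rotating slot names, so at most two recent non-best snapshots are kept on disk. A minimal self-contained sketch of the same policy (save_checkpoint, the prefix argument, and the pickle paths are illustrative stand-ins, not the repository's API):

import pickle

def save_checkpoint(weights, score, epoch, best_score, val_counter, prefix="model_"):
    # Improving checkpoints keep a descriptive name; the rest rotate between two slots.
    if score < best_score:
        best_score = score
        path = prefix + str(epoch) + "_" + str(score) + "_best.p"
    else:
        path = prefix + str(val_counter % 2) + ".p"
    with open(path, "wb") as f:
        pickle.dump(weights, f)
    return best_score, path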
Code example #18
def train_rnn(params):
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train, X_test,
     Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    params["len_train"] = Y_train.shape[0] * Y_train.shape[1]
    params["len_test"] = Y_test.shape[0] * Y_test.shape[1]
    u.start_log(params)
    index_train_list, S_Train_list = du.get_batch_indexes(
        S_Train_list)  # this prepares the batch indexes
    index_test_list, S_Test_list = du.get_batch_indexes(S_Test_list)
    batch_size = params['batch_size']
    n_train_batches = len(index_train_list)
    n_train_batches /= batch_size

    n_test_batches = len(index_test_list)
    n_test_batches /= batch_size

    nb_epochs = params['n_epochs']

    print("Batch size: %i, train batch size: %i, test batch size: %i" %
          (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        H = C = np.zeros(shape=(batch_size, params['n_hidden']),
                         dtype=dtype)  # initial hidden state
        sid = 0
        is_train = 1
        x = []
        y = []
        for minibatch_index in range(n_train_batches):
            if (minibatch_index == 0):
                (sid, H, C, x, y) = du.prepare_cnn_lstm_batch(
                    index_train_list, minibatch_index, batch_size,
                    S_Train_list, sid, H, C, F_list_train, params, Y_train,
                    X_train)
            pool = ThreadPool(processes=2)
            async_t = pool.apply_async(model.train, (x, y, is_train, H, C))
            async_b = pool.apply_async(
                du.prepare_cnn_lstm_batch,
                (index_train_list, minibatch_index, batch_size, S_Train_list,
                 sid, H, C, F_list_train, params, Y_train, X_train))
            pool.close()
            pool.join()
            (loss, H,
             C) = async_t.get()  # loss and updated state from the training step
            x = []
            y = []
            (sid, H, C, x,
             y) = async_b.get()  # next prefetched batch and its state

            if (minibatch_index == n_train_batches - 1):
                loss, H, C = model.train(x, y, is_train, H, C)

            batch_loss += loss
        if params['shufle_data'] == 1:
            X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        s = 'TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss)
        u.log_write(s, params)
        if (epoch_counter % 1 == 0):
            print("Model testing")
            batch_loss3d = []
            H = C = np.zeros(
                shape=(batch_size, params['n_hidden']),
                dtype=dtype)  # resetting initial state, since seq change
            sid = 0
            for minibatch_index in range(n_test_batches):
                if (minibatch_index == 0):
                    (sid, H, C, x, y) = du.prepare_cnn_lstm_batch(
                        index_test_list, minibatch_index, batch_size,
                        S_Test_list, sid, H, C, F_list_test, params, Y_test,
                        X_test)
                pool = ThreadPool(processes=2)
                async_t = pool.apply_async(model.predictions,
                                           (x, is_train, H, C))
                async_b = pool.apply_async(
                    du.prepare_cnn_lstm_batch,
                    (index_test_list, minibatch_index, batch_size, S_Test_list,
                     sid, H, C, F_list_test, params, Y_test, X_test))
                pool.close()
                pool.join()
                (pred, H,
                 C) = async_t.get()  # predictions and updated state
                loss3d = u.get_loss(params, y, pred)
                batch_loss3d.append(loss3d)
                (sid, H, C, x,
                 y) = async_b.get()  # next prefetched batch and its state
                if (minibatch_index == n_test_batches - 1):
                    pred, H, C = model.predictions(x, is_train, H, C)
                    loss3d = u.get_loss(params, y, pred)
                    batch_loss3d.append(loss3d)

            batch_loss3d = np.nanmean(batch_loss3d)
            if (batch_loss3d < best_loss):
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)

            val_counter += 1
            s = 'VAL--> epoch %i | error %f, %f' % (val_counter, batch_loss3d,
                                                    n_test_batches)
            u.log_write(s, params)
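The epoch loop above overlaps the model step for batch i with CPU-side preparation of batch i+1 by submitting both to a two-worker ThreadPool; note that it also rebuilds the pool on every iteration, which works but adds overhead. A minimal sketch of the same double-buffering with a single long-lived pool (train_step and prepare_batch are illustrative stand-ins for model.train and du.prepare_cnn_lstm_batch):

from multiprocessing.pool import ThreadPool

def pipelined_epoch(train_step, prepare_batch, n_batches):
    pool = ThreadPool(processes=2)
    batch = prepare_batch(0)  # prime the pipeline with the first batch
    total_loss = 0.0
    for i in range(n_batches):
        async_train = pool.apply_async(train_step, (batch,))
        if i + 1 < n_batches:
            async_next = pool.apply_async(prepare_batch, (i + 1,))  # prefetch
        total_loss += async_train.get()
        if i + 1 < n_batches:
            batch = async_next.get()
    pool.close()
    pool.join()
    return total_loss / n_batches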
Code example #19
File: trainV2.py Project: Seleucia/RNNPose
def train_rnn(params):
    data = []
    for sindex in range(0, params["seq_length"], 5):
        (X_train, Y_train, X_test, Y_test) = du.load_pose(params, sindex=sindex)
        data.append((X_train, Y_train, X_test, Y_test))
    (X_train, Y_train, X_test, Y_test) = data[0]
    params["len_train"] = X_train.shape[0] * X_train.shape[1]
    params["len_test"] = X_test.shape[0] * X_test.shape[1]
    u.start_log(params)
    batch_size = params["batch_size"]
    n_train_batches = len(X_train)
    n_train_batches /= batch_size

    n_test_batches = len(X_test)
    n_test_batches /= batch_size

    nb_epochs = params["n_epochs"]

    print("Batch size: %i, train batch size: %i, test batch size: %i" % (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params["resume"] == 1:
        model = model_provider.get_model_pretrained(params)
    else:
        model = model_provider.get_model(params)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 10000
    for epoch_counter in range(nb_epochs):
        (X_train, Y_train, X_test, Y_test) = data[np.mod(epoch_counter, len(data))]
        if params["shufle_data"] == 1:
            X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
        n_train_batches = len(X_train)
        n_train_batches /= batch_size

        n_test_batches = len(X_test)
        n_test_batches /= batch_size
        batch_loss = 0.0
        for minibatch_index in range(n_train_batches):
            x = X_train[minibatch_index * batch_size : (minibatch_index + 1) * batch_size]  # 60*20*1024
            y = Y_train[minibatch_index * batch_size : (minibatch_index + 1) * batch_size]  # 60*20*54
            if params["model"] == "blstmnp":
                x_b = np.asarray(map(np.flipud, x))
                loss = model.train(x, x_b, y)
            else:
                loss = model.train(x, y)
            batch_loss += loss
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        s = "TRAIN--> epoch %i | error %f" % (epoch_counter, batch_loss)
        u.log_write(s, params)
        if epoch_counter % 10 == 0:
            print("Model testing")
            batch_loss = 0.0
            batch_loss3d = 0.0
            for minibatch_index in range(n_test_batches):
                x = X_test[minibatch_index * batch_size : (minibatch_index + 1) * batch_size]
                y = Y_test[minibatch_index * batch_size : (minibatch_index + 1) * batch_size]
                if params["model"] == "blstmnp":
                    x_b = np.asarray(map(np.flipud, x))
                    pred = model.predictions(x, x_b)
                else:
                    pred = model.predictions(x)

                loss = np.nanmean(np.abs(pred - y) ** 2)
                loss3d = u.get_loss(y, pred)
                batch_loss += loss
                batch_loss3d += loss3d
            batch_loss /= n_test_batches
            batch_loss3d /= n_test_batches
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)

            val_counter += 1
            s = "VAL--> epoch %i | error %f, %f %f" % (val_counter, batch_loss, batch_loss3d, n_test_batches)
            u.log_write(s, params)
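The loader above reads the pose data at several start offsets (sindex stepping by 5 frames) and cycles through these shifted views with np.mod(epoch_counter, len(data)), a cheap augmentation for fixed-length sequence windows. A minimal sketch of the windowing itself, assuming frames is an (n_frames, dim) array (shifted_windows is an illustrative helper, not du.load_pose):

import numpy as np

def shifted_windows(frames, seq_length, sindex):
    # Skip the first `sindex` frames, then cut the rest into fixed-length windows.
    usable = (len(frames) - sindex) // seq_length * seq_length
    return frames[sindex:sindex + usable].reshape(-1, seq_length, frames.shape[1])

frames = np.arange(200, dtype=np.float32).reshape(100, 2)
views = [shifted_windows(frames, seq_length=20, sindex=s) for s in range(0, 20, 5)]
epoch_view = views[np.mod(7, len(views))]  # the view used at epoch 7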
Code example #20
File: train_mlp.py Project: zhengzh/lstmkf_ICCV2017
index_train_list, S_Train_list = dut.get_seq_indexes(params, S_Train_list)
index_test_list, S_Test_list = dut.get_seq_indexes(params, S_Test_list)

batch_size = params['batch_size']
n_train_batches = X_train.shape[0]
n_train_batches /= batch_size

n_test_batches = X_test.shape[0]
n_test_batches /= batch_size

params['training_size'] = X_train.shape[0]
params['test_size'] = X_test.shape[0]
ut.start_log(params)
ut.log_write(
    "Train mean:%f/%f, max:%f/%f, min:%f/%f ; Test mean:%f/%f, max:%f/%f, min:%f/%f "
    ";" %
    (np.mean(X_train), np.mean(Y_train), np.max(X_train), np.max(Y_train),
     np.min(X_train), np.min(Y_train), np.mean(X_test), np.mean(Y_test),
     np.max(X_test), np.max(Y_test), np.min(X_test), np.min(Y_test)), params)
ut.log_write("Model training started", params)
# summary_writer = tf.train.SummaryWriter(params["sm"])
show_every = 100000.0


def test_data(sess, X, Y, index_list, S_list, R_L_list, F_list, e, pre_test,
              n_batches):
    state_reset_counter_lst = [0 for i in range(batch_size)]
    total_loss = 0.0
    total_losss = 0.0
    total_n_count = 0.0
    total_n_countt = 0.0
    for minibatch_index in xrange(n_batches):
Code example #21
def train_rnn(params):
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train, X_test,
     Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    F_list_train, G_list_train = du.shuffle_in_unison_inplace(
        F_list_train, G_list_train)
    params["len_train"] = len(F_list_train)
    params["len_test"] = len(F_list_test)
    u.start_log(params)
    batch_size = params['batch_size']

    n_train_batches = len(F_list_train)
    n_train_batches /= batch_size

    n_test_batches = len(F_list_test)
    n_test_batches /= batch_size

    nb_epochs = params['n_epochs']

    print("Batch size: %i, train batch size: %i, test batch size: %i" %
          (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        # H=C=np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) # initial hidden state
        sid = 0
        for minibatch_index in range(n_train_batches):
            x_lst = F_list_train[minibatch_index *
                                 batch_size:(minibatch_index + 1) *
                                 batch_size]  #60*20*1024
            y_lst = G_list_train[minibatch_index *
                                 batch_size:(minibatch_index + 1) *
                                 batch_size]  #60*20*54
            x, y = du.load_batch(params, x_lst, y_lst)
            # x=X_train[id_lst] #60*20*1024
            # y=Y_train[id_lst]#60*20*54
            is_train = 1
            if (params["model"] == "blstmnp"):
                x_b = np.asarray(map(np.flipud, x))
                loss = model.train(x, x_b, y)
            else:
                loss = model.train(x, y, is_train)
            batch_loss += loss
        if params['shufle_data'] == 1:
            F_list_train, G_list_train = du.shuffle_in_unison_inplace(
                F_list_train, G_list_train)
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        s = 'TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss)
        u.log_write(s, params)
        if (epoch_counter % 1 == 0):
            print("Model testing")
            batch_loss3d = []
            H = C = np.zeros(
                shape=(batch_size, params['n_hidden']),
                dtype=dtype)  # resetting initial state, since seq change
            sid = 0
            for minibatch_index in range(n_test_batches):
                x_lst = F_list_test[minibatch_index *
                                    batch_size:(minibatch_index + 1) *
                                    batch_size]  #60*20*1024
                y_lst = G_list_test[minibatch_index *
                                    batch_size:(minibatch_index + 1) *
                                    batch_size]  #60*20*54
                x, y = du.load_batch(params, x_lst, y_lst)
                # tmp_sid=S_Test_list[(minibatch_index + 1) * batch_size-1]
                # if(sid==0):
                #      sid=tmp_sid
                # if(tmp_sid!=sid):
                #      sid=tmp_sid
                #      H=C=np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) # resetting initial state, since seq change
                # x=X_test[id_lst] #60*20*1024
                # y=Y_test[id_lst]#60*20*54
                is_train = 0
                if (params["model"] == "blstmnp"):
                    x_b = np.asarray(map(np.flipud, x))
                    pred = model.predictions(x, x_b)
                else:
                    pred = model.predictions(x, is_train)
                loss3d = u.get_loss(params, y, pred)
                batch_loss3d.append(loss3d)
            batch_loss3d = np.nanmean(batch_loss3d)
            if (batch_loss3d < best_loss):
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)

            val_counter += 1
            s = 'VAL--> epoch %i | error %f, %f' % (val_counter, batch_loss3d,
                                                    n_test_batches)
            u.log_write(s, params)
Code example #22
def train(Model,params):
    I= np.asarray([np.diag([1.0]*params['n_output']) for i in range(params["batch_size"])],dtype=np.float32)

    batch_size=params["batch_size"]
    num_epochs=100000
    decay_rate=0.9
    show_every=100
    deca_start=3
    pre_best_loss=10000
    with tf.Session() as sess:#config=gpu_config
        tf.global_variables_initializer().run()
        saver = tf.train.Saver()  # saver is required below for checkpointing
        # if params["model"] == "kfl_QRf":
            # ckpt = tf.train.get_checkpoint_state(params["mfile"])
            # if ckpt and ckpt.model_checkpoint_path:
            #     saver.restore(sess, ckpt.model_checkpoint_path)
            #     mfile = ckpt.model_checkpoint_path
            #     params["est_file"] = params["est_file"] + mfile.split('/')[-1].replace('.ckpt', '') + '/'
            #     print "Loaded Model: %s" % ckpt.model_checkpoint_path
        # if params["model"] == "kfl_QRf":
        #     for var in Model.tvars:
        #         path = '/mnt/Data1/hc/tt/cp/weights/' + var.name.replace('transitionF/','')
        #         if os.path.exists(path+'.npy'):
        #             val=np.load(path+'.npy')
        #             sess.run(tf.assign(var, val))
        #     print 'PreTrained LSTM model loaded...'


        # sess.run(Model.predict())
        print ('Training model:'+params["model"])
        noise_std = params['noise_std']
        new_noise_std=0.0
        for e in range(num_epochs):
            if e>(deca_start-1):
                sess.run(tf.assign(Model.lr, params['lr'] * (decay_rate ** (e))))
            else:
                sess.run(tf.assign(Model.lr, params['lr']))
            total_train_loss=0

            state_reset_counter_lst=[0 for i in range(batch_size)]
            index_train_list_s=index_train_list
            dic_state = ut.get_state_list(params)
            # total_loss = test_data(sess, params, X_test, Y_test, index_test_list, S_Test_list, R_L_Test_list,
            #                        F_list_test, e, 'Test Check', n_test_batches)
            if params["shufle_data"]==1 and params['reset_state']==1:
                index_train_list_s = ut.shufle_data(index_train_list)

            for minibatch_index in xrange(n_train_batches):
                is_test = 0
                state_reset_counter_lst=[s+1 for s in state_reset_counter_lst]
                (dic_state,x,y,r,f,_,state_reset_counter_lst,_)= \
                    th.prepare_batch(is_test,index_train_list_s, minibatch_index, batch_size,
                                       S_Train_list, dic_state, params, Y_train, X_train, R_L_Train_list,F_list_train,state_reset_counter_lst)
                if noise_std >0.0:
                   u_cnt= e*n_train_batches + minibatch_index
                   if u_cnt in params['noise_schedule']:
                       if u_cnt==params['noise_schedule'][0]:
                         new_noise_std=noise_std
                       else:
                           new_noise_std = noise_std * (u_cnt / float(params['noise_schedule'][1]))  # float division so the ramp is not truncated

                       s = 'NOISE --> u_cnt %i | error %f' % (u_cnt, new_noise_std)
                       ut.log_write(s, params)
                   if new_noise_std>0.0:
                       noise=np.random.normal(0.0,new_noise_std,x.shape)
                       x=noise+x

                feed = th.get_feed(Model, params, r, x, y, I, dic_state, is_training=1)
                train_loss,states,_ = sess.run([Model.cost,Model.states,Model.train_op], feed)

                for k in states.keys():
                    dic_state[k] = states[k]

                total_train_loss+=train_loss
                if (minibatch_index%show_every==0):
                    print "Training batch loss: (%i / %i / %i)  %f"%(e,minibatch_index,n_train_batches,
                                                                 train_loss)

            total_train_loss=total_train_loss/n_train_batches
            s='TRAIN --> epoch %i | error %f'%(e, total_train_loss)
            ut.log_write(s,params)

            pre_test = "TRAINING_Data"
            total_loss = test_data(sess, params, X_train, Y_train, index_train_list, S_Train_list, R_L_Train_list,
                                   F_list_train, e, pre_test, n_train_batches)

            pre_test="TEST_Data"
            total_loss= test_data(sess,params,X_test,Y_test,index_test_list,S_Test_list,R_L_Test_list,F_list_test,e, pre_test,n_test_batches)
            base_cp_path = params["cp_file"] + "/"

            lss_str = '%.5f' % total_loss
            model_name = lss_str + "_" + str(e) + "_" + str(params["rn_id"]) + params["model"] + "_model.ckpt"
            save_path = base_cp_path + model_name
            saved_path = False
            if pre_best_loss > total_loss:
                pre_best_loss = total_loss
                model_name = lss_str + "_" + str(e) + "_" + str(params["rn_id"]) + params["model"] + "_best_model.ckpt"
                save_path = base_cp_path + model_name
                saved_path = saver.save(sess, save_path)
            else:
                if e % 3.0 == 0:
                    saved_path = saver.save(sess, save_path)
            if saved_path:
                s = 'MODEL_Saved --> epoch %i | error %f path %s' % (e, total_loss, saved_path)
                ut.log_write(s, params)
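The learning-rate schedule above stays flat for the first deca_start epochs and then decays exponentially; since the exponent is the absolute epoch index, the first decayed epoch already drops to lr * decay_rate**deca_start rather than by a single decay step. A sketch of the schedule as a pure function, using the constants from this example:

def learning_rate(lr0, decay_rate, deca_start, epoch):
    # Flat warm-up, then exponential decay keyed to the absolute epoch index.
    if epoch > deca_start - 1:
        return lr0 * decay_rate ** epoch
    return lr0

rates = [learning_rate(0.001, 0.9, 3, e) for e in range(6)]
# approximately [0.001, 0.001, 0.001, 0.000729, 0.00065610, 0.00059049]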
Code example #23
File: train.py Project: Seleucia/CNNRNet
          ext=params["model_file"]+params["model"]+"_"+im_type+"_"+"_best_"+str(rn_id)+"_"+str(epoch_counter)+".hdf5"
          model.save_weights(ext, overwrite=True)
      if(run_mode==1):
              break
  utils.log_write("Training ended",params)

if __name__ == "__main__":
  global params
  params= config.get_params()
  parser = argparse.ArgumentParser(description='Training the module')
  parser.add_argument('-rm','--run_mode',type=int, help='Training mode: 0 full train, 1 system check, 2 simple train',
                      required=True)
  parser.add_argument('-m','--model',help='Model: kcnnr, dccnr, current('+params["model"]+')',default=params["model"])
  parser.add_argument('-i','--im_type',help='Image type: pre_depth, depth, gray, current('+params["im_type"]+')',
                      default=params["im_type"])

  args = vars(parser.parse_args())
  params["run_mode"]=args["run_mode"]
  params["model"]=args["model"]
  params["im_type"]=args["im_type"]
  params["is_exit"]=0
  params=config.update_params(params)
  try:
     train_model(params)
  except KeyboardInterrupt:
     utils.log_write("Exiting program",params)
     is_exit=1
     params["is_exit"]=1
  except Exception as e:
        utils.log_write('got exception: %r, terminating the pool' % (e,),params)
        params["is_exit"]=1
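Given the parser above, a training run is launched from the shell with the required run-mode flag plus optional model and image-type overrides; a hypothetical invocation (flag values taken from the help strings):

python train.py -rm 0 -m dccnr -i depth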
Code example #24
File: train_lstm_V3.py Project: Seleucia/v3d
def train_rnn(params):
   rng = RandomStreams(seed=1234)
   (X_train,Y_train,S_Train_list,F_list_train,G_list_train,X_test,Y_test,S_Test_list,F_list_test,G_list_test)=du.load_pose(params)
   params["len_train"]=Y_train.shape[0]*Y_train.shape[1]
   params["len_test"]=Y_test.shape[0]*Y_test.shape[1]
   u.start_log(params)
   index_train_list,S_Train_list=du.get_seq_indexes(params,S_Train_list)
   index_test_list,S_Test_list=du.get_seq_indexes(params,S_Test_list)
   batch_size=params['batch_size']
   n_train_batches = len(index_train_list)
   n_train_batches /= batch_size

   n_test_batches = len(index_test_list)
   n_test_batches /= batch_size

   nb_epochs=params['n_epochs']

   print("Batch size: %i, train batch size: %i, test batch size: %i"%(batch_size,n_train_batches,n_test_batches))
   u.log_write("Model build started",params)
   if params['run_mode']==1:
      model= model_provider.get_model_pretrained(params,rng)
      u.log_write("Pretrained loaded: %s"%(params['mfile']),params)
   else:
     model= model_provider.get_model(params,rng)
   u.log_write("Number of parameters: %s"%(model.n_param),params)
   train_errors = np.ndarray(nb_epochs)
   u.log_write("Training started",params)
   val_counter=0
   best_loss=1000
   for epoch_counter in range(nb_epochs):
      batch_loss = 0.
      LStateList_t=[np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) for i in range(params['nlayer']*2)] # initial hidden state
      LStateList_pre=[np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) for i in range(params['nlayer']*2)] # initial hidden state
      state_reset_counter_lst=[0 for i in range(batch_size)]
      is_train=1
      for minibatch_index in range(n_train_batches):
          state_reset_counter_lst=[s+1 for s in state_reset_counter_lst]
          (LStateList_b,x,y,state_reset_counter_lst)=du.prepare_lstm_batch(index_train_list, minibatch_index, batch_size, S_Train_list,LStateList_t,LStateList_pre, F_list_train, params, Y_train, X_train,state_reset_counter_lst)
          LStateList_pre=LStateList_b
          args=(x, y,is_train)+tuple(LStateList_b)
          result= model.train(*args)
          loss=result[0]
          LStateList_t=result[1:len(result)]

          batch_loss += loss
      if params['shufle_data']==1:
         X_train,Y_train=du.shuffle_in_unison_inplace(X_train,Y_train)
      train_errors[epoch_counter] = batch_loss
      batch_loss/=n_train_batches
      s='TRAIN--> epoch %i | error %f'%(epoch_counter, batch_loss)
      u.log_write(s,params)
      if(epoch_counter%1==0):
          is_train=0
          print("Model testing")
          state_reset_counter=0
          batch_loss3d = []
          LStateList_t=[np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) for i in range(params['nlayer']*2)] # initial hidden state
          LStateList_pre=[np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) for i in range(params['nlayer']*2)] # initial hidden state
          state_reset_counter_lst=[0 for i in range(batch_size)]
          for minibatch_index in range(n_test_batches):
             state_reset_counter_lst=[s+1 for s in state_reset_counter_lst]
             (LStateList_b,x,y,state_reset_counter_lst)=du.prepare_lstm_batch(index_test_list, minibatch_index, batch_size, S_Test_list, LStateList_t,LStateList_pre, F_list_test, params, Y_test, X_test,state_reset_counter_lst)
             LStateList_pre=LStateList_b
             args=(x,is_train)+tuple(LStateList_b)
             result = model.predictions(*args)
             pred=result[0]
             LStateList_t=result[1:len(result)]
             loss3d =u.get_loss(params,y,pred)
             batch_loss3d.append(loss3d)
          batch_loss3d=np.nanmean(batch_loss3d)
          if(batch_loss3d<best_loss):
             best_loss=batch_loss3d
             ext=str(epoch_counter)+"_"+str(batch_loss3d)+"_best.p"
             u.write_params(model.params,params,ext)
          else:
              ext=str(val_counter%2)+".p"
              u.write_params(model.params,params,ext)

          val_counter+=1
          s ='VAL--> epoch %i | error %f, %f'%(val_counter,batch_loss3d,n_test_batches)
          u.log_write(s,params)
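The loop above threads LSTM state across truncated-BPTT batches: each train/predictions call returns the updated per-layer (c, h) states, which are fed back as the next batch's initial state, and prepare_lstm_batch zeroes them when a sequence boundary is crossed. A minimal sketch of that bookkeeping, with step_fn standing in for the model call (assumed to return an (output, new_states) pair):

import numpy as np

def run_stateful(step_fn, batches, batch_size, n_hidden, nlayer):
    # One c and one h matrix per layer, all zeroed at sequence boundaries.
    states = [np.zeros((batch_size, n_hidden), dtype=np.float32)
              for _ in range(nlayer * 2)]
    outputs = []
    for x, new_sequence in batches:
        if new_sequence:
            states = [np.zeros_like(s) for s in states]
        out, states = step_fn(x, states)  # carry the state into the next batch
        outputs.append(out)
    return outputs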
Code example #25
File: train_cnn_lstm.py Project: Seleucia/v3d
def train_rnn(params):
   rng = RandomStreams(seed=1234)
   (X_train,Y_train,S_Train_list,F_list_train,G_list_train,X_test,Y_test,S_Test_list,F_list_test,G_list_test)=du.load_pose(params)
   params["len_train"]=Y_train.shape[0]*Y_train.shape[1]
   params["len_test"]=Y_test.shape[0]*Y_test.shape[1]
   u.start_log(params)
   index_train_list,S_Train_list=du.get_batch_indexes(params,S_Train_list)
   index_test_list,S_Test_list=du.get_batch_indexes(params,S_Test_list)
   batch_size=params['batch_size']
   n_train_batches = len(index_train_list)
   n_train_batches /= batch_size

   n_test_batches = len(index_test_list)
   n_test_batches /= batch_size

   nb_epochs=params['n_epochs']

   print("Batch size: %i, train batch size: %i, test batch size: %i"%(batch_size,n_train_batches,n_test_batches))
   u.log_write("Model build started",params)
   if params['run_mode']==1:
      model= model_provider.get_model_pretrained(params,rng)
      u.log_write("Pretrained loaded: %s"%(params['mfile']),params)
   else:
     model= model_provider.get_model(params,rng)
   u.log_write("Number of parameters: %s"%(model.n_param),params)
   train_errors = np.ndarray(nb_epochs)
   u.log_write("Training started",params)
   val_counter=0
   best_loss=1000
   for epoch_counter in range(nb_epochs):
      batch_loss = 0.
      H=C=np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) # initial hidden state
      sid=0
      is_train=1
      x=[]
      y=[]
      for minibatch_index in range(n_train_batches):
          if(minibatch_index==0):
              (sid,H,C,x,y)=du.prepare_cnn_lstm_batch(index_train_list, minibatch_index, batch_size, S_Train_list, sid, H, C, F_list_train, params, Y_train, X_train)
          pool = ThreadPool(processes=2)
          async_t = pool.apply_async(model.train, (x, y,is_train,H,C))
          async_b = pool.apply_async(du.prepare_cnn_lstm_batch, (index_train_list, minibatch_index, batch_size, S_Train_list, sid, H, C, F_list_train, params, Y_train, X_train))
          pool.close()
          pool.join()
          (loss,H,C) = async_t.get()  # loss and updated state from the training step
          x=[]
          y=[]
          (sid,H,C,x,y) = async_b.get()  # next prefetched batch and its state

          if(minibatch_index==n_train_batches-1):
              loss,H,C= model.train(x, y,is_train,H,C)

          batch_loss += loss
      if params['shufle_data']==1:
         X_train,Y_train=du.shuffle_in_unison_inplace(X_train,Y_train)
      train_errors[epoch_counter] = batch_loss
      batch_loss/=n_train_batches
      s='TRAIN--> epoch %i | error %f'%(epoch_counter, batch_loss)
      u.log_write(s,params)
      if(epoch_counter%1==0):
          print("Model testing")
          batch_loss3d = []
          H=C=np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) # resetting initial state, since seq change
          sid=0
          for minibatch_index in range(n_test_batches):
             if(minibatch_index==0):
               (sid,H,C,x,y)=du.prepare_cnn_lstm_batch(index_test_list, minibatch_index, batch_size, S_Test_list, sid, H, C, F_list_test, params, Y_test, X_test)
             pool = ThreadPool(processes=2)
             async_t = pool.apply_async(model.predictions, (x,is_train,H,C))
             async_b = pool.apply_async(du.prepare_cnn_lstm_batch, (index_test_list, minibatch_index, batch_size, S_Test_list, sid, H, C, F_list_test, params, Y_test, X_test))
             pool.close()
             pool.join()
             (pred,H,C) = async_t.get()  # predictions and updated state
             loss3d =u.get_loss(params,y,pred)
             batch_loss3d.append(loss3d)
             x=[]
             y=[]
             (sid,H,C,x,y) = async_b.get()  # next prefetched batch and its state
              if(minibatch_index==n_test_batches-1):
                 pred,H,C= model.predictions(x,is_train,H,C)
                 loss3d =u.get_loss(params,y,pred)
                 batch_loss3d.append(loss3d)

          batch_loss3d=np.nanmean(batch_loss3d)
          if(batch_loss3d<best_loss):
             best_loss=batch_loss3d
             ext=str(epoch_counter)+"_"+str(batch_loss3d)+"_best.p"
             u.write_params(model.params,params,ext)
          else:
              ext=str(val_counter%2)+".p"
              u.write_params(model.params,params,ext)

          val_counter+=1
          s ='VAL--> epoch %i | error %f, %f'%(val_counter,batch_loss3d,n_test_batches)
          u.log_write(s,params)
Code example #26
def eval(params):
    batch_size = params['batch_size']
    params['write_est']=False
    num_examples = len(params['test_files'][0])
    with tf.Graph().as_default():
        batch = dut.distorted_inputs(params,is_training=is_training)
        with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
            logits,aux, end_points = inception_resnet_v2.inception_resnet_v2(batch[0],
                                                                         num_classes=params['n_output'],
                                                                         is_training=is_training)

        # Obtain the trainable variables and a saver
        # variables_to_restore = slim.get_variables_to_restore()

        init_fn=ut.get_init_fn(slim,params)
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = params['per_process_gpu_memory_fraction']
        with tf.Session(config=config) as sess:  # pass the GPU memory config built above
            init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
            # init_op = tf.global_variables_initializer()
            sess.run(init_op)
            init_fn(sess)
            coord = tf.train.Coordinator()
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True, start=True))


            num_iter = int(math.ceil(num_examples / float(batch_size)))  # float division so the last partial batch is counted
            print('%s: Testing started.' % (datetime.now()))

            step = 0
            loss_lst=[]
            run_lst=[]
            run_lst.append(logits)
            # run_lst.append(end_points['PreLogitsFlatten'])
            # run_lst.append(end_points['PrePool'])
            run_lst.extend(batch[1:])

            while step < num_iter and not coord.should_stop():
                try:
                    batch_res= sess.run(run_lst)
                except tf.errors.OutOfRangeError:
                    print ('Testing finished....%d'%step)
                    break
                if params['write_est']:
                    ut.write_mid_est(params,batch_res)
                est=batch_res[0]
                gt=batch_res[1]
                # print(est.shape)
                # print(gt.shape)
                prepool=batch_res[-1]
                loss,_= ut.get_loss(params,gt,est)
                loss_lst.append(loss)
                s ='VAL --> batch %i/%i | error %f'%(step,num_iter,loss)
                if step%10==0:
                    ut.log_write(s,params,screen_print=True)
                    print "Current number of examples / mean err: %i / %f"%(step*gt.shape[0],np.mean(loss_lst))
                else:
                    ut.log_write(s, params, screen_print=False)
                # joint_list=['/'.join(p1.split('/')[0:-1]).replace('joints','img').replace('.cdf','')+'/frame_'+(p1.split('/')[-1].replace('.txt','')).zfill(5)+'.png' for p1 in image_names]
                # print ('List equality check:')
                # print len(label_names) == len(set(label_names))
                # print sum(joint_list==label_names)==(len(est))
                # print(len(label_names))
                step += 1
            coord.request_stop()
            coord.join(threads)
            return np.mean(loss_lst)
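Note that np.mean(loss_lst) weights every batch equally; when num_examples is not divisible by batch_size, the final partial batch should ideally count less. A count-weighted alternative, sketched under the assumption that each batch yields a (mean_loss, n_count) pair the way ut.get_loss returns a loss and a count:

def weighted_mean_loss(batch_results):
    # batch_results: iterable of (mean_loss, n_count) pairs, one per batch
    total = sum(loss * n for loss, n in batch_results)
    count = sum(n for _, n in batch_results)
    return total / count if count else float('nan')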