Example #1
0
def main(_):
  """Restore a pretrained pose model from its checkpoint and report test error."""
  params = cf.get_params()
  params['mfile'] = "-11"  # checkpoint identifier appended to the checkpoint dir
  model_path = params["wd"] + "/cp/" + params['mfile']

  loaded = du.load_pose(params)
  data_train = (loaded[0], loaded[1])  # kept for parity with the training script
  data_test = (loaded[2], loaded[3])

  with tf.Graph().as_default(), tf.Session() as session:
    initializer = tf.random_uniform_initializer(-params["init_scale"],
                                                params["init_scale"])
    mtest = mp.get_model(is_training=False, params=params)
    saver = tf.train.Saver()

    # Weights come from the checkpoint, so no variable initialization is run.
    saver.restore(sess=session, save_path=model_path)
    test_err = run_epoch(session, mtest, tf.no_op(), params, data_test,
                         is_training=False)
    print("Test Err: %.5f" % test_err)
Example #2
0
def train_rnn(params):
   """Train a CNN-LSTM pose model with a two-thread pipeline that overlaps
   batch preparation with training/prediction, validating every epoch.

   Args:
       params: configuration dict (expects 'batch_size', 'n_epochs',
           'n_hidden', 'run_mode', 'shufle_data', 'mfile', ...).
   """
   rng = RandomStreams(seed=1234)
   (X_train, Y_train, S_Train_list, F_list_train, G_list_train,
    X_test, Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
   params["len_train"] = Y_train.shape[0] * Y_train.shape[1]
   params["len_test"] = Y_test.shape[0] * Y_test.shape[1]
   u.start_log(params)
   index_train_list, S_Train_list = du.get_batch_indexes(params, S_Train_list)
   index_test_list, S_Test_list = du.get_batch_indexes(params, S_Test_list)
   batch_size = params['batch_size']
   # Floor division: plain '/' yields a float on Python 3 and breaks range().
   n_train_batches = len(index_train_list) // batch_size
   n_test_batches = len(index_test_list) // batch_size

   nb_epochs = params['n_epochs']

   print("Batch size: %i, train batch size: %i, test batch size: %i"
         % (batch_size, n_train_batches, n_test_batches))
   u.log_write("Model build started", params)
   if params['run_mode'] == 1:
      model = model_provider.get_model_pretrained(params, rng)
      u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
   else:
      model = model_provider.get_model(params, rng)
   u.log_write("Number of parameters: %s" % (model.n_param), params)
   train_errors = np.ndarray(nb_epochs)
   u.log_write("Training started", params)
   val_counter = 0
   best_loss = 1000
   for epoch_counter in range(nb_epochs):
      batch_loss = 0.
      # Initial LSTM hidden/cell state, reset at the start of each epoch.
      H = C = np.zeros(shape=(batch_size, params['n_hidden']), dtype=dtype)
      sid = 0
      is_train = 1
      x = []
      y = []
      for minibatch_index in range(n_train_batches):
          if minibatch_index == 0:
              # Prime the pipeline: prepare the first batch synchronously.
              (sid, H, C, x, y) = du.prepare_cnn_lstm_batch(
                  index_train_list, minibatch_index, batch_size, S_Train_list,
                  sid, H, C, F_list_train, params, Y_train, X_train)
          # Overlap training on the current batch with preparation of the next.
          pool = ThreadPool(processes=2)
          async_t = pool.apply_async(model.train, (x, y, is_train, H, C))
          async_b = pool.apply_async(
              du.prepare_cnn_lstm_batch,
              (index_train_list, minibatch_index, batch_size, S_Train_list,
               sid, H, C, F_list_train, params, Y_train, X_train))
          pool.close()
          pool.join()
          (loss, H, C) = async_t.get()
          (sid, H, C, x, y) = async_b.get()

          if minibatch_index == n_train_batches - 1:
              # Drain the pipeline: train on the last prepared batch.
              loss, H, C = model.train(x, y, is_train, H, C)

          batch_loss += loss
      if params['shufle_data'] == 1:
         X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
      # Stores the un-normalized epoch sum; the mean is only used for logging.
      train_errors[epoch_counter] = batch_loss
      batch_loss /= n_train_batches
      u.log_write('TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss),
                  params)
      if epoch_counter % 1 == 0:
          print("Model testing")
          batch_loss3d = []
          # Reset recurrent state: the test sequences differ from training.
          H = C = np.zeros(shape=(batch_size, params['n_hidden']), dtype=dtype)
          sid = 0
          # NOTE(review): is_train stays 1 during prediction here — confirm
          # whether model.predictions should receive 0 instead.
          for minibatch_index in range(n_test_batches):
             if minibatch_index == 0:
                (sid, H, C, x, y) = du.prepare_cnn_lstm_batch(
                    index_test_list, minibatch_index, batch_size, S_Test_list,
                    sid, H, C, F_list_test, params, Y_test, X_test)
             pool = ThreadPool(processes=2)
             async_t = pool.apply_async(model.predictions, (x, is_train, H, C))
             async_b = pool.apply_async(
                 du.prepare_cnn_lstm_batch,
                 (index_test_list, minibatch_index, batch_size, S_Test_list,
                  sid, H, C, F_list_test, params, Y_test, X_test))
             pool.close()
             pool.join()
             (pred, H, C) = async_t.get()
             loss3d = u.get_loss(params, y, pred)
             batch_loss3d.append(loss3d)
             (sid, H, C, x, y) = async_b.get()
             # BUG FIX: the drain check used n_train_batches - 1, but this
             # loop runs n_test_batches iterations.
             if minibatch_index == n_test_batches - 1:
                 pred, H, C = model.predictions(x, is_train, H, C)
                 loss3d = u.get_loss(params, y, pred)
                 batch_loss3d.append(loss3d)

          batch_loss3d = np.nanmean(batch_loss3d)
          if batch_loss3d < best_loss:
             best_loss = batch_loss3d
             ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
             u.write_params(model.params, params, ext)
          else:
              # Alternate between two rolling checkpoint files.
              ext = str(val_counter % 2) + ".p"
              u.write_params(model.params, params, ext)

          val_counter += 1
          u.log_write('VAL--> epoch %i | error %f, %f'
                      % (val_counter, batch_loss3d, n_test_batches), params)
def train_rnn(params):
    """Train a CNN-LSTM pose model with a two-thread pipeline that overlaps
    batch preparation with training/prediction, validating every epoch.

    Args:
        params: configuration dict (expects 'batch_size', 'n_epochs',
            'n_hidden', 'run_mode', 'shufle_data', 'mfile', ...).
    """
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train, X_test,
     Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    params["len_train"] = Y_train.shape[0] * Y_train.shape[1]
    params["len_test"] = Y_test.shape[0] * Y_test.shape[1]
    u.start_log(params)
    index_train_list, S_Train_list = du.get_batch_indexes(
        S_Train_list)  # This will prepare batch indexes
    index_test_list, S_Test_list = du.get_batch_indexes(S_Test_list)
    batch_size = params['batch_size']
    # Floor division: plain '/' yields a float on Python 3 and breaks range().
    n_train_batches = len(index_train_list) // batch_size
    n_test_batches = len(index_test_list) // batch_size

    nb_epochs = params['n_epochs']

    print("Batch size: %i, train batch size: %i, test batch size: %i" %
          (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        # Initial LSTM hidden/cell state, reset at the start of each epoch.
        H = C = np.zeros(shape=(batch_size, params['n_hidden']), dtype=dtype)
        sid = 0
        is_train = 1
        x = []
        y = []
        for minibatch_index in range(n_train_batches):
            if minibatch_index == 0:
                # Prime the pipeline: prepare the first batch synchronously.
                (sid, H, C, x, y) = du.prepare_cnn_lstm_batch(
                    index_train_list, minibatch_index, batch_size,
                    S_Train_list, sid, H, C, F_list_train, params, Y_train,
                    X_train)
            # Overlap training on the current batch with preparing the next.
            pool = ThreadPool(processes=2)
            async_t = pool.apply_async(model.train, (x, y, is_train, H, C))
            async_b = pool.apply_async(
                du.prepare_cnn_lstm_batch,
                (index_train_list, minibatch_index, batch_size, S_Train_list,
                 sid, H, C, F_list_train, params, Y_train, X_train))
            pool.close()
            pool.join()
            (loss, H, C) = async_t.get()
            (sid, H, C, x, y) = async_b.get()

            if minibatch_index == n_train_batches - 1:
                # Drain the pipeline: train on the last prepared batch.
                loss, H, C = model.train(x, y, is_train, H, C)

            batch_loss += loss
        if params['shufle_data'] == 1:
            X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
        # Stores the un-normalized epoch sum; the mean is only used for logging.
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        u.log_write('TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss),
                    params)
        if epoch_counter % 1 == 0:
            print("Model testing")
            batch_loss3d = []
            # Reset recurrent state: the test sequences differ from training.
            H = C = np.zeros(shape=(batch_size, params['n_hidden']),
                             dtype=dtype)
            sid = 0
            # NOTE(review): is_train stays 1 during prediction here — confirm
            # whether model.predictions should receive 0 instead.
            for minibatch_index in range(n_test_batches):
                if minibatch_index == 0:
                    (sid, H, C, x, y) = du.prepare_cnn_lstm_batch(
                        index_test_list, minibatch_index, batch_size,
                        S_Test_list, sid, H, C, F_list_test, params, Y_test,
                        X_test)
                pool = ThreadPool(processes=2)
                async_t = pool.apply_async(model.predictions,
                                           (x, is_train, H, C))
                async_b = pool.apply_async(
                    du.prepare_cnn_lstm_batch,
                    (index_test_list, minibatch_index, batch_size, S_Test_list,
                     sid, H, C, F_list_test, params, Y_test, X_test))
                pool.close()
                pool.join()
                (pred, H, C) = async_t.get()
                loss3d = u.get_loss(params, y, pred)
                batch_loss3d.append(loss3d)
                (sid, H, C, x, y) = async_b.get()
                # BUG FIX: the drain check used n_train_batches - 1, but this
                # loop runs n_test_batches iterations.
                if minibatch_index == n_test_batches - 1:
                    pred, H, C = model.predictions(x, is_train, H, C)
                    loss3d = u.get_loss(params, y, pred)
                    batch_loss3d.append(loss3d)

            batch_loss3d = np.nanmean(batch_loss3d)
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                # Alternate between two rolling checkpoint files.
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)

            val_counter += 1
            u.log_write('VAL--> epoch %i | error %f, %f'
                        % (val_counter, batch_loss3d, n_test_batches), params)
Example #4
0
def train_rnn(params):
    """Train a pose model from file lists, loading each batch from disk and
    validating every epoch.

    Args:
        params: configuration dict (expects 'batch_size', 'n_epochs',
            'n_hidden', 'run_mode', 'shufle_data', 'model', 'mfile', ...).
    """
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train, X_test,
     Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    F_list_train, G_list_train = du.shuffle_in_unison_inplace(
        F_list_train, G_list_train)
    params["len_train"] = len(F_list_train)
    params["len_test"] = len(F_list_test)
    u.start_log(params)
    batch_size = params['batch_size']

    # Floor division: plain '/' yields a float on Python 3 and breaks range().
    n_train_batches = len(F_list_train) // batch_size
    n_test_batches = len(F_list_test) // batch_size

    nb_epochs = params['n_epochs']

    print("Batch size: %i, train batch size: %i, test batch size: %i" %
          (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        sid = 0
        for minibatch_index in range(n_train_batches):
            x_lst = F_list_train[minibatch_index *
                                 batch_size:(minibatch_index + 1) *
                                 batch_size]  #60*20*1024
            y_lst = G_list_train[minibatch_index *
                                 batch_size:(minibatch_index + 1) *
                                 batch_size]  #60*20*1024
            x, y = du.load_batch(params, x_lst, y_lst)
            is_train = 1
            if (params["model"] == "blstmnp"):
                # Reversed copy of each sequence for the backward LSTM pass.
                # list(...) is required on Python 3, where map() is lazy and
                # np.asarray(map_obj) would produce a 0-d object array.
                x_b = np.asarray(list(map(np.flipud, x)))
                loss = model.train(x, x_b, y)
            else:
                loss = model.train(x, y, is_train)
            batch_loss += loss
        if params['shufle_data'] == 1:
            F_list_train, G_list_train = du.shuffle_in_unison_inplace(
                F_list_train, G_list_train)
        # Stores the un-normalized epoch sum; the mean is only used for logging.
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        u.log_write('TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss),
                    params)
        if epoch_counter % 1 == 0:
            print("Model testing")
            batch_loss3d = []
            H = C = np.zeros(
                shape=(batch_size, params['n_hidden']),
                dtype=dtype)  # resetting initial state, since seq change
            sid = 0
            for minibatch_index in range(n_test_batches):
                x_lst = F_list_test[minibatch_index *
                                    batch_size:(minibatch_index + 1) *
                                    batch_size]
                y_lst = G_list_test[minibatch_index *
                                    batch_size:(minibatch_index + 1) *
                                    batch_size]
                x, y = du.load_batch(params, x_lst, y_lst)
                is_train = 0
                if (params["model"] == "blstmnp"):
                    x_b = np.asarray(list(map(np.flipud, x)))
                    pred = model.predictions(x, x_b)
                else:
                    pred = model.predictions(x, is_train)
                loss3d = u.get_loss(params, y, pred)
                batch_loss3d.append(loss3d)
            batch_loss3d = np.nanmean(batch_loss3d)
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                # Alternate between two rolling checkpoint files.
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)

            val_counter += 1
            u.log_write('VAL--> epoch %i | error %f, %f'
                        % (val_counter, batch_loss3d, n_test_batches), params)
Example #5
0
def train_rnn(params):
    """Train a pose model over several sequence-offset variants of the data,
    cycling through them per epoch and validating every 10th epoch.

    Args:
        params: configuration dict (expects 'seq_length', 'batch_size',
            'n_epochs', 'resume', 'shufle_data', 'model', ...).
    """
    # Pre-load the dataset at several sequence start offsets; each epoch
    # rotates through these variants.
    data = []
    for sindex in range(0, params['seq_length'], 5):
        (X_train, Y_train, X_test, Y_test) = du.load_pose(params,
                                                          sindex=sindex)
        data.append((X_train, Y_train, X_test, Y_test))
    (X_train, Y_train, X_test, Y_test) = data[0]
    params["len_train"] = X_train.shape[0] * X_train.shape[1]
    params["len_test"] = X_test.shape[0] * X_test.shape[1]
    u.start_log(params)
    batch_size = params['batch_size']
    # Floor division: plain '/' yields a float on Python 3 and breaks range().
    n_train_batches = len(X_train) // batch_size
    n_test_batches = len(X_test) // batch_size

    nb_epochs = params['n_epochs']

    print("Batch size: %i, train batch size: %i, test batch size: %i" %
          (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['resume'] == 1:
        model = model_provider.get_model_pretrained(params)
    else:
        model = model_provider.get_model(params)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 10000
    for epoch_counter in range(nb_epochs):
        (X_train, Y_train, X_test,
         Y_test) = data[np.mod(epoch_counter, len(data))]
        if params['shufle_data'] == 1:
            X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
        # Recompute batch counts: the variants may differ in length.
        n_train_batches = len(X_train) // batch_size
        n_test_batches = len(X_test) // batch_size
        batch_loss = 0.
        for minibatch_index in range(n_train_batches):
            x = X_train[minibatch_index * batch_size:(minibatch_index + 1) *
                        batch_size]  #60*20*1024
            y = Y_train[minibatch_index * batch_size:(minibatch_index + 1) *
                        batch_size]  #60*20*54
            if (params["model"] == "blstmnp"):
                # Reversed copy of each sequence for the backward LSTM pass.
                # list(...) is required on Python 3, where map() is lazy and
                # np.asarray(map_obj) would produce a 0-d object array.
                x_b = np.asarray(list(map(np.flipud, x)))
                loss = model.train(x, x_b, y)
            else:
                loss = model.train(x, y)
            batch_loss += loss
        # Stores the un-normalized epoch sum; the mean is only used for logging.
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        u.log_write('TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss),
                    params)
        if epoch_counter % 10 == 0:
            print("Model testing")
            batch_loss = 0.
            batch_loss3d = 0.
            for minibatch_index in range(n_test_batches):
                x = X_test[minibatch_index * batch_size:(minibatch_index + 1) *
                           batch_size]
                y = Y_test[minibatch_index * batch_size:(minibatch_index + 1) *
                           batch_size]
                if (params["model"] == "blstmnp"):
                    x_b = np.asarray(list(map(np.flipud, x)))
                    pred = model.predictions(x, x_b)
                else:
                    pred = model.predictions(x)

                loss = np.nanmean(np.abs(pred - y)**2)  # mean squared error
                loss3d = u.get_loss(y, pred)
                batch_loss += loss
                batch_loss3d += loss3d
            batch_loss /= n_test_batches
            batch_loss3d /= n_test_batches
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                # Alternate between two rolling checkpoint files.
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)

            val_counter += 1
            s = 'VAL--> epoch %i | error %f, %f %f' % (
                val_counter, batch_loss, batch_loss3d, n_test_batches)
            u.log_write(s, params)
Example #6
0
def train_rnn(params):
    """Train a pose model from file lists (batches loaded from disk),
    validating every 3rd epoch.

    Args:
        params: configuration dict (expects 'batch_size', 'n_epochs',
            'n_hidden', 'run_mode', 'shufle_data', 'model', 'mfile', ...).
    """
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train,
     X_test, Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    params["len_train"] = len(F_list_train)
    params["len_test"] = len(F_list_test)
    u.start_log(params)
    batch_size = params['batch_size']

    # Floor division: plain '/' yields a float on Python 3 and breaks range().
    n_train_batches = len(F_list_train) // batch_size
    n_test_batches = len(F_list_test) // batch_size

    nb_epochs = params['n_epochs']

    print("Batch size: %i, train batch size: %i, test batch size: %i"
          % (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        sid = 0
        for minibatch_index in range(n_train_batches):
            x_lst = F_list_train[minibatch_index * batch_size:
                                 (minibatch_index + 1) * batch_size]  #60*20*1024
            y_lst = G_list_train[minibatch_index * batch_size:
                                 (minibatch_index + 1) * batch_size]  #60*20*1024
            x, y = du.load_batch(params, x_lst, y_lst)
            is_train = 1
            if (params["model"] == "blstmnp"):
                # Reversed copy of each sequence for the backward LSTM pass.
                # list(...) is required on Python 3, where map() is lazy and
                # np.asarray(map_obj) would produce a 0-d object array.
                x_b = np.asarray(list(map(np.flipud, x)))
                loss = model.train(x, x_b, y)
            else:
                loss = model.train(x, y, is_train)
            batch_loss += loss
        if params['shufle_data'] == 1:
            # BUG FIX: previously shuffled X_train/Y_train, which the training
            # loop never reads — batches are sliced from F_list/G_list, so the
            # shuffle was a no-op for training order.
            F_list_train, G_list_train = du.shuffle_in_unison_inplace(
                F_list_train, G_list_train)
        # Stores the un-normalized epoch sum; the mean is only used for logging.
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        u.log_write('TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss),
                    params)
        if epoch_counter % 3 == 0:
            print("Model testing")
            batch_loss3d = []
            H = C = np.zeros(shape=(batch_size, params['n_hidden']),
                             dtype=dtype)  # resetting initial state, since seq change
            sid = 0
            for minibatch_index in range(n_test_batches):
                x_lst = F_list_test[minibatch_index * batch_size:
                                    (minibatch_index + 1) * batch_size]
                y_lst = G_list_test[minibatch_index * batch_size:
                                    (minibatch_index + 1) * batch_size]
                x, y = du.load_batch(params, x_lst, y_lst)
                is_train = 0
                if (params["model"] == "blstmnp"):
                    x_b = np.asarray(list(map(np.flipud, x)))
                    pred = model.predictions(x, x_b)
                else:
                    pred = model.predictions(x, is_train)
                loss3d = u.get_loss(params, y, pred)
                batch_loss3d.append(loss3d)
            batch_loss3d = np.nanmean(batch_loss3d)
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                # Alternate between two rolling checkpoint files.
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)

            val_counter += 1
            u.log_write('VAL--> epoch %i | error %f, %f'
                        % (val_counter, batch_loss3d, n_test_batches), params)
Example #7
0
def train_rnn(params):
    """Train a pose model on in-memory arrays, validating every 3rd epoch.

    Args:
        params: configuration dict (expects 'batch_size', 'n_epochs',
            'run_mode', 'shufle_data', 'model', 'mfile', ...).
    """
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train, X_test,
     Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    params["len_train"] = len(X_train)
    params["len_test"] = len(X_test)
    u.start_log(params)
    batch_size = params['batch_size']

    # Floor division: plain '/' yields a float on Python 3 and breaks range().
    n_train_batches = params["len_train"] // batch_size
    n_test_batches = params["len_test"] // batch_size

    nb_epochs = params['n_epochs']

    print("Batch size: %i, train batch size: %i, test batch size: %i" %
          (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        sid = 0
        is_train = 1
        for minibatch_index in range(n_train_batches):
            x = X_train[minibatch_index * batch_size:(minibatch_index + 1) *
                        batch_size]  #60*20*1024
            y = Y_train[minibatch_index * batch_size:(minibatch_index + 1) *
                        batch_size]  #60*20*1024
            if (params["model"] == "blstmnp"):
                # Reversed copy of each sequence for the backward LSTM pass.
                # list(...) is required on Python 3, where map() is lazy and
                # np.asarray(map_obj) would produce a 0-d object array.
                x_b = np.asarray(list(map(np.flipud, x)))
                loss = model.train(x, x_b, y)
            else:
                loss = model.train(x, y, is_train)
            batch_loss += loss
        if params['shufle_data'] == 1:
            X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
        # Stores the un-normalized epoch sum; the mean is only used for logging.
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        u.log_write('TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss),
                    params)
        if epoch_counter % 3 == 0:
            print("Model testing")
            batch_loss3d = []
            is_train = 0
            for minibatch_index in range(n_test_batches):
                x = X_test[minibatch_index * batch_size:(minibatch_index + 1) *
                           batch_size]  #60*20*1024
                y = Y_test[minibatch_index * batch_size:(minibatch_index + 1) *
                           batch_size]  #60*20*1024
                pred = model.predictions(x, is_train)
                loss3d = u.get_loss(params, y, pred)
                batch_loss3d.append(loss3d)
            batch_loss3d = np.nanmean(batch_loss3d)
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                # Alternate between two rolling checkpoint files.
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)

            val_counter += 1
            u.log_write('VAL--> epoch %i | error %f, %f'
                        % (val_counter, batch_loss3d, n_test_batches), params)
Example #8
0
def train_rnn(params):
    """Train a CNN pose model with a two-thread pipeline that overlaps batch
    preparation with training/prediction, validating every epoch.

    Args:
        params: configuration dict (expects 'batch_size', 'n_epochs',
            'run_mode', 'shufle_data', 'mfile', ...).
    """
    rng = RandomStreams(seed=1234)
    (X_train, Y_train, S_Train_list, F_list_train, G_list_train,
     X_test, Y_test, S_Test_list, F_list_test, G_list_test) = du.load_pose(params)
    F_list_train, Y_train = du.shuffle_in_unison_inplace(F_list_train, Y_train)
    params["len_train"] = len(F_list_train)
    params["len_test"] = len(F_list_test)
    u.start_log(params)
    batch_size = params['batch_size']

    # Floor division: plain '/' yields a float on Python 3 and breaks range().
    n_train_batches = len(F_list_train) // batch_size
    n_test_batches = len(F_list_test) // batch_size

    nb_epochs = params['n_epochs']

    print("Batch size: %i, train batch size: %i, test batch size: %i"
          % (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params['run_mode'] == 1:
        model = model_provider.get_model_pretrained(params, rng)
        u.log_write("Pretrained loaded: %s" % (params['mfile']), params)
    else:
        model = model_provider.get_model(params, rng)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 1000
    for epoch_counter in range(nb_epochs):
        batch_loss = 0.
        is_train = 1
        for minibatch_index in range(n_train_batches):
            if minibatch_index == 0:
                # Prime the pipeline: prepare the first batch synchronously.
                x, y = du.prepare_cnn_batch(minibatch_index, batch_size,
                                            F_list_train, Y_train)
            # Overlap training on the current batch with preparing the next.
            pool = ThreadPool(processes=2)
            async_t = pool.apply_async(model.train, (x, y, is_train))
            async_b = pool.apply_async(du.prepare_cnn_batch,
                                       (minibatch_index, batch_size,
                                        F_list_train, Y_train))
            pool.close()
            pool.join()
            loss = async_t.get()
            (x, y) = async_b.get()

            if minibatch_index == n_train_batches - 1:
                # Drain the pipeline: train on the last prepared batch.
                loss = model.train(x, y, is_train)

            batch_loss += loss
        if params['shufle_data'] == 1:
            F_list_train, Y_train = du.shuffle_in_unison_inplace(F_list_train,
                                                                 Y_train)
        # Stores the un-normalized epoch sum; the mean is only used for logging.
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        u.log_write('TRAIN--> epoch %i | error %f' % (epoch_counter, batch_loss),
                    params)
        if epoch_counter % 1 == 0:
            print("Model testing")
            batch_loss3d = []
            is_train = 0
            x = []
            y = []
            for minibatch_index in range(n_test_batches):
                if minibatch_index == 0:
                    x, y = du.prepare_cnn_batch(minibatch_index, batch_size,
                                                F_list_test, Y_test)
                pool = ThreadPool(processes=2)
                async_t = pool.apply_async(model.predictions, (x, is_train))
                async_b = pool.apply_async(du.prepare_cnn_batch,
                                           (minibatch_index, batch_size,
                                            F_list_test, Y_test))
                pool.close()
                pool.join()
                pred = async_t.get()
                loss3d = np.mean(np.linalg.norm((np.asarray(pred) - y)))
                batch_loss3d.append(loss3d)
                (x, y) = async_b.get()

                # BUG FIX: the drain check used n_train_batches - 1, but this
                # loop runs n_test_batches iterations.
                if minibatch_index == n_test_batches - 1:
                    pred = model.predictions(x, is_train)
                    loss3d = np.mean(np.linalg.norm((np.asarray(pred) - y)))
                    batch_loss3d.append(loss3d)

            batch_loss3d = np.nanmean(batch_loss3d)
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(epoch_counter) + "_" + str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                # Alternate between two rolling checkpoint files.
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)

            val_counter += 1
            u.log_write('VAL--> epoch %i | error %f, %f'
                        % (val_counter, batch_loss3d, n_test_batches), params)
Example #9
0
def train_rnn(params):
   """Train an LSTM pose model with explicit hidden-state carry-over.

   Loads train/test pose data, builds (or resumes) a model via
   ``model_provider``, then runs ``params['n_epochs']`` epochs. Each
   minibatch threads the LSTM hidden states (``LStateList_*``) through
   ``du.prepare_lstm_batch`` so sequences can span batch boundaries;
   per-batch reset counters decide when states are zeroed. After every
   epoch the model is evaluated on the test split and parameters are
   checkpointed (best-so-far gets a ``*_best.p`` file, otherwise two
   rotating files).

   Args:
       params: configuration dict; keys used here include 'batch_size',
           'n_epochs', 'n_hidden', 'nlayer', 'run_mode', 'shufle_data',
           'mfile'. 'len_train'/'len_test' are written into it.

   Side effects: writes log lines via ``u.log_write`` and parameter
   pickles via ``u.write_params``. Returns nothing.
   """
   rng = RandomStreams(seed=1234)
   (X_train,Y_train,S_Train_list,F_list_train,G_list_train,X_test,Y_test,S_Test_list,F_list_test,G_list_test)=du.load_pose(params)
   params["len_train"]=Y_train.shape[0]*Y_train.shape[1]
   params["len_test"]=Y_test.shape[0]*Y_test.shape[1]
   u.start_log(params)
   index_train_list,S_Train_list=du.get_seq_indexes(params,S_Train_list)
   index_test_list,S_Test_list=du.get_seq_indexes(params,S_Test_list)
   batch_size=params['batch_size']
   # Floor division: '/' on ints was floor division in Python 2, but under
   # Python 3 it yields a float and range(n_train_batches) would raise.
   n_train_batches = len(index_train_list)
   n_train_batches //= batch_size

   n_test_batches = len(index_test_list)
   n_test_batches //= batch_size

   nb_epochs=params['n_epochs']

   print("Batch size: %i, train batch size: %i, test batch size: %i"%(batch_size,n_train_batches,n_test_batches))
   u.log_write("Model build started",params)
   if params['run_mode']==1:
      model= model_provider.get_model_pretrained(params,rng)
      u.log_write("Pretrained loaded: %s"%(params['mfile']),params)
   else:
     model= model_provider.get_model(params,rng)
   u.log_write("Number of parameters: %s"%(model.n_param),params)
   train_errors = np.ndarray(nb_epochs)
   u.log_write("Training started",params)
   val_counter=0
   best_loss=1000
   for epoch_counter in range(nb_epochs):
      batch_loss = 0.
      # Zeroed initial hidden/cell states: one array per layer direction
      # (nlayer*2 entries), each (batch_size, n_hidden).
      LStateList_t=[np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) for i in range(params['nlayer']*2)] # initial hidden state
      LStateList_pre=[np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) for i in range(params['nlayer']*2)] # initial hidden state
      state_reset_counter_lst=[0 for i in range(batch_size)]
      is_train=1
      for minibatch_index in range(n_train_batches):
          state_reset_counter_lst=[s+1 for s in state_reset_counter_lst]
          (LStateList_b,x,y,state_reset_counter_lst)=du.prepare_lstm_batch(index_train_list, minibatch_index, batch_size, S_Train_list,LStateList_t,LStateList_pre, F_list_train, params, Y_train, X_train,state_reset_counter_lst)
          LStateList_pre=LStateList_b
          # model.train takes (x, y, is_train, *states) and returns
          # [loss, *updated_states]; carry the new states forward.
          args=(x, y,is_train)+tuple(LStateList_b)
          result= model.train(*args)
          loss=result[0]
          LStateList_t=result[1:len(result)]

          batch_loss += loss
      if params['shufle_data']==1:
         X_train,Y_train=du.shuffle_in_unison_inplace(X_train,Y_train)
      # NOTE(review): the *summed* loss is recorded before normalization —
      # train_errors holds per-epoch totals, the log line holds the mean.
      train_errors[epoch_counter] = batch_loss
      batch_loss/=n_train_batches
      s='TRAIN--> epoch %i | error %f'%(epoch_counter, batch_loss)
      u.log_write(s,params)
      if(epoch_counter%1==0):  # validate every epoch (modulus kept as a tunable knob)
          is_train=0
          print("Model testing")
          state_reset_counter=0
          batch_loss3d = []
          LStateList_t=[np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) for i in range(params['nlayer']*2)] # initial hidden state
          LStateList_pre=[np.zeros(shape=(batch_size,params['n_hidden']), dtype=dtype) for i in range(params['nlayer']*2)] # initial hidden state
          state_reset_counter_lst=[0 for i in range(batch_size)]
          for minibatch_index in range(n_test_batches):
             state_reset_counter_lst=[s+1 for s in state_reset_counter_lst]
             (LStateList_b,x,y,state_reset_counter_lst)=du.prepare_lstm_batch(index_test_list, minibatch_index, batch_size, S_Test_list, LStateList_t,LStateList_pre, F_list_test, params, Y_test, X_test,state_reset_counter_lst)
             LStateList_pre=LStateList_b
             args=(x,is_train)+tuple(LStateList_b)
             result = model.predictions(*args)
             pred=result[0]
             LStateList_t=result[1:len(result)]
             loss3d =u.get_loss(params,y,pred)
             batch_loss3d.append(loss3d)
          batch_loss3d=np.nanmean(batch_loss3d)
          if(batch_loss3d<best_loss):
             best_loss=batch_loss3d
             ext=str(epoch_counter)+"_"+str(batch_loss3d)+"_best.p"
             u.write_params(model.params,params,ext)
          else:
              # Alternate between two rotating checkpoint files (0.p / 1.p).
              ext=str(val_counter%2)+".p"
              u.write_params(model.params,params,ext)

          val_counter+=1
          s ='VAL--> epoch %i | error %f, %f'%(val_counter,batch_loss3d,n_test_batches)
          u.log_write(s,params)
Example #10
0
def train_rnn(params):
    """Train an RNN pose model over several sequence-offset views of the data.

    The dataset is loaded once per starting offset (``sindex`` stepping by 5
    up to ``params['seq_length']``) and the views are cycled across epochs,
    so each epoch trains on a different temporal slicing. Every 10 epochs the
    model is evaluated on the matching test split; the best 3D loss triggers
    a ``*_best.p`` checkpoint, otherwise two rotating files are overwritten.

    Args:
        params: configuration dict; keys used here include 'seq_length',
            'batch_size', 'n_epochs', 'resume', 'shufle_data', 'model'.
            'len_train'/'len_test' are written into it.

    Side effects: logging via ``u.log_write`` and parameter pickles via
    ``u.write_params``. Returns nothing.
    """
    data = []
    for sindex in range(0, params["seq_length"], 5):
        (X_train, Y_train, X_test, Y_test) = du.load_pose(params, sindex=sindex)
        data.append((X_train, Y_train, X_test, Y_test))
    (X_train, Y_train, X_test, Y_test) = data[0]
    params["len_train"] = X_train.shape[0] * X_train.shape[1]
    params["len_test"] = X_test.shape[0] * X_test.shape[1]
    u.start_log(params)
    batch_size = params["batch_size"]
    # Floor division: '/' on ints was floor division in Python 2, but under
    # Python 3 it yields a float and range(n_train_batches) would raise.
    n_train_batches = len(X_train)
    n_train_batches //= batch_size

    n_test_batches = len(X_test)
    n_test_batches //= batch_size

    nb_epochs = params["n_epochs"]

    print("Batch size: %i, train batch size: %i, test batch size: %i" % (batch_size, n_train_batches, n_test_batches))
    u.log_write("Model build started", params)
    if params["resume"] == 1:
        model = model_provider.get_model_pretrained(params)
    else:
        model = model_provider.get_model(params)
    u.log_write("Number of parameters: %s" % (model.n_param), params)
    train_errors = np.ndarray(nb_epochs)
    u.log_write("Training started", params)
    val_counter = 0
    best_loss = 10000
    for epoch_counter in range(nb_epochs):
        # Cycle through the offset views so each epoch sees a different slicing.
        (X_train, Y_train, X_test, Y_test) = data[np.mod(epoch_counter, len(data))]
        if params["shufle_data"] == 1:
            X_train, Y_train = du.shuffle_in_unison_inplace(X_train, Y_train)
        n_train_batches = len(X_train)
        n_train_batches //= batch_size

        n_test_batches = len(X_test)
        n_test_batches //= batch_size
        batch_loss = 0.0
        for minibatch_index in range(n_train_batches):
            x = X_train[minibatch_index * batch_size : (minibatch_index + 1) * batch_size]  # 60*20*1024
            y = Y_train[minibatch_index * batch_size : (minibatch_index + 1) * batch_size]  # 60*20*54
            if params["model"] == "blstmnp":
                # Bidirectional variant also needs the time-reversed input.
                # list(...) keeps this correct under Python 3, where bare
                # map() is a lazy iterator and np.asarray would not consume it.
                x_b = np.asarray(list(map(np.flipud, x)))
                loss = model.train(x, x_b, y)
            else:
                loss = model.train(x, y)
            batch_loss += loss
        # NOTE(review): the *summed* loss is recorded before normalization —
        # train_errors holds per-epoch totals, the log line holds the mean.
        train_errors[epoch_counter] = batch_loss
        batch_loss /= n_train_batches
        s = "TRAIN--> epoch %i | error %f" % (epoch_counter, batch_loss)
        u.log_write(s, params)
        if epoch_counter % 10 == 0:
            print("Model testing")
            batch_loss = 0.0
            batch_loss3d = 0.0
            for minibatch_index in range(n_test_batches):
                x = X_test[minibatch_index * batch_size : (minibatch_index + 1) * batch_size]
                y = Y_test[minibatch_index * batch_size : (minibatch_index + 1) * batch_size]
                if params["model"] == "blstmnp":
                    x_b = np.asarray(list(map(np.flipud, x)))
                    pred = model.predictions(x, x_b)
                else:
                    pred = model.predictions(x)

                loss = np.nanmean(np.abs(pred - y) ** 2)
                loss3d = u.get_loss(y, pred)
                batch_loss += loss
                batch_loss3d += loss3d
            batch_loss /= n_test_batches
            batch_loss3d /= n_test_batches
            if batch_loss3d < best_loss:
                best_loss = batch_loss3d
                ext = str(batch_loss3d) + "_best.p"
                u.write_params(model.params, params, ext)
            else:
                # Alternate between two rotating checkpoint files (0.p / 1.p).
                ext = str(val_counter % 2) + ".p"
                u.write_params(model.params, params, ext)

            val_counter += 1
            s = "VAL--> epoch %i | error %f, %f %f" % (val_counter, batch_loss, batch_loss3d, n_test_batches)
            u.log_write(s, params)