# Assumes the usual module-level setup for these examples, e.g.:
#   import numpy as np
#   import tensorflow as tf  # TF 1.x API
# plus FLAGS and the helpers m, phi_R, a, phi_O, gen and make_video.
def train():

    # Object Matrix
    O = tf.placeholder(tf.float32, [None, FLAGS.Ds, FLAGS.No], name="O")
    # Relation Matrices R=<Rr,Rs,Ra>
    Rr = tf.placeholder(tf.float32, [None, FLAGS.No, FLAGS.Nr], name="Rr")
    Rs = tf.placeholder(tf.float32, [None, FLAGS.No, FLAGS.Nr], name="Rs")
    Ra = tf.placeholder(tf.float32, [None, FLAGS.Dr, FLAGS.Nr], name="Ra")
    # next velocities
    P_label = tf.placeholder(tf.float32, [None, FLAGS.Dp, FLAGS.No],
                             name="P_label")
    # External Effects
    X = tf.placeholder(tf.float32, [None, FLAGS.Dx, FLAGS.No], name="X")
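    # Shape conventions: No = number of objects, Nr = number of relations,
    # Ds / Dr / Dx / Dp = per-object state, relation-attribute, external-effect
    # and output (next-velocity) feature dimensions.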

    # marshalling function, m(G)=B, G=<O,R>
    B = m(O, Rr, Rs, Ra)
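    # A minimal sketch of m, assuming the Interaction Networks formulation
    # (Battaglia et al., 2016): per-relation receiver state, sender state and
    # relation attributes stacked along the feature axis. The real m may
    # differ in detail:
    #
    #   def m(O, Rr, Rs, Ra):
    #       return tf.concat([tf.matmul(O, Rr),  # receiver states [batch, Ds, Nr]
    #                         tf.matmul(O, Rs),  # sender states   [batch, Ds, Nr]
    #                         Ra], axis=1)       # B: [batch, 2*Ds + Dr, Nr]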

    # relational modeling phi_R(B)=E
    E = phi_R(B)

    # aggregator
    C = a(O, Rr, X, E)
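    # A minimal sketch of the aggregator a, again assuming the Interaction
    # Networks formulation: effects are summed per receiver via E * Rr^T and
    # concatenated with object states and external effects. The real a may
    # differ in detail:
    #
    #   def a(O, Rr, X, E):
    #       E_bar = tf.matmul(E, Rr, transpose_b=True)  # [batch, De, No]
    #       return tf.concat([O, X, E_bar], axis=1)     # C: [batch, Ds+Dx+De, No]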

    # object modeling phi_O(C)=P
    P = phi_O(C)

    # abstract modeling phi_A(P)=q
    # q = phi_A(P)

    # loss and optimizer: squared error on the predicted velocities plus
    # small L2 penalties on the inferred effects E and on every variable
    # (a simple weight decay)
    params_list = tf.global_variables()
    loss = tf.nn.l2_loss(P - P_label) + 0.001 * tf.nn.l2_loss(E)
    for i in params_list:
        loss += 0.001 * tf.nn.l2_loss(i)
    optimizer = tf.train.AdamOptimizer(0.001)
    trainer = optimizer.minimize(loss)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # Data Generation
    set_num = 2000
    # each 1000-step trajectory yields 999 (state, next-velocity) pairs
    total_data = np.zeros((999 * set_num, FLAGS.Ds, FLAGS.No), dtype=float)
    total_label = np.zeros((999 * set_num, FLAGS.Dp, FLAGS.No), dtype=float)
    for i in range(set_num):
        raw_data = gen(FLAGS.No, True)
        data = np.zeros((999, FLAGS.Ds, FLAGS.No), dtype=float)
        label = np.zeros((999, FLAGS.Dp, FLAGS.No), dtype=float)
        for j in range(1000 - 1):
            data[j] = np.transpose(raw_data[j])
            label[j] = np.transpose(raw_data[j + 1, :, 3:5])
        total_data[i * 999:(i + 1) * 999, :] = data
        total_label[i * 999:(i + 1) * 999, :] = label

    # Shuffle
    tr_data_num = 1000000
    val_data_num = 200000
    # np.random.shuffle cannot shuffle a range object under Python 3, so draw
    # a random permutation of indices instead
    total_idx = np.random.permutation(len(total_data))
    mixed_data = total_data[total_idx]
    mixed_label = total_label[total_idx]
    # Training/Validation/Test
    train_data = mixed_data[:tr_data_num]
    train_label = mixed_label[:tr_data_num]
    val_data = mixed_data[tr_data_num:tr_data_num + val_data_num]
    val_label = mixed_label[tr_data_num:tr_data_num + val_data_num]
    test_data = mixed_data[tr_data_num + val_data_num:]
    test_label = mixed_label[tr_data_num + val_data_num:]

    # Normalization
    weights_list = np.sort(
        np.reshape(train_data[:, 0, :], [1, tr_data_num * FLAGS.No])[0])
    weights_median = weights_list[int(len(weights_list) * 0.5)]
    weights_min = weights_list[int(len(weights_list) * 0.05)]
    weights_max = weights_list[int(len(weights_list) * 0.95)]
    position_list = np.sort(
        np.reshape(train_data[:, 1:3, :], [1, tr_data_num * FLAGS.No * 2])[0])
    position_median = position_list[int(len(position_list) * 0.5)]
    position_min = position_list[int(len(position_list) * 0.05)]
    position_max = position_list[int(len(position_list) * 0.95)]
    velocity_list = np.sort(
        np.reshape(train_data[:, 3:5, :], [1, tr_data_num * FLAGS.No * 2])[0])
    velocity_median = velocity_list[int(len(velocity_list) * 0.5)]
    velocity_min = velocity_list[int(len(velocity_list) * 0.05)]
    velocity_max = velocity_list[int(len(velocity_list) * 0.95)]
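    # All three statistics apply the same affine map: center on the median and
    # scale the 5th-95th percentile range to a width of 2. Hypothetical
    # helpers that capture the pattern used below:
    #
    #   def normalize(x, median, lo, hi):
    #       return (x - median) * (2 / (hi - lo))
    #
    #   def denormalize(x, median, lo, hi):
    #       return x * (hi - lo) / 2 + median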

    train_data[:, 0, :] = (train_data[:, 0, :] -
                           weights_median) * (2 / (weights_max - weights_min))
    train_data[:, 1:3, :] = (train_data[:, 1:3, :] -
                             position_median) * (2 /
                                                 (position_max - position_min))
    train_data[:, 3:5, :] = (train_data[:, 3:5, :] -
                             velocity_median) * (2 /
                                                 (velocity_max - velocity_min))

    val_data[:, 0, :] = (val_data[:, 0, :] -
                         weights_median) * (2 / (weights_max - weights_min))
    val_data[:,
             1:3, :] = (val_data[:, 1:3, :] -
                        position_median) * (2 / (position_max - position_min))
    val_data[:,
             3:5, :] = (val_data[:, 3:5, :] -
                        velocity_median) * (2 / (velocity_max - velocity_min))

    test_data[:, 0, :] = (test_data[:, 0, :] -
                          weights_median) * (2 / (weights_max - weights_min))
    test_data[:,
              1:3, :] = (test_data[:, 1:3, :] -
                         position_median) * (2 / (position_max - position_min))
    test_data[:,
              3:5, :] = (test_data[:, 3:5, :] -
                         velocity_median) * (2 / (velocity_max - velocity_min))

    mini_batch_num = 100
    # Set Rr_data, Rs_data, Ra_data and X_data: shared one-hot receiver/sender
    # matrices for every ordered pair (i, j) with i != j, i.e. a fully
    # connected directed relation graph (Ra and X stay zero here)
    Rr_data = np.zeros((mini_batch_num, FLAGS.No, FLAGS.Nr), dtype=float)
    Rs_data = np.zeros((mini_batch_num, FLAGS.No, FLAGS.Nr), dtype=float)
    Ra_data = np.zeros((mini_batch_num, FLAGS.Dr, FLAGS.Nr), dtype=float)
    X_data = np.zeros((mini_batch_num, FLAGS.Dx, FLAGS.No), dtype=float)
    cnt = 0
    for i in range(FLAGS.No):
        for j in range(FLAGS.No):
            if (i != j):
                Rr_data[:, i, cnt] = 1.0
                Rs_data[:, j, cnt] = 1.0
                cnt += 1
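    # Sanity check: a fully connected directed graph over No objects has
    # No*(No-1) relations, which is what FLAGS.Nr is assumed to be here.
    assert cnt == FLAGS.Nr, "expected Nr == No * (No - 1)"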

    # Training
    max_epochs = 2000
    for i in range(max_epochs):
        for j in range(int(len(train_data) / mini_batch_num)):
            batch_data = train_data[j * mini_batch_num:(j + 1) *
                                    mini_batch_num]
            batch_label = train_label[j * mini_batch_num:(j + 1) *
                                      mini_batch_num]
            sess.run(trainer,
                     feed_dict={
                         O: batch_data,
                         Rr: Rr_data,
                         Rs: Rs_data,
                         Ra: Ra_data,
                         P_label: batch_label,
                         X: X_data
                     })
        val_loss = 0
        for j in range(int(len(val_data) / mini_batch_num)):
            batch_data = val_data[j * mini_batch_num:(j + 1) * mini_batch_num]
            batch_label = val_label[j * mini_batch_num:(j + 1) *
                                    mini_batch_num]
            val_loss += sess.run(loss,
                                 feed_dict={
                                     O: batch_data,
                                     Rr: Rr_data,
                                     Rs: Rs_data,
                                     Ra: Ra_data,
                                     P_label: batch_label,
                                     X: X_data
                                 })
        print("Epoch " + str(i + 1) + " Validation MSE: " + str(val_loss /
                                                                (j + 1)))

    # Make Video
    frame_len = 300
    raw_data = gen(FLAGS.No, True)
    xy_origin = raw_data[:frame_len, :, 1:3]
    estimated_data = np.zeros((frame_len, FLAGS.No, FLAGS.Ds), dtype=float)
    estimated_data[0] = raw_data[0]
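    # Roll the dynamics forward: masses stay fixed, the network predicts the
    # next velocities, and positions follow by a forward-Euler step with
    # dt = 0.001 (presumably the simulator's timestep). Note that training
    # normalized the inputs while this rollout feeds raw states; the second
    # variant of train() below repeats the normalization during rollout.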
    for i in range(1, frame_len):
        velocities = sess.run(P,
                              feed_dict={
                                  O: [np.transpose(estimated_data[i - 1])],
                                  Rr: [Rr_data[0]],
                                  Rs: [Rs_data[0]],
                                  Ra: [Ra_data[0]],
                                  X: [X_data[0]]
                              })[0]
        estimated_data[i, :, 0] = estimated_data[i - 1][:, 0]
        estimated_data[i, :, 3:5] = np.transpose(velocities)
        estimated_data[i, :, 1:3] = estimated_data[
            i - 1, :, 1:3] + estimated_data[i, :, 3:5] * 0.001
    xy_estimated = estimated_data[:, :, 1:3]
    print("Video Recording")
    make_video(xy_origin, "true.mp4")
    make_video(xy_estimated, "modeling.mp4")
# Variant: adds TensorBoard summaries, normalizes the labels as well, and
# trains on a single trajectory as an overfitting sanity check.
def train():

    # Object Matrix
    O = tf.placeholder(tf.float32, [None, FLAGS.Ds, FLAGS.No], name="O")
    # Relation Matrices R=<Rr,Rs,Ra>
    Rr = tf.placeholder(tf.float32, [None, FLAGS.No, FLAGS.Nr], name="Rr")
    Rs = tf.placeholder(tf.float32, [None, FLAGS.No, FLAGS.Nr], name="Rs")
    Ra = tf.placeholder(tf.float32, [None, FLAGS.Dr, FLAGS.Nr], name="Ra")
    # next velocities
    P_label = tf.placeholder(tf.float32, [None, FLAGS.Dp, FLAGS.No],
                             name="P_label")
    # External Effects
    X = tf.placeholder(tf.float32, [None, FLAGS.Dx, FLAGS.No], name="X")

    # marshalling function, m(G)=B, G=<O,R>
    B = m(O, Rr, Rs, Ra)

    # relational modeling phi_R(B)=E
    E = phi_R(B)

    # aggregator
    C = a(O, Rr, X, E)

    # object modeling phi_O(C)=P
    P = phi_O(C)

    # abstract modeling phi_A(P)=q
    # q = phi_A(P)

    # loss and optimizer: per-sample squared error summed over (Dp, No) and
    # averaged over the batch, plus the same L2 penalties as above
    params_list = tf.global_variables()
    mse = tf.reduce_mean(tf.reduce_sum(tf.square(P - P_label), [1, 2]))
    loss = mse + 0.001 * tf.nn.l2_loss(E)
    for i in params_list:
        loss += 0.001 * tf.nn.l2_loss(i)
    optimizer = tf.train.AdamOptimizer(0.001)
    trainer = optimizer.minimize(loss)

    # tensorboard
    tf.summary.scalar('mse', mse)
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter(FLAGS.log_dir)
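    # note: the only add_summary call (in the validation loop) is commented
    # out below, so nothing is written to FLAGS.log_dir unless re-enabled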

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # Data Generation
    set_num = 1
    # set_num = 2000
    total_data = np.zeros((999 * set_num, FLAGS.Ds, FLAGS.No), dtype=float)
    total_label = np.zeros((999 * set_num, FLAGS.Dp, FLAGS.No), dtype=float)
    for i in range(set_num):
        raw_data = gen(FLAGS.No, True)
        data = np.zeros((999, FLAGS.Ds, FLAGS.No), dtype=float)
        label = np.zeros((999, FLAGS.Dp, FLAGS.No), dtype=float)
        for j in range(1000 - 1):
            data[j] = np.transpose(raw_data[j])
            label[j] = np.transpose(raw_data[j + 1, :, 3:5])
        total_data[i * 999:(i + 1) * 999, :] = data
        total_label[i * 999:(i + 1) * 999, :] = label
    """
  # Shuffle
  tr_data_num=1000*(set_num-1);
  val_data_num=300*(set_num-1);
  #tr_data_num=1000000;
  #val_data_num=200000;
  total_idx=range(len(total_data));np.random.shuffle(total_idx);
  mixed_data=total_data[total_idx];
  mixed_label=total_label[total_idx];
  # Training/Validation/Test
  train_data=mixed_data[:tr_data_num];
  train_label=mixed_label[:tr_data_num];
  val_data=mixed_data[tr_data_num:tr_data_num+val_data_num];
  val_label=mixed_label[tr_data_num:tr_data_num+val_data_num];
  test_data=mixed_data[tr_data_num+val_data_num:];
  test_label=mixed_label[tr_data_num+val_data_num:];
  """
    train_data = total_data
    train_label = total_label
    val_data = total_data
    val_label = total_label
    test_data = total_data
    test_label = total_label
    tr_data_num = len(train_data)
    val_data_num = len(val_data)

    # Normalization
    weights_list = np.sort(
        np.reshape(train_data[:, 0, :], [1, tr_data_num * FLAGS.No])[0])
    weights_median = weights_list[int(len(weights_list) * 0.5)]
    weights_min = weights_list[int(len(weights_list) * 0.05)]
    weights_max = weights_list[int(len(weights_list) * 0.95)]
    position_list = np.sort(
        np.reshape(train_data[:, 1:3, :], [1, tr_data_num * FLAGS.No * 2])[0])
    position_median = position_list[int(len(position_list) * 0.5)]
    position_min = position_list[int(len(position_list) * 0.05)]
    position_max = position_list[int(len(position_list) * 0.95)]
    velocity_list = np.sort(
        np.reshape(train_data[:, 3:5, :], [1, tr_data_num * FLAGS.No * 2])[0])
    velocity_median = velocity_list[int(len(velocity_list) * 0.5)]
    velocity_min = velocity_list[int(len(velocity_list) * 0.05)]
    velocity_max = velocity_list[int(len(velocity_list) * 0.95)]

    train_data[:, 0, :] = (train_data[:, 0, :] -
                           weights_median) * (2 / (weights_max - weights_min))
    train_data[:, 1:3, :] = (train_data[:, 1:3, :] -
                             position_median) * (2 /
                                                 (position_max - position_min))
    train_data[:, 3:5, :] = (train_data[:, 3:5, :] -
                             velocity_median) * (2 /
                                                 (velocity_max - velocity_min))
    train_label = (train_label -
                   velocity_median) * (2 / (velocity_max - velocity_min))
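    # (unlike the first variant, the labels are normalized too, so rollout
    # predictions below must be de-normalized before integrating)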

    val_data[:, 0, :] = (val_data[:, 0, :] -
                         weights_median) * (2 / (weights_max - weights_min))
    val_data[:,
             1:3, :] = (val_data[:, 1:3, :] -
                        position_median) * (2 / (position_max - position_min))
    val_data[:,
             3:5, :] = (val_data[:, 3:5, :] -
                        velocity_median) * (2 / (velocity_max - velocity_min))
    val_label = (val_label - velocity_median) * (2 /
                                                 (velocity_max - velocity_min))

    test_data[:, 0, :] = (test_data[:, 0, :] -
                          weights_median) * (2 / (weights_max - weights_min))
    test_data[:,
              1:3, :] = (test_data[:, 1:3, :] -
                         position_median) * (2 / (position_max - position_min))
    test_data[:,
              3:5, :] = (test_data[:, 3:5, :] -
                         velocity_median) * (2 / (velocity_max - velocity_min))
    test_label = (test_label -
                  velocity_median) * (2 / (velocity_max - velocity_min))

    mini_batch_num = 100
    # Set Rr_data, Rs_data, Ra_data and X_data (same fully connected directed
    # relation graph as in the first variant)
    Rr_data = np.zeros((mini_batch_num, FLAGS.No, FLAGS.Nr), dtype=float)
    Rs_data = np.zeros((mini_batch_num, FLAGS.No, FLAGS.Nr), dtype=float)
    Ra_data = np.zeros((mini_batch_num, FLAGS.Dr, FLAGS.Nr), dtype=float)
    X_data = np.zeros((mini_batch_num, FLAGS.Dx, FLAGS.No), dtype=float)
    cnt = 0
    for i in range(FLAGS.No):
        for j in range(FLAGS.No):
            if (i != j):
                Rr_data[:, i, cnt] = 1.0
                Rs_data[:, j, cnt] = 1.0
                cnt += 1

    # Training
    max_epochs = 20000
    for i in range(max_epochs):
        tr_loss = 0
        for j in range(int(len(train_data) / mini_batch_num)):
            batch_data = train_data[j * mini_batch_num:(j + 1) *
                                    mini_batch_num]
            batch_label = train_label[j * mini_batch_num:(j + 1) *
                                      mini_batch_num]
            tr_loss_part, _ = sess.run(
                [mse, trainer],
                feed_dict={
                    O: batch_data,
                    Rr: Rr_data,
                    Rs: Rs_data,
                    Ra: Ra_data,
                    P_label: batch_label,
                    X: X_data
                })
            tr_loss += tr_loss_part
        val_loss = 0
        for j in range(int(len(val_data) / mini_batch_num)):
            batch_data = val_data[j * mini_batch_num:(j + 1) * mini_batch_num]
            batch_label = val_label[j * mini_batch_num:(j + 1) *
                                    mini_batch_num]
            # summary, val_loss_part = sess.run(
            #     [merged, mse],
            #     feed_dict={O: batch_data, Rr: Rr_data, Rs: Rs_data,
            #                Ra: Ra_data, P_label: batch_label, X: X_data})
            val_loss_part = sess.run(mse,
                                     feed_dict={
                                         O: batch_data,
                                         Rr: Rr_data,
                                         Rs: Rs_data,
                                         Ra: Ra_data,
                                         P_label: batch_label,
                                         X: X_data
                                     })
            val_loss += val_loss_part
            # writer.add_summary(summary, i * int(len(val_data) / mini_batch_num))
        print("Epoch " + str(i + 1) + " Training MSE: " +
              str(tr_loss / (int(len(train_data) / mini_batch_num))) +
              " Validation MSE: " + str(val_loss / (j + 1)))

    # Make Video
    frame_len = 300
    # raw_data = gen(FLAGS.No, True)  # disabled: reuse the trajectory generated above
    xy_origin = raw_data[:frame_len, :, 1:3]
    estimated_data = np.zeros((frame_len, FLAGS.No, FLAGS.Ds), dtype=float)
    estimated_data[0] = raw_data[0]
    estimated_data[0, :,
                   0] = (estimated_data[0, :, 0] -
                         weights_median) * (2 / (weights_max - weights_min))
    estimated_data[0, :,
                   1:3] = (estimated_data[0, :, 1:3] -
                           position_median) * (2 /
                                               (position_max - position_min))
    estimated_data[0, :,
                   3:5] = (estimated_data[0, :, 3:5] -
                           velocity_median) * (2 /
                                               (velocity_max - velocity_min))
    for i in range(1, frame_len):
        velocities = sess.run(P,
                              feed_dict={
                                  O: [np.transpose(estimated_data[i - 1])],
                                  Rr: [Rr_data[0]],
                                  Rs: [Rs_data[0]],
                                  Ra: [Ra_data[0]],
                                  X: [X_data[0]]
                              })[0]
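        # de-normalize the predicted velocities and previous positions, apply
        # the forward-Euler update (dt = 0.001), then re-normalize so the
        # state can be fed back into the network on the next step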
        estimated_data[i, :, 0] = estimated_data[i - 1][:, 0]
        estimated_data[i, :,
                       3:5] = np.transpose(velocities *
                                           (velocity_max - velocity_min) / 2 +
                                           velocity_median)
        estimated_data[i, :, 1:3] = (
            estimated_data[i - 1, :, 1:3] * (position_max - position_min) / 2 +
            position_median) + estimated_data[i, :, 3:5] * 0.001
        estimated_data[i, :,
                       1:3] = (estimated_data[i, :, 1:3] - position_median) * (
                           2 / (position_max - position_min))
        estimated_data[i, :,
                       3:5] = (estimated_data[i, :, 3:5] - velocity_median) * (
                           2 / (velocity_max - velocity_min))
    xy_estimated = estimated_data[:, :, 1:3] * (
        position_max - position_min) / 2 + position_median
    print("Video Recording")
    make_video(xy_origin, "true.mp4")
    make_video(xy_estimated, "modeling.mp4")
# A Visual Interaction Network-style variant working from rendered frames.
# Assumes module-level imports and helpers, e.g. matplotlib.image as mpimg,
# time, FLAGS, img_folder, data_folder and the encoder/dynamics/decoder
# networks VE, DP and SD.
def train():
    # Architecture Definition
    F = tf.placeholder(tf.float32,
                       [None, 6, FLAGS.height, FLAGS.weight, FLAGS.col_dim],
                       name="F")
    F1, F2, F3, F4, F5, F6 = tf.unstack(F, 6, 1)
    label = tf.placeholder(tf.float32, [None, FLAGS.No, 4], name="label")
    # x and y coordinate channels
    x_cor = tf.placeholder(tf.float32, [None, FLAGS.height, FLAGS.weight, 1],
                           name="x_cor")
    y_cor = tf.placeholder(tf.float32, [None, FLAGS.height, FLAGS.weight, 1],
                           name="y_cor")

    S1, S2, S3, S4 = VE(F1, F2, F3, F4, F5, F6, x_cor, y_cor, FLAGS)
    out_dp = DP(S1, S2, S3, S4, FLAGS)
    out_sd = SD(out_dp, FLAGS)

    # loss and optimizer
    mse = tf.reduce_mean(tf.reduce_mean(tf.square(out_sd - label), [1, 2]))
    optimizer = tf.train.AdamOptimizer(0.0005)
    trainer = optimizer.minimize(mse)

    # tensorboard
    tf.summary.scalar('mse', mse)
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter(FLAGS.log_dir)

    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # Get Training Image and Data
    total_img = np.zeros(
        (FLAGS.set_num, 1000, FLAGS.height, FLAGS.weight, FLAGS.col_dim),
        dtype=float)
    for i in range(FLAGS.set_num):
        for j in range(1000):
            total_img[i, j] = mpimg.imread(img_folder + "train/" + str(i) +
                                           '_' + str(j) + '.png')
    total_data = np.zeros((FLAGS.set_num, 1000, FLAGS.No * 5), dtype=float)
    for i in range(FLAGS.set_num):
        with open(data_folder + "train/" + str(i) + ".csv", "r") as f:
            total_data[i] = [line[:-1].split(",") for line in f.readlines()]

    # reshape img and data: each sample is a window of 6 consecutive frames,
    # labeled with the object states of the following (7th) frame
    input_img = np.zeros((FLAGS.set_num * (1000 - 7 + 1), 6, FLAGS.height,
                          FLAGS.weight, FLAGS.col_dim),
                         dtype=float)
    output_label = np.zeros((FLAGS.set_num * (1000 - 7 + 1), FLAGS.No, 4),
                            dtype=float)
    for i in range(FLAGS.set_num):
        for j in range(1000 - 7 + 1):
            input_img[i * (1000 - 7 + 1) + j] = total_img[i, j:j + 6]
            output_label[i * (1000 - 7 + 1) + j] = np.reshape(
                total_data[i, j + 6], [FLAGS.No, 5])[:, 1:5]

    # shuffle and split (80% train / 10% validation)
    tr_data_num = int(len(input_img) * 0.8)
    val_data_num = int(len(input_img) * 0.1)
    total_idx = np.random.permutation(len(input_img))
    mixed_img = input_img[total_idx]
    mixed_label = output_label[total_idx]
    tr_data = mixed_img[:tr_data_num]
    tr_label = mixed_label[:tr_data_num]
    val_data = mixed_img[tr_data_num:(tr_data_num + val_data_num)]
    val_label = mixed_label[tr_data_num:(tr_data_num + val_data_num)]

    # x-cor and y-cor setting
    nx, ny = (FLAGS.weight, FLAGS.height)
    x = np.linspace(0, 1, nx)
    y = np.linspace(0, 1, ny)
    xv, yv = np.meshgrid(x, y)
    xv = np.reshape(xv, [FLAGS.height, FLAGS.weight, 1])
    yv = np.reshape(yv, [FLAGS.height, FLAGS.weight, 1])
    xcor = np.zeros((FLAGS.batch_num * 5, FLAGS.height, FLAGS.weight, 1),
                    dtype=float)
    ycor = np.zeros((FLAGS.batch_num * 5, FLAGS.height, FLAGS.weight, 1),
                    dtype=float)
    for i in range(FLAGS.batch_num * 5):
        xcor[i] = xv
        ycor[i] = yv
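    # These constant coordinate maps are appended as extra input channels so
    # the convolutional encoder can recover absolute positions (similar in
    # spirit to CoordConv-style coordinate channels).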

    # training
    for i in range(FLAGS.max_epoches):
        tr_loss = 0
        for j in range(int(len(tr_data) / FLAGS.batch_num)):
            batch_data = tr_data[j * FLAGS.batch_num:(j + 1) * FLAGS.batch_num]
            batch_label = tr_label[j * FLAGS.batch_num:(j + 1) *
                                   FLAGS.batch_num]
            tr_loss_part, _ = sess.run(
                [mse, trainer],
                feed_dict={F: batch_data, label: batch_label,
                           x_cor: xcor, y_cor: ycor})
            tr_loss += tr_loss_part
        tr_idx = np.random.permutation(len(tr_data))
        tr_data = tr_data[tr_idx]
        tr_label = tr_label[tr_idx]
        val_loss = 0
        for j in range(int(len(val_data) / FLAGS.batch_num)):
            batch_data = val_data[j * FLAGS.batch_num:(j + 1) *
                                  FLAGS.batch_num]
            batch_label = val_label[j * FLAGS.batch_num:(j + 1) *
                                    FLAGS.batch_num]
            # evaluation only: do not run the trainer on validation batches
            val_loss_part = sess.run(
                mse,
                feed_dict={F: batch_data, label: batch_label,
                           x_cor: xcor, y_cor: ycor})
            val_loss += val_loss_part
        val_idx = np.random.permutation(len(val_data))
        val_data = val_data[val_idx]
        val_label = val_label[val_idx]
        print("Epoch " + str(i + 1) + " Training MSE: " +
              str(tr_loss / (int(len(tr_data) / FLAGS.batch_num))) +
              " Validation MSE: " + str(val_loss / (j + 1)))
  
    ts_frame_num = 300
    # Get Test Image and Data
    ts_img = np.zeros((1, 1000, FLAGS.height, FLAGS.weight, FLAGS.col_dim),
                      dtype=float)
    for i in range(1):
        for j in range(1000):
            ts_img[i, j] = mpimg.imread(img_folder + "test/" + str(i) + "_" +
                                        str(j) + '.png')
    ts_data = np.zeros((1, 1000, FLAGS.No * 5), dtype=float)
    for i in range(1):
        with open(data_folder + "test/" + str(i) + ".csv", "r") as f:
            ts_data[i] = [line[:-1].split(",") for line in f.readlines()]

    # reshape img and data (same 6-frame windows, built from the test set)
    input_img = np.zeros((1 * (1000 - 7 + 1), 6, FLAGS.height, FLAGS.weight,
                          FLAGS.col_dim),
                         dtype=float)
    output_label = np.zeros((1 * (1000 - 7 + 1), FLAGS.No, 4), dtype=float)
    for i in range(1):
        for j in range(1000 - 7 + 1):
            input_img[i * (1000 - 7 + 1) + j] = ts_img[i, j:j + 6]
            output_label[i * (1000 - 7 + 1) + j] = np.reshape(
                ts_data[i, j + 6], [FLAGS.No, 5])[:, 1:5]
  
    xy_origin = output_label[:, :, 0:2]
    xy_estimated = np.zeros((1 * (1000 - 7 + 1), FLAGS.No, 2), dtype=float)
    for i in range(len(input_img)):
        # run the decoder output (not the label placeholder) and keep the
        # predicted x/y columns; five coordinate maps are fed per sequence,
        # assuming six frames give five frame pairs as in the batch_num*5
        # sizing above
        xy_estimated[i] = sess.run(out_sd,
                                   feed_dict={
                                       F: [input_img[i]],
                                       x_cor: xcor[0:5],
                                       y_cor: ycor[0:5]
                                   })[0][:, 0:2]
    print("Video Recording")
    make_video(xy_origin[:ts_frame_num], "true" + str(time.time()) + ".mp4")
    make_video(xy_estimated[:ts_frame_num],
               "modeling" + str(time.time()) + ".mp4")
    print("Done")