Code example #1
0
def get_pose(img, times, index):
    """Predict relative camera poses for one image sequence and dump
    them in TUM format.

    Args:
        img: image source handed through to load_image_sequence.
        times: timestamps written alongside the predicted poses.
        index: frame index; the target index is derived as ``index - 2``.
    """
    # Number of source frames on each side of the target frame.
    max_src_offset = (FLAGS.seq_length - 1) // 2

    # TODO: currently assuming batch_size = 1
    # NOTE(review): hard-coded offset — presumably aligns `index` with
    # the window center; confirm against the caller.
    tgt_idx = index - 2
    print('tgt_idx:', tgt_idx)
    image_seq = load_image_sequence(img, tgt_idx, FLAGS.seq_length,
                                    FLAGS.img_height, FLAGS.img_width)
    # Feed a single-sample batch; inference returns a dict of arrays.
    pred = sfm.inference(image_seq[None, :, :, :], sess, mode='pose')
    pred_poses = pred['pose'][0]  # source-frame poses, shape (seq_length - 1, 6)

    # Insert the identity pose [0, 0, 0, 0, 0, 0] for the target frame,
    # which serves as the reference.
    pred_poses = np.insert(pred_poses,
                           max_src_offset,
                           np.zeros((1, 6)),
                           axis=0)
    # NOTE(review): `times` is passed whole here, while sibling scripts
    # slice a seq_length window — confirm the caller pre-slices it.
    out_file = FLAGS.output_dir + '%.6d.txt' % (tgt_idx - max_src_offset)
    dump_pose_seq_TUM(out_file, pred_poses, times)
Code example #2
0
File: test_kitti_pose.py  Project: sakshikakde/sfm_dl
def main():
    """Run SfMLearner pose inference over every valid target frame of a
    KITTI odometry sequence, writing one TUM-format pose file each."""
    ##### SETUP SFM IN POSE MODE ######
    sfm = SfMLearner()
    sfm.setup_inference(FLAGS.img_height, FLAGS.img_width, 'pose',
                        FLAGS.seq_length)

    saver = tf.train.Saver(list(tf.trainable_variables()))

    ##### File Handlings #####
    if not os.path.isdir(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)

    sequence_dir = os.path.join(FLAGS.dataset_dir, 'sequences',
                                '%.2d' % FLAGS.test_seq)
    image_dir = os.path.join(sequence_dir, 'image_2')
    num_frames = len(glob(image_dir + '/*.png'))
    # One "SS NNNNNN" identifier per frame of the sequence.
    test_frames = ['%.2d %.6d' % (FLAGS.test_seq, n)
                   for n in range(num_frames)]

    # One float timestamp per line; strip the trailing newline.
    with open(FLAGS.dataset_dir + 'sequences/%.2d/times.txt' % FLAGS.test_seq,
              'r') as f:
        times = np.array([float(line[:-1]) for line in f.readlines()])

    max_src_offset = (FLAGS.seq_length - 1) // 2
    with tf.Session() as sess:
        saver.restore(sess, FLAGS.ckpt_file)  # load the pose checkpoint
        for tgt_idx in range(num_frames):
            # Skip targets whose source frames fall outside the sequence.
            if not is_valid_sample(test_frames, tgt_idx, FLAGS.seq_length):
                continue
            if tgt_idx % 100 == 0:
                print('Progress: %d/%d' % (tgt_idx, num_frames))
            # TODO: currently assuming batch_size = 1
            image_seq = load_image_sequence(FLAGS.dataset_dir, test_frames,
                                            tgt_idx, FLAGS.seq_length,
                                            FLAGS.img_height, FLAGS.img_width)
            # Predict the source-frame poses for this window.
            pred = sfm.inference(image_seq[None, :, :, :], sess, mode='pose')
            pred_poses = pred['pose'][0]

            # Insert the target pose [0, 0, 0, 0, 0, 0] at the window center.
            pred_poses = np.insert(pred_poses, max_src_offset,
                                   np.zeros((1, 6)), axis=0)

            curr_times = times[tgt_idx - max_src_offset:
                               tgt_idx + max_src_offset + 1]
            out_file = FLAGS.output_dir + '%.6d.txt' % (tgt_idx -
                                                        max_src_offset)
            dump_pose_seq_TUM(out_file, pred_poses, curr_times)
Code example #3
0
def main():
    """Batched pose inference with DeepSlam over one KITTI odometry
    sequence; dumps one TUM-format pose file per target frame."""
    # get input images
    if not os.path.isdir(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    concat_img_dir = os.path.join(FLAGS.concat_img_dir,
                                  '%.2d' % FLAGS.test_seq)
    # Number of source frames on each side of the target frame.
    max_src_offset = int((FLAGS.seq_length - 1) / 2)
    # NOTE(review): presumably the concatenated .jpg files omit the
    # 2*max_src_offset boundary frames, hence the add-back — confirm.
    N = len(glob(concat_img_dir + '/*.jpg')) + 2 * max_src_offset
    test_frames = ['%.2d %.6d' % (FLAGS.test_seq, n) for n in range(N)]

    with open(FLAGS.dataset_dir + 'sequences/%.2d/times.txt' % FLAGS.test_seq,
              'r') as f:
        times = f.readlines()
    # One float timestamp per frame (strip the trailing newline).
    times = np.array([float(s[:-1]) for s in times])

    with tf.Session() as sess:
        # setup input tensor
        loader = DataLoader(FLAGS.concat_img_dir, FLAGS.batch_size,
                            FLAGS.img_height, FLAGS.img_width,
                            FLAGS.seq_length - 1)
        image_sequence_names, tgt_inds = load_kitti_image_sequence_names(
            FLAGS.concat_img_dir, test_frames, FLAGS.seq_length)
        # NOTE(review): presumably pads both lists to a multiple of
        # batch_size so every batch is full — confirm helper semantics.
        image_sequence_names = complete_batch_size(image_sequence_names,
                                                   FLAGS.batch_size)
        tgt_inds = complete_batch_size(tgt_inds, FLAGS.batch_size)
        assert len(tgt_inds) == len(image_sequence_names)
        batch_sample = loader.load_test_batch(image_sequence_names)
        # Initialize the dataset iterator before pulling batches from it.
        sess.run(batch_sample.initializer)
        input_batch = batch_sample.get_next()
        # Each sample holds seq_length frames concatenated along the width.
        input_batch.set_shape([
            FLAGS.batch_size, FLAGS.img_height,
            FLAGS.img_width * FLAGS.seq_length, 3
        ])

        # init system
        system = DeepSlam()
        system.setup_inference(FLAGS.img_height, FLAGS.img_width, 'pose',
                               FLAGS.seq_length, FLAGS.batch_size, input_batch)
        saver = tf.train.Saver([var for var in tf.trainable_variables()])
        saver.restore(sess, FLAGS.ckpt_file)

        # One inference call per full batch.
        round_num = len(image_sequence_names) // FLAGS.batch_size
        for i in range(round_num):
            pred = system.inference(sess, mode='pose')
            for j in range(FLAGS.batch_size):
                tgt_idx = tgt_inds[i * FLAGS.batch_size + j]
                pred_poses = pred['pose'][j]
                # Insert the target pose [0, 0, 0, 0, 0, 0] to the middle
                pred_poses = np.insert(pred_poses,
                                       max_src_offset,
                                       np.zeros((1, 6)),
                                       axis=0)
                # Timestamps for the seq_length frames around the target.
                curr_times = times[tgt_idx - max_src_offset:tgt_idx +
                                   max_src_offset + 1]
                out_file = FLAGS.output_dir + '%.6d.txt' % (tgt_idx -
                                                            max_src_offset)
                dump_pose_seq_TUM(out_file, pred_poses, curr_times)
Code example #4
0
def main():
    """Evaluate the SfMLearner pose network on one KITTI odometry
    sequence, writing a TUM-format pose file per valid target frame."""
    sfm = SfMLearner()  # builds the network graph in __init__
    sfm.setup_inference(FLAGS.img_height, FLAGS.img_width, 'pose',
                        FLAGS.seq_length)
    saver = tf.train.Saver([var for var in tf.trainable_variables()])

    if not os.path.isdir(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    seq_dir = os.path.join(FLAGS.dataset_dir, 'sequences',
                           '%.2d' % FLAGS.test_seq)
    img_dir = os.path.join(seq_dir, 'image_2')
    N = len(glob(img_dir + '/*.png'))
    test_frames = ['%.2d %.6d' % (FLAGS.test_seq, n) for n in range(N)]
    with open(FLAGS.dataset_dir + 'sequences/%.2d/times.txt' % FLAGS.test_seq,
              'r') as f:
        times = f.readlines()
    # One float timestamp per frame (strip the trailing newline).
    times = np.array([float(s[:-1]) for s in times])
    # Number of source frames on each side of the target frame.
    max_src_offset = (FLAGS.seq_length - 1) // 2
    with tf.Session() as sess:
        saver.restore(sess, FLAGS.ckpt_file)
        for tgt_idx in range(N):
            # Skip targets whose source window runs off either end.
            if not is_valid_sample(test_frames, tgt_idx, FLAGS.seq_length):
                continue
            if tgt_idx % 100 == 0:
                print('Progress: %d/%d' % (tgt_idx, N))
            # TODO: currently assuming batch_size = 1
            image_seq = load_image_sequence(FLAGS.dataset_dir, test_frames,
                                            tgt_idx, FLAGS.seq_length,
                                            FLAGS.img_height, FLAGS.img_width)
            # Feed a single-sample batch; inference returns a dict.
            pred = sfm.inference(image_seq[None, :, :, :], sess,
                                 mode='pose')
            pred_poses = pred['pose'][0]  # shape (seq_length - 1, 6)
            # Insert the target pose [0, 0, 0, 0, 0, 0]; the current
            # (target) frame is the reference.
            pred_poses = np.insert(pred_poses,
                                   max_src_offset,
                                   np.zeros((1, 6)),
                                   axis=0)
            # Timestamps for the seq_length frames around the target.
            curr_times = times[tgt_idx - max_src_offset:tgt_idx +
                               max_src_offset + 1]
            out_file = FLAGS.output_dir + '%.6d.txt' % (tgt_idx -
                                                        max_src_offset)
            dump_pose_seq_TUM(out_file, pred_poses, curr_times)
Code example #5
0
def main():
    """Pose inference plus per-frame yaw-class prediction over one KITTI
    odometry sequence.

    Dumps a TUM-format pose file per target frame and collects the
    predicted yaw classes into result.csv (indexed by target frame).
    """
    sfm = SfMLearner()
    sfm.setup_inference(cfg.img_height, cfg.img_width, 'pose', cfg.seq_length)
    saver = tf.train.Saver()
    # NOTE(review): cfg.output_dir is not created here (creation was
    # deliberately disabled upstream) — it must exist before running.
    seq_dir = os.path.join(cfg.dataset_dir, 'sequences', '%.2d' % cfg.test_seq)
    img_dir = os.path.join(seq_dir, 'image_2')
    N = len(glob(img_dir + '/*.png'))
    test_frames = ['%.2d %.6d' % (cfg.test_seq, n) for n in range(N)]
    with open(cfg.dataset_dir + 'sequences/%.2d/times.txt' % cfg.test_seq, 'r') as f:
        times = f.readlines()
    # One float timestamp per frame (strip the trailing newline).
    times = np.array([float(s[:-1]) for s in times])
    # Number of source frames on each side of the target frame.
    max_src_offset = (cfg.seq_length - 1) // 2
    yaw_class_list = []
    tgt_list = []
    with tf.Session() as sess:
        saver.restore(sess, cfg.ckpt_file)
        for tgt_idx in range(N):
            # Skip targets whose source window runs off either end.
            if not is_valid_sample(test_frames, tgt_idx, cfg.seq_length):
                continue
            if tgt_idx % 100 == 0:
                print('Progress: %d/%d' % (tgt_idx, N))
            # TODO: currently assuming batch_size = 1
            image_seq = load_image_sequence(cfg.dataset_dir,
                                            test_frames,
                                            tgt_idx,
                                            cfg.seq_length,
                                            cfg.img_height,
                                            cfg.img_width)
            pred = sfm.inference(image_seq[None, :, :, :], sess, mode='pose')
            pred_poses = pred['pose'][0]
            pred_yaw_class = pred['yaw_class'][0]
            # Insert the target pose [0, 0, 0, 0, 0, 0] at the window center.
            pred_poses = np.insert(pred_poses, max_src_offset, np.zeros((1, 6)), axis=0)
            yaw_class_list.append(pred_yaw_class)
            tgt_list.append(tgt_idx)
            # Timestamps for the seq_length frames around the target.
            curr_times = times[tgt_idx - max_src_offset:tgt_idx + max_src_offset + 1]
            out_file = cfg.output_dir + '%.6d.txt' % (tgt_idx - max_src_offset)
            dump_pose_seq_TUM(out_file, pred_poses, curr_times)
    # Persist target index -> predicted yaw class, without a header row.
    result = {'target': tgt_list, 'class': yaw_class_list}
    result = pd.DataFrame(result)
    result = result.set_index('target')
    # header=False is the documented, explicit form (was header=None).
    result.to_csv("result.csv", header=False)
Code example #6
0
File: test_kitti_pose.py  Project: dikshant2210/m2p3
def main():
    """SfMLearner pose evaluation over one KITTI odometry sequence;
    writes a TUM-format pose file per valid target frame."""
    sfm = SfMLearner()
    sfm.setup_inference(FLAGS.img_height, FLAGS.img_width, 'pose',
                        FLAGS.seq_length)
    saver = tf.train.Saver([var for var in tf.trainable_variables()])

    if not os.path.isdir(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    seq_dir = os.path.join(FLAGS.dataset_dir, 'sequences',
                           '%.2d' % FLAGS.test_seq)
    img_dir = os.path.join(seq_dir, 'image_2')
    N = len(glob(img_dir + '/*.png'))
    test_frames = ['%.2d %.6d' % (FLAGS.test_seq, n) for n in range(N)]
    with open(FLAGS.dataset_dir + 'sequences/%.2d/times.txt' % FLAGS.test_seq,
              'r') as f:
        times = f.readlines()
    # One float timestamp per frame (strip the trailing newline).
    times = np.array([float(s[:-1]) for s in times])
    # Number of source frames on each side of the target frame.
    max_src_offset = (FLAGS.seq_length - 1) // 2
    with tf.Session() as sess:
        saver.restore(sess, FLAGS.ckpt_file)
        for tgt_idx in range(N):
            # Skip targets whose source window runs off either end.
            if not is_valid_sample(test_frames, tgt_idx, FLAGS.seq_length):
                continue
            if tgt_idx % 100 == 0:
                print('Progress: %d/%d' % (tgt_idx, N))
            # TODO: currently assuming batch_size = 1
            image_seq = load_image_sequence(FLAGS.dataset_dir, test_frames,
                                            tgt_idx, FLAGS.seq_length,
                                            FLAGS.img_height, FLAGS.img_width)

            pred = sfm.inference(image_seq[None, :, :, :], sess, mode='pose')
            pred_poses = pred['pose'][0]
            # Insert the target pose [0, 0, 0, 0, 0, 0] at the window center.
            pred_poses = np.insert(pred_poses,
                                   max_src_offset,
                                   np.zeros((1, 6)),
                                   axis=0)
            curr_times = times[tgt_idx - max_src_offset:tgt_idx +
                               max_src_offset + 1]
            out_file = FLAGS.output_dir + '%.6d.txt' % (tgt_idx -
                                                        max_src_offset)
            # (Removed leftover debug print of len(curr_times).)
            dump_pose_seq_TUM(out_file, pred_poses, curr_times)
Code example #7
0
File: test_kitti_pose.py  Project: olibd/undemon
def main():
    """UnDEMoN pose evaluation over one KITTI raw-data sequence; writes a
    TUM-format pose file per valid target frame."""
    demon = UnDEMoN()
    demon.setup_inference(FLAGS.img_height,
                          FLAGS.img_width,
                          'pose',
                          FLAGS.seq_length)
    saver = tf.train.Saver([var for var in tf.trainable_variables()])
    if not os.path.isdir(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    # Resolve the raw-data sequence directory from the test split entry;
    # seq_name[:10] is presumably the date prefix of the KITTI raw drive
    # name — confirm against the test_data format.
    seq_name = test_data[FLAGS.test_seq].split(' ')[0]
    seq_dir = FLAGS.dataset_dir + seq_name[:10] + "/" + seq_name + "_sync/"
    img_dir = os.path.join(seq_dir, 'image_02', 'data')
    N = len(glob(img_dir + '/*.png')[:4540])  # cap at 4540 frames
    test_frames = ['%.2d %.10d' % (FLAGS.test_seq, n) for n in range(N)]
    with open(seq_dir + 'image_02/time.txt', 'r') as f:
        times = f.readlines()
    # NOTE(review): timestamps stay strings here (KITTI raw time files
    # are not plain floats) — confirm dump_pose_seq_TUM accepts them.
    times = np.array([s[:-1] for s in times])
    # Number of source frames on each side of the target frame.
    max_src_offset = (FLAGS.seq_length - 1) // 2
    with tf.Session() as sess:
        saver.restore(sess, FLAGS.ckpt_file)
        # Fixed: was a Python-2-only `print "Model Loaded"` statement.
        print("Model Loaded")
        for tgt_idx in range(N):
            # Skip targets whose source window runs off either end.
            if not is_valid_sample(test_frames, tgt_idx, FLAGS.seq_length):
                continue
            if tgt_idx % 100 == 0:
                print('Progress: %d/%d' % (tgt_idx, N))
            # TODO: currently assuming batch_size = 1
            image_seq = load_image_sequence(FLAGS.dataset_dir,
                                            test_frames,
                                            tgt_idx, seq_name,
                                            FLAGS.seq_length,
                                            FLAGS.img_height,
                                            FLAGS.img_width)
            pred = demon.inference(image_seq[None, :, :, :], sess, mode='pose')
            pred_poses = pred['pose'][0]
            # Insert the target pose [0, 0, 0, 0, 0, 0] at the window center.
            pred_poses = np.insert(pred_poses, max_src_offset, np.zeros((1, 6)), axis=0)
            curr_times = times[tgt_idx - max_src_offset:tgt_idx + max_src_offset + 1]
            out_file = FLAGS.output_dir + '%.6d.txt' % (tgt_idx - max_src_offset)
            dump_pose_seq_TUM(out_file, pred_poses, curr_times)
Code example #8
0
File: geonet_test_pose.py  Project: yang330624/GeoNet
def test_pose(opt):
    """Batched GeoNet pose inference over a KITTI odometry sequence,
    writing one TUM-format pose file per target frame."""
    if not os.path.isdir(opt.output_dir):
        os.makedirs(opt.output_dir)

    ##### init #####
    # seq_length RGB frames stacked along the channel axis.
    input_uint8 = tf.placeholder(
        tf.uint8,
        [opt.batch_size, opt.img_height, opt.img_width, opt.seq_length * 3],
        name='raw_input')
    tgt_image = input_uint8[:, :, :, :3]
    src_image_stack = input_uint8[:, :, :, 3:]

    model = GeoNetModel(opt, tgt_image, src_image_stack, None)
    fetches = {"pose": model.pred_poses}

    saver = tf.train.Saver(list(tf.model_variables()))

    ##### load test frames #####
    seq_dir = os.path.join(opt.dataset_dir, 'sequences',
                           '%.2d' % opt.pose_test_seq)
    img_dir = os.path.join(seq_dir, 'image_2')
    N = len(glob(img_dir + '/*.png'))
    test_frames = ['%.2d %.6d' % (opt.pose_test_seq, n) for n in range(N)]

    ##### load time file #####
    with open(opt.dataset_dir + 'sequences/%.2d/times.txt' % opt.pose_test_seq,
              'r') as f:
        times = np.array([float(line[:-1]) for line in f.readlines()])

    ##### Go! #####
    half_window = (opt.seq_length - 1) // 2
    with tf.Session() as sess:
        saver.restore(sess, opt.init_ckpt_file)
        # Step through valid targets one batch at a time.
        for tgt_idx in range(half_window, N - half_window, opt.batch_size):
            if (tgt_idx - half_window) % 100 == 0:
                print('Progress: %d/%d' % (tgt_idx - half_window, N))

            inputs = np.zeros(
                (opt.batch_size, opt.img_height, opt.img_width,
                 3 * opt.seq_length), dtype=np.uint8)

            for b in range(opt.batch_size):
                idx = tgt_idx + b
                if idx >= N - half_window:
                    break  # last, partially-filled batch
                inputs[b] = load_image_sequence(opt.dataset_dir, test_frames,
                                                idx, opt.seq_length,
                                                opt.img_height, opt.img_width)

            pred = sess.run(fetches, feed_dict={input_uint8: inputs})
            # Insert the identity target pose [0, 0, 0, 0, 0, 0] per sample.
            pred_poses = np.insert(pred['pose'], half_window,
                                   np.zeros((1, 6)), axis=1)

            for b in range(opt.batch_size):
                idx = tgt_idx + b
                if idx >= N - half_window:
                    break
                curr_times = times[idx - half_window:idx + half_window + 1]
                out_file = opt.output_dir + '%.6d.txt' % (idx - half_window)
                dump_pose_seq_TUM(out_file, pred_poses[b], curr_times)
Code example #9
0
File: geonet_test_pose.py  Project: dingmyu/GeoNet
def test_pose(opt):
    """Batched GeoNet pose inference over a KITTI odometry sequence,
    writing one TUM-format pose file per target frame.

    Variant with 4 channels per frame (``seq_length * 4``) — presumably
    RGB plus an extra channel; confirm against the data pipeline.
    """
    if not os.path.isdir(opt.output_dir):
        os.makedirs(opt.output_dir)

    ##### init #####
    # seq_length frames of 4 channels each, stacked along the channel axis.
    input_uint8 = tf.placeholder(tf.uint8, [opt.batch_size, 
        opt.img_height, opt.img_width, opt.seq_length * 4], 
        name='raw_input')
    tgt_image = input_uint8[:,:,:,:4]
    src_image_stack = input_uint8[:,:,:,4:]

    model = GeoNetModel(opt, tgt_image, src_image_stack, None)
    fetches = { "pose": model.pred_poses }

    saver = tf.train.Saver([var for var in tf.model_variables()]) 

    ##### load test frames #####
    seq_dir = os.path.join(opt.dataset_dir, 'sequences', '%.2d' % opt.pose_test_seq)
    img_dir = os.path.join(seq_dir, 'image_2')
    N = len(glob(img_dir + '/*.png'))
    test_frames = ['%.2d %.6d' % (opt.pose_test_seq, n) for n in range(N)]

    ##### load time file #####
    with open(opt.dataset_dir + 'sequences/%.2d/times.txt' % opt.pose_test_seq, 'r') as f:
        times = f.readlines()
    # One float timestamp per frame (strip the trailing newline).
    times = np.array([float(s[:-1]) for s in times])

    ##### Go! #####
    # Number of source frames on each side of the target frame.
    max_src_offset = (opt.seq_length - 1) // 2
    with tf.Session() as sess:
        saver.restore(sess, opt.init_ckpt_file)
        # Step through valid targets one batch at a time.
        for tgt_idx in range(max_src_offset, N-max_src_offset, opt.batch_size):            
            if (tgt_idx-max_src_offset) % 100 == 0:
                print('Progress: %d/%d' % (tgt_idx-max_src_offset, N))

            inputs = np.zeros((opt.batch_size, opt.img_height,
                     opt.img_width, 4*opt.seq_length), dtype=np.uint8)

            for b in range(opt.batch_size):
                idx = tgt_idx + b
                if idx >= N-max_src_offset:
                    break  # last, partially-filled batch
                image_seq = load_image_sequence(opt.dataset_dir,
                                                test_frames,
                                                idx,
                                                opt.seq_length,
                                                opt.img_height,
                                                opt.img_width)
                inputs[b] = image_seq

            pred = sess.run(fetches, feed_dict={input_uint8: inputs})
            pred_poses = pred['pose']
            # Insert the target pose [0, 0, 0, 0, 0, 0] 
            pred_poses = np.insert(pred_poses, max_src_offset, np.zeros((1,6)), axis=1)

            for b in range(opt.batch_size):
                idx = tgt_idx + b
                if idx >=N-max_src_offset:
                    break
                pred_pose = pred_poses[b]                
                # Timestamps for the seq_length frames around the target.
                curr_times = times[idx - max_src_offset:idx + max_src_offset + 1]
                out_file = opt.output_dir + '%.6d.txt' % (idx - max_src_offset)
                dump_pose_seq_TUM(out_file, pred_pose, curr_times)
Code example #10
0
def test_pose(FLAGS):
    """Batched pose inference with the eager-mode GeoNet port; writes one
    TUM-format pose file per target frame of a KITTI odometry sequence."""
    if not os.path.isdir(FLAGS['output_dir']):
        os.makedirs(FLAGS['output_dir'])

    geonet = GeoNet(FLAGS)

    ##### load test frames #####
    seq_dir = os.path.join(FLAGS['dataset_dir'], 'sequences',
                           '%.2d' % FLAGS['pose_test_seq'])
    img_dir = os.path.join(seq_dir, 'image_2')
    frame_count = len(glob(img_dir + '/*.png'))
    test_frames = ['%.2d %.6d' % (FLAGS['pose_test_seq'], n)
                   for n in range(frame_count)]

    ##### load time file #####
    times_path = (FLAGS['dataset_dir'] +
                  '/sequences/%.2d/times.txt' % FLAGS['pose_test_seq'])
    with open(times_path, 'r') as f:
        # One float timestamp per line; strip the trailing newline.
        times = np.array([float(line[:-1]) for line in f.readlines()])

    ##### Go! #####
    half_window = (FLAGS['seq_length'] - 1) // 2
    checkpoint_path = os.path.join(FLAGS['init_ckpt_file'])
    geonet.load_weights(checkpoint_path)

    # Step through valid targets one batch at a time.
    for tgt_idx in range(half_window, frame_count - half_window,
                         FLAGS['batch_size']):
        if (tgt_idx - half_window) % 100 == 0:
            print('Progress: %d/%d' % (tgt_idx - half_window, frame_count))

        inputs = np.zeros((FLAGS['batch_size'], FLAGS['img_height'],
                           FLAGS['img_width'], 3 * FLAGS['seq_length']),
                          dtype=np.float32)

        for b in range(FLAGS['batch_size']):
            idx = tgt_idx + b
            if idx >= frame_count - half_window:
                break  # last, partially-filled batch
            inputs[b] = load_image_sequence(FLAGS['dataset_dir'], test_frames,
                                            idx, FLAGS['seq_length'],
                                            FLAGS['img_height'],
                                            FLAGS['img_width'])

        # Predict source-frame poses, then insert the identity pose
        # [0, 0, 0, 0, 0, 0] for the target frame at the window center.
        pred_poses = geonet.pose_net(inputs, training=False).numpy()
        pred_poses = np.insert(pred_poses, half_window, np.zeros((1, 6)),
                               axis=1)

        for b in range(FLAGS['batch_size']):
            idx = tgt_idx + b
            if idx >= frame_count - half_window:
                break
            curr_times = times[idx - half_window:idx + half_window + 1]
            out_file = FLAGS['output_dir'] + '%.6d.txt' % (idx - half_window)
            dump_pose_seq_TUM(out_file, pred_poses[b], curr_times)