Example #1
def main():
    if not os.path.exists(args.dump_root):
        os.makedirs(args.dump_root)

    global data_loader
    if args.dataset_name == 'kitti_odom':
        from kitti.kitti_odom_loader import kitti_odom_loader
        data_loader = kitti_odom_loader(args.dataset_dir,
                                        img_height=args.img_height,
                                        img_width=args.img_width,
                                        seq_length=args.seq_length)

    if args.dataset_name == 'kitti_raw_eigen':
        from kitti.kitti_raw_loader import kitti_raw_loader
        data_loader = kitti_raw_loader(args.dataset_dir,
                                       split='eigen',
                                       img_height=args.img_height,
                                       img_width=args.img_width,
                                       seq_length=args.seq_length)

    if args.dataset_name == 'kitti_raw_stereo':
        from kitti.kitti_raw_loader import kitti_raw_loader
        data_loader = kitti_raw_loader(args.dataset_dir,
                                       split='stereo',
                                       img_height=args.img_height,
                                       img_width=args.img_width,
                                       seq_length=args.seq_length)        

    if args.dataset_name == 'cityscapes':
        from cityscapes.cityscapes_loader import cityscapes_loader
        data_loader = cityscapes_loader(args.dataset_dir,
                                        img_height=args.img_height,
                                        img_width=args.img_width,
                                        seq_length=args.seq_length)

    if args.dataset_name == 'humanbody':
        from humanbodys.humanbody_loader import humanbody_loader
        data_loader = humanbody_loader(args.dataset_dir,
                                        img_height=args.img_height,
                                        img_width=args.img_width,
                                        seq_length=args.seq_length)


    Parallel(n_jobs=args.num_threads)(delayed(dump_example)(n) for n in range(data_loader.num_train))

    # Split into train/val
    np.random.seed(8964)
    subfolders = os.listdir(args.dump_root)
    with open(os.path.join(args.dump_root, 'train.txt'), 'w') as tf:
        with open(os.path.join(args.dump_root, 'val.txt'), 'w') as vf:
            for s in subfolders:
                if not os.path.isdir(args.dump_root + '/%s' % s):
                    continue
                imfiles = glob(os.path.join(args.dump_root, s, '*.jpg'))
                frame_ids = [os.path.basename(fi).split('.')[0] for fi in imfiles]
                for frame in frame_ids:
                    if np.random.random() < 0.1:
                        vf.write('%s %s\n' % (s, frame))
                    else:
                        tf.write('%s %s\n' % (s, frame))
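The main() variants collected here all rely on the same module-level scaffolding that is not shown: imports for os, glob, numpy, and joblib, an args namespace produced by argparse, and a dump_example worker. The sketch below reconstructs that scaffolding only from the attribute names used in the examples; the flag defaults and the worker stub are assumptions, not code taken from any of the projects.

# Hypothetical scaffolding assumed by the main() variants (attribute names
# come from the examples above/below; defaults are placeholders).
import argparse
import os
from glob import glob

import numpy as np
from joblib import Parallel, delayed

parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', type=str, required=True)
parser.add_argument('--dataset_name', type=str, default='kitti_raw_eigen')
parser.add_argument('--dump_root', type=str, required=True)
parser.add_argument('--seq_length', type=int, default=3)
parser.add_argument('--img_height', type=int, default=128)
parser.add_argument('--img_width', type=int, default=416)
parser.add_argument('--num_threads', type=int, default=4)
parser.add_argument('--remove_static', action='store_true')
args = parser.parse_args()

data_loader = None  # assigned inside main() depending on --dataset_name


def dump_example(n):
    # Placeholder for the per-frame preprocessing worker invoked through
    # joblib.Parallel; the real body (reading frame n from data_loader and
    # writing the resized image plus intrinsics under args.dump_root) is
    # defined elsewhere in each project and is not shown in these examples.
    raise NotImplementedError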
Example #2
def main():
    if not os.path.exists(args.dump_root):
        os.makedirs(args.dump_root)

    global data_loader
    if args.dataset_name == 'kitti_odom':
        from kitti.kitti_odom_loader import kitti_odom_loader
        data_loader = kitti_odom_loader(args.dataset_dir,
                                        img_height=args.img_height,
                                        img_width=args.img_width,
                                        seq_length=args.seq_length)

    if args.dataset_name == 'kitti_raw_eigen':
        from kitti.kitti_raw_loader import kitti_raw_loader
        data_loader = kitti_raw_loader(args.dataset_dir,
                                       split='eigen',
                                       img_height=args.img_height,
                                       img_width=args.img_width,
                                       seq_length=args.seq_length,
                                       remove_static=args.remove_static)

    if args.dataset_name == 'kitti_raw_stereo':
        from kitti.kitti_raw_loader import kitti_raw_loader
        data_loader = kitti_raw_loader(args.dataset_dir,
                                       split='stereo',
                                       img_height=args.img_height,
                                       img_width=args.img_width,
                                       seq_length=args.seq_length,
                                       remove_static=args.remove_static)

    if args.dataset_name == 'cityscapes':
        from cityscapes.cityscapes_loader import cityscapes_loader
        data_loader = cityscapes_loader(args.dataset_dir,
                                        img_height=args.img_height,
                                        img_width=args.img_width,
                                        seq_length=args.seq_length)

    Parallel(n_jobs=args.num_threads)(delayed(dump_example)(n) for n in range(data_loader.num_train))

    # Split into train/val
    np.random.seed(8964)
    subfolders = os.listdir(args.dump_root)
    with open(os.path.join(args.dump_root, 'train.txt'), 'w') as tf:
        with open(os.path.join(args.dump_root, 'val.txt'), 'w') as vf:
            for s in subfolders:
                if not os.path.isdir(args.dump_root + '/%s' % s):
                    continue
                imfiles = glob(os.path.join(args.dump_root, s, '*.jpg'))
                frame_ids = [os.path.basename(fi).split('.')[0] for fi in imfiles]
                for frame in frame_ids:
                    if np.random.random() < 0.1:
                        vf.write('%s %s\n' % (s, frame))
                    else:
                        tf.write('%s %s\n' % (s, frame))
Example #3
def main():
    if not os.path.exists(args.dump_root):
        os.makedirs(args.dump_root)

    global data_loader

    if args.dataset_name == 'kitti_odom':
        from kitti.kitti_odom_loader import kitti_odom_loader
        data_loader = kitti_odom_loader(args.dataset_dir,
                                        img_height=args.img_height,
                                        img_width=args.img_width,
                                        seq_length=args.seq_length)

    if args.dataset_name == 'kitti_raw_eigen':
        from kitti.kitti_raw_loader_stereo import kitti_raw_loader
        data_loader = kitti_raw_loader(args.dataset_dir,
                                       split='eigen',
                                       img_height=args.img_height,
                                       img_width=args.img_width,
                                       seq_length=args.seq_length)

    if args.dataset_name == 'kitti_raw_stereo':
        from kitti.kitti_raw_loader_stereo import kitti_raw_loader
        data_loader = kitti_raw_loader(args.dataset_dir,
                                       split='stereo',
                                       img_height=args.img_height,
                                       img_width=args.img_width,
                                       seq_length=args.seq_length)        

    print('num_train: %d' % data_loader.num_train)

    Parallel(n_jobs=args.num_threads)(delayed(dump_example)(n) for n in range(data_loader.num_train - 1))

    # Split into train/val
    np.random.seed(8964)
    subfolders = os.listdir(args.dump_root)
    with open(os.path.join(args.dump_root, 'train.txt'), 'w') as tf:
        with open(os.path.join(args.dump_root, 'val.txt'), 'w') as vf:
            for s in subfolders:
                if not os.path.isdir(args.dump_root + '/%s' % s):
                    continue
                os.chdir(args.dump_root)
                # Sort both globs so left/right frames are paired by filename
                # (glob returns files in arbitrary order).
                left_imfiles = sorted(glob(s + '/image_02/*.jpg'))
                right_imfiles = sorted(glob(s + '/image_03/*.jpg'))
                for left, right in zip(left_imfiles, right_imfiles):
                    if np.random.random() < 0.1:
                        vf.write(left + ' ' + right + '\n')
                    else:
                        tf.write(left + ' ' + right + '\n')
Example #4
def main():
    if not os.path.exists(args.dump_root):
        os.makedirs(args.dump_root)

    global data_loader
    if args.dataset_name == 'kitti_odom':
        from kitti.kitti_odom_loader import kitti_odom_loader
        data_loader = kitti_odom_loader(args.dataset_dir,
                                        img_height=args.img_height,
                                        img_width=args.img_width,
                                        seq_length=args.seq_length)

    if args.dataset_name == 'kitti_raw_eigen':
        from kitti.kitti_raw_loader import kitti_raw_loader
        data_loader = kitti_raw_loader(args.dataset_dir,
                                       split='eigen',
                                       img_height=args.img_height,
                                       img_width=args.img_width,
                                       seq_length=args.seq_length)

    if args.dataset_name == 'kitti_raw_stereo':
        from kitti.kitti_raw_loader import kitti_raw_loader
        data_loader = kitti_raw_loader(args.dataset_dir,
                                       split='stereo',
                                       img_height=args.img_height,
                                       img_width=args.img_width,
                                       seq_length=args.seq_length)

    if args.dataset_name == 'cityscapes':
        from cityscapes.cityscapes_loader import cityscapes_loader
        data_loader = cityscapes_loader(args.dataset_dir,
                                        split='train',
                                        img_height=args.img_height,
                                        img_width=args.img_width,
                                        seq_length=args.seq_length)

    is_training = not args.generate_test
    print("is training: ", is_training)
    all_frames = range(data_loader.num_train) if is_training else range(
        data_loader.num_test)
    frame_chunks = np.array_split(all_frames,
                                  NUM_CHUNKS)  # len(frame_chunks)=NUM_CHUNKS

    num_cores = multiprocessing.cpu_count()
    num_threads = num_cores if args.num_threads is None else args.num_threads
    pool = multiprocessing.Pool(num_threads)

    if args.generate_test:
        print("Generate Test")
        logging.info('Generating test data...')
        for index, frame_chunk in enumerate(frame_chunks):
            pool.map(_dump_example_star,
                     zip(frame_chunk, itertools.repeat(is_training)))
            logging.info('Chunk %d/%d: saving entries...', index + 1,
                         NUM_CHUNKS)
    else:
        print("Generate Train/Val")
        # Split into train/val
        np.random.seed(8964)
        train_file_txt = os.path.join(args.dump_root, 'train.txt')
        val_file_txt = os.path.join(args.dump_root, 'val.txt')
        for _file in [train_file_txt, val_file_txt]:
            if os.path.exists(_file):
                file_t = os.path.getmtime(_file)
                file_suffix = datetime.datetime.fromtimestamp(file_t).strftime(
                    "%Y%m%d")
                os.rename(_file, _file + ".bak-" + file_suffix)
        subfolders = ['%.2d' % s for s in data_loader.train_seqs]
        with open(train_file_txt, 'w') as tf:
            with open(val_file_txt, 'w') as vf:
                logging.info('Generating train/val data...')
                for index, frame_chunk in enumerate(frame_chunks):
                    pool.map(_dump_example_star,
                             zip(frame_chunk, itertools.repeat(is_training)))
                    logging.info('Chunk %d/%d: saving entries...', index + 1,
                                 NUM_CHUNKS)
                    for s in subfolders:
                        if not os.path.isdir(args.dump_root + '/%s' % s):
                            continue
                        imfiles = glob(
                            os.path.join(args.dump_root, '%s' % s, '*.jpg'))
                        frame_ids = [
                            os.path.basename(fi).split('.')[0]
                            for fi in imfiles
                        ]
                        for frame in frame_ids:
                            if np.random.random() < 0.1:
                                vf.write('%s %s\n' % (s, frame))
                            else:
                                tf.write('%s %s\n' % (s, frame))

    # Release the worker pool in both the test and train/val code paths.
    pool.close()
    pool.join()
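Example #4 maps _dump_example_star over zipped (frame_index, is_training) tuples. multiprocessing.Pool.map passes only a single argument to the worker, so the _star helper is presumably just an argument-unpacking shim; below is a minimal sketch under that assumption (the underlying _dump_example is not shown in the example and is assumed to exist elsewhere in the module).

def _dump_example_star(params):
    # Pool.map hands the worker one positional argument, so unpack the
    # (frame_index, is_training) tuple produced by zip(...) before calling
    # the real worker.
    frame_index, is_training = params
    return _dump_example(frame_index, is_training)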
Example #5
def main():
    if not os.path.exists(args.dump_root):
        os.makedirs(args.dump_root)

    # initialize the data_loader for each task
    global data_loader
    # pose
    if args.dataset_name == 'kitti_odom':
        from kitti.kitti_odom_loader import kitti_odom_loader
        data_loader = kitti_odom_loader(args.dataset_dir,
                                        img_height=args.img_height,
                                        img_width=args.img_width,
                                        seq_length=args.seq_length)
        print("hello")
        print(data_loader.num_train)
        print("hello babe")

    # depth
    if args.dataset_name == 'kitti_raw_eigen':
        from kitti.kitti_raw_loader import kitti_raw_loader
        data_loader = kitti_raw_loader(args.dataset_dir,
                                       split='eigen',
                                       img_height=args.img_height,
                                       img_width=args.img_width,
                                       seq_length=args.seq_length,
                                       remove_static=args.remove_static)

    # flow
    if args.dataset_name == 'kitti_raw_stereo':
        from kitti.kitti_raw_loader import kitti_raw_loader
        data_loader = kitti_raw_loader(args.dataset_dir,
                                       split='stereo',
                                       img_height=args.img_height,
                                       img_width=args.img_width,
                                       seq_length=args.seq_length,
                                       remove_static=args.remove_static)

    if args.dataset_name == 'cityscapes':
        from cityscapes.cityscapes_loader import cityscapes_loader
        data_loader = cityscapes_loader(args.dataset_dir,
                                        img_height=args.img_height,
                                        img_width=args.img_width,
                                        seq_length=args.seq_length)

    # Multi-threaded data preprocessing; expected training-example counts:
    # 45016 for depth, raw_data, kitti_raw_eigen
    # 59552 for flow, raw_data, kitti_raw_stereo
    # 20409 for pose, odometry, kitti_odom
    Parallel(n_jobs=args.num_threads)(delayed(dump_example)(n, args)
                                      for n in range(data_loader.num_train))

    # Split into train/val, random seed to make it consistent
    np.random.seed(8964)
    subfolders = os.listdir(args.dump_root)
    with open(os.path.join(args.dump_root, 'train.txt'), 'w') as tf:
        with open(os.path.join(args.dump_root, 'val.txt'), 'w') as vf:
            for s in subfolders:
                if not os.path.isdir(args.dump_root + '/%s' % s):
                    continue

                imfiles = glob(os.path.join(args.dump_root, s, '*.jpg'))
                frame_ids = [
                    os.path.basename(fi).split('.')[0] for fi in imfiles
                ]

                for frame in frame_ids:
                    # 10% for validation, 90% for training
                    if np.random.random() < 0.1:
                        vf.write('%s %s\n' % (s, frame))
                    else:
                        tf.write('%s %s\n' % (s, frame))
Example #6
def main():
    if not os.path.exists(args.dump_root):
        os.makedirs(args.dump_root)

    global data_loader
    if args.dataset_name == 'kitti_odom':
        from kitti.kitti_odom_loader import kitti_odom_loader
        data_loader = kitti_odom_loader(args.dataset_dir,
                                        img_height=args.img_height,
                                        img_width=args.img_width,
                                        seq_length=args.seq_length)

    if args.dataset_name == 'kitti_raw_eigen':
        from kitti.kitti_raw_loader import kitti_raw_loader
        data_loader = kitti_raw_loader(args.dataset_dir,
                                       split='eigen',
                                       img_height=args.img_height,
                                       img_width=args.img_width,
                                       seq_length=args.seq_length,
                                       remove_static=args.remove_static)

    if args.dataset_name == 'kitti_raw_stereo':
        from kitti.kitti_raw_loader import kitti_raw_loader
        data_loader = kitti_raw_loader(args.dataset_dir,
                                       split='stereo',
                                       img_height=args.img_height,
                                       img_width=args.img_width,
                                       seq_length=args.seq_length,
                                       remove_static=args.remove_static)

    if args.dataset_name == 'cityscapes':
        from cityscapes.cityscapes_loader import cityscapes_loader
        data_loader = cityscapes_loader(args.dataset_dir,
                                        img_height=args.img_height,
                                        img_width=args.img_width,
                                        seq_length=args.seq_length)

    if args.dataset_name == 'hallway':
        from hallway.hallway_loader import hallway_loader
        data_loader = hallway_loader(args.dataset_dir,
                                     split='mine',
                                     img_height=args.img_height,
                                     img_width=args.img_width,
                                     seq_length=args.seq_length)

    if args.dataset_name == 'rms_hallway':
        from rms_hallway.rms_hallway_loader import rms_hallway_loader
        if args.video_to_image == "True":
            from rms_hallway.rms_hallway_video2image import rms_hallway_video2image
            video_loader = rms_hallway_video2image(args.dataset_dir,
                                                   args.frame_height,
                                                   args.frame_width,
                                                   down_sample=5)
            video_loader.collect_video_frames()

        data_loader = rms_hallway_loader(args.dataset_dir,
                                         split='mine',
                                         img_height=args.img_height,
                                         img_width=args.img_width,
                                         seq_length=args.seq_length,
                                         remove_static=args.remove_static)

    if args.dataset_name == 'MobileRGBD':
        from MobileRGBD.MobileRGBD_loader import MobileRGBD_loader
        data_loader = MobileRGBD_loader(args.dataset_dir,
                                        split='mine',
                                        img_height=args.img_height,
                                        img_width=args.img_width,
                                        seq_length=args.seq_length,
                                        remove_static=args.remove_static)

    Parallel(n_jobs=args.num_threads)(delayed(dump_example)(n)
                                      for n in range(data_loader.num_train))

    # Split into train/val
    np.random.seed(8964)
    subfolders = os.listdir(args.dump_root)
    with open(os.path.join(args.dump_root, 'train.txt'), 'w') as tf:
        with open(os.path.join(args.dump_root, 'val.txt'), 'w') as vf:
            for s in subfolders:
                if not os.path.isdir(args.dump_root + '/%s' % s):
                    continue
                imfiles = glob(os.path.join(args.dump_root, s, '*.jpg'))
                frame_ids = [
                    os.path.basename(fi).split('.')[0] for fi in imfiles
                ]
                for frame in frame_ids:
                    # Threshold of 0 means no frame is ever routed to the
                    # validation split; every frame is written to train.txt.
                    if np.random.random() < 0:
                        vf.write('%s %s\n' % (s, frame))
                    else:
                        tf.write('%s %s\n' % (s, frame))
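All six variants end by writing train.txt and val.txt lines of the form "<subfolder> <frame_id>" (Example #3 writes "<left_path> <right_path>" pairs instead). A small, hypothetical reader for the subfolder/frame format, useful when wiring the dumped data into a training pipeline:

def read_split_file(split_path):
    # Parse the "<subfolder> <frame_id>" lines written by the scripts above
    # into (subfolder, frame_id) tuples; blank lines are skipped.
    with open(split_path) as f:
        return [tuple(line.split()) for line in f if line.strip()]

# Example usage (path is a placeholder):
# train_pairs = read_split_file('/path/to/dump_root/train.txt')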