Example #1
def getSparseFeatures(heatmap, desc, verbose=False):
    from models.model_wrap import SuperPointFrontend_torch

    # Instantiate the frontend without loading weights; it is only used
    # for its heatmap post-processing (NMS + confidence thresholding).
    fe = SuperPointFrontend_torch(weights_path='', nms_dist=4, conf_thresh=0.01,
                                  nn_thresh=0.7, load=False)
    points = fe.getPtsFromHeatmap(heatmap)  # (3, N): x, y, confidence
    if verbose:
        print("pts: ", points.shape)

    def getSparseDesc(desc, pts):
        # Sample the dense descriptor map at the integer keypoint
        # locations (row index = y, column index = x).
        return desc[pts[1, :].astype(int), pts[0, :].astype(int)]

    desc = getSparseDesc(desc, points)
    return points, desc
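
A minimal usage sketch for getSparseFeatures, assuming a dense (H, W) heatmap and an (H, W, 256) descriptor map from the same image; the shapes and random values here are illustrative, not fixed by the function:

import numpy as np

# Hypothetical inputs: detector heatmap and dense descriptor map.
H, W = 240, 320
heatmap = np.random.rand(H, W)
desc = np.random.rand(H, W, 256)

pts, sparse_desc = getSparseFeatures(heatmap, desc)
# pts: (3, N) keypoints as (x, y, confidence); sparse_desc: (N, 256)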
Example #2
    def prapare_SP(self):
        logging.info('Preparing SP inference.')
        with open(DEEPSFM_PATH + '/configs/superpoint_coco_train.yaml', 'r') as f:
            self.config_SP = yaml.load(f, Loader=yaml.FullLoader)
        nms_dist = self.config_SP['model']['nms']
        conf_thresh = self.config_SP['model']['detection_threshold']
        # nn_thresh is hard-coded here instead of reading
        # self.config_SP['model']['nn_thresh']
        nn_thresh = 1.0
        path = DEEPSFM_PATH + '/' + self.config_SP['pretrained']
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        self.fe = SuperPointFrontend_torch(weights_path=path,
                                           nms_dist=nms_dist,
                                           conf_thresh=conf_thresh,
                                           nn_thresh=nn_thresh,
                                           cuda=False,
                                           device=device)
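
For reference, a minimal sketch of the config fields prapare_SP reads; the keys mirror the accesses above, but the values and the pretrained path are assumptions, not the repository's actual settings:

config_SP = {
    'model': {
        'nms': 4,                      # NMS radius in pixels
        'detection_threshold': 0.015,  # keypoint confidence threshold
    },
    'pretrained': 'logs/superpoint_coco/checkpoints/superPointNet.pth.tar',  # hypothetical
}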
Example #3
def export_detector_homoAdapt_gpu(config, output_dir, args):
    """
    input 1 images, output pseudo ground truth by homography adaptation.
    Save labels:
        pred:
            'prob' (keypoints): np (N1, 3)
    """
    from utils.utils import pltImshow
    from utils.utils import saveImg
    from utils.draw import draw_keypoints

    # basic setting
    task = config["data"]["dataset"]
    export_task = config["data"]["export_folder"]
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    logging.info("train on device: %s", device)
    with open(os.path.join(output_dir, "config.yml"), "w") as f:
        yaml.dump(config, f, default_flow_style=False)
    writer = SummaryWriter(
        getWriterPath(task=args.command, exper_name=args.exper_name, date=True)
    )

    ## parameters
    nms_dist = config["model"]["nms"]  # 4
    top_k = config["model"]["top_k"]
    homoAdapt_iter = config["data"]["homography_adaptation"]["num"]
    conf_thresh = config["model"]["detection_threshold"]
    nn_thresh = 0.7
    outputMatches = True
    count = 0
    max_length = 5
    output_images = args.outputImg
    check_exist = True

    ## save data
    save_path = Path(output_dir)
    save_output = save_path
    save_output = save_output / "predictions" / export_task
    save_path = save_path / "checkpoints"
    logging.info("=> will save everything to {}".format(save_path))
    os.makedirs(save_path, exist_ok=True)
    os.makedirs(save_output, exist_ok=True)

    # data loading
    from utils.loader import dataLoader_test as dataLoader

    data = dataLoader(config, dataset=task, export_task=export_task)
    print("Data is: ",data)
    test_set, test_loader = data["test_set"], data["test_loader"]
    print("Size test: ",len(test_set))
    print("Size loader: ",len(test_loader))
    # model loading
    ## load pretrained
    # Read the checkpoint path before the try block so the except
    # handler below can reference it safely.
    path = config["pretrained"]
    try:
        print("==> Loading pre-trained network.")
        print("path: ", path)
        # This class runs the SuperPoint network and processes its outputs.

        fe = SuperPointFrontend_torch(
            config=config,
            weights_path=path,
            nms_dist=nms_dist,
            conf_thresh=conf_thresh,
            nn_thresh=nn_thresh,
            cuda=False,
            device=device,
        )
        print("==> Successfully loaded pre-trained network.")

        fe.net_parallel()
        print(path)
        # save to files
        save_file = save_output / "export.txt"
        with open(save_file, "a") as myfile:
            myfile.write("load model: " + path + "\n")
    except Exception:
        print(f"load model: {path} failed! ")
        raise

    def load_as_float(path):
        return imread(path).astype(np.float32) / 255
    print("Tracker")
    tracker = PointTracker(max_length, nn_thresh=fe.nn_thresh)
    with open(save_file, "a") as myfile:
        myfile.write("homography adaptation: " + str(homoAdapt_iter) + "\n")
    print("Load save file")
    '''
    print(len(test_loader))
    for i,sample in enumerate(test_loader):
        print("Hello world")
        print("Img: ",sample["image"].size())
        print("Name: ",test_set[i]["name"])
        print("valid mask: ",test_set[i]["valid_mask"].size())
        print("valid img_2D: ",test_set[i]["image_2D"].size())
        print("valid mask: ",test_set[i]["valid_mask"].size())
        print("homograpgy: ",test_set[i]["homographies"].size())
        print("inverse: ",test_set[i]["inv_homographies"].size())
        print("scene name: ",test_set[i]["scene_name"])
        print()
    '''
    ## loop through all images
    for i, sample in tqdm(enumerate(test_loader)):
        img, mask_2D = sample["image"], sample["valid_mask"]
        img = img.transpose(0, 1)
        img_2D = sample["image_2D"].numpy().squeeze()
        mask_2D = mask_2D.transpose(0, 1)

        # note: sample["homographies"] is bound to inv_homographies (and
        # vice versa); combine_heatmap below consumes the inv_homographies
        # binding
        inv_homographies, homographies = (
            sample["homographies"],
            sample["inv_homographies"],
        )
        img, mask_2D, homographies, inv_homographies = (
            img.to(device),
            mask_2D.to(device),
            homographies.to(device),
            inv_homographies.to(device),
        )
        # sample = test_set[i]
        name = sample["name"][0]
        logging.info(f"name: {name}")
        if check_exist:
            p = Path(save_output, "{}.npz".format(name))
            if p.exists():
                logging.info("file %s exists. skip the sample.", name)
                continue
        print("Pass img to network")
        # pass through network
        heatmap = fe.run(img, onlyHeatmap=True, train=False)
        outputs = combine_heatmap(heatmap, inv_homographies, mask_2D, device=device)
        pts = fe.getPtsFromHeatmap(outputs.detach().cpu().squeeze())  # (x,y, prob)

        # subpixel prediction
        if config["model"]["subpixel"]["enable"]:
            fe.heatmap = outputs  # tensor [batch, 1, H, W]
            print("outputs: ", outputs.shape)
            print("pts: ", pts.shape)
            pts = fe.soft_argmax_points([pts])
            pts = pts[0]

        ## top K points
        pts = pts.transpose()
        print("total points: ", pts.shape)
        print("pts: ", pts[:5])
        if top_k and pts.shape[0] > top_k:
            # assumes pts come back sorted by confidence, so the first
            # top_k rows are the strongest detections
            pts = pts[:top_k, :]
            print("topK filter: ", pts.shape)

        ## save keypoints
        pred = {}
        pred.update({"pts": pts})

        ## - make directories
        filename = str(name)
        if task == "Kitti" or "Kitti_inh":
            scene_name = sample["scene_name"][0]
            os.makedirs(Path(save_output, scene_name), exist_ok=True)

        path = Path(save_output, "{}.npz".format(filename))
        np.savez_compressed(path, **pred)

        ## output images for visualization labels
        if output_images:
            img_pts = draw_keypoints(img_2D * 255, pts.transpose())
            f = save_output / (str(count) + ".png")
            if task == "Coco" or "Kitti":
                f = save_output / (name + ".png")
            saveImg(img_pts, str(f))
        count += 1

    print("output pseudo ground truth: ", count)
    save_file = save_output / "export.txt"
    with open(save_file, "a") as myfile:
        myfile.write("Homography adaptation: " + str(homoAdapt_iter) + "\n")
        myfile.write("output pairs: " + str(count) + "\n")
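
The exported labels can be read back with NumPy; a minimal sketch, assuming a file produced by the loop above (the filename is hypothetical; "pts" is the key saved via np.savez_compressed):

import numpy as np

pred = np.load("predictions/train/000000.npz")
pts = pred["pts"]  # (N, 3) keypoints as (x, y, confidence)
print(pts.shape, pts[:5])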
Example #4
def export_detector_phototourism_gpu(config, output_dir, args):
    """
    input 1 images, output pseudo ground truth by homography adaptation.
    Save labels:
        pred:
            'prob' (keypoints): np (N1, 3)
    """
    from utils.utils import pltImshow
    from utils.utils import saveImg
    from utils.draw import draw_keypoints

    proj_path = "/data/projects/pytorch-superpoint"
    splits = ["train", "val"]

    # basic setting
    task = config["data"]["dataset"]
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    logging.info("train on device: %s", device)
    with open(osp.join(proj_path, output_dir, "config.yml"), "w") as f:
        yaml.dump(config, f, default_flow_style=False)

    ## parameters
    nms_dist = config["model"]["nms"]  # 4
    top_k = config["model"]["top_k"]
    homoAdapt_iter = config["data"]["homography_adaptation"]["num"]
    conf_thresh = config["model"]["detection_threshold"]
    nn_thresh = 0.7
    count = 0
    output_images = args.outputImg
    check_exist = True

    ## save data: per-split output paths are created in the loop below
    def _create_loader(dataset, n_workers=8):
        return torch.utils.data.DataLoader(
            dataset,
            shuffle=False,
            pin_memory=True,
            num_workers=n_workers,
        )

    data_loaders = {}
    # create the dataset and dataloader classes
    for split in splits:
        dataset = Phototourism(split=split, **config["data"])
        data_loaders[split] = _create_loader(dataset)

    # model loading
    ## load pretrained
    # Read the checkpoint path before the try block so the except
    # handler below can reference it safely.
    path = config["pretrained"]
    try:
        print("==> Loading pre-trained network.")
        print("path: ", path)
        # This class runs the SuperPoint network and processes its outputs.

        fe = SuperPointFrontend_torch(
            config=config,
            weights_path=path,
            nms_dist=nms_dist,
            conf_thresh=conf_thresh,
            nn_thresh=nn_thresh,
            cuda=False,
            device=device,
        )
        print("==> Successfully loaded pre-trained network.")

        fe.net_parallel()
        print(path)
    except Exception:
        print(f"load model: {path} failed! ")
        raise

    ## loop through all images
    for split in splits:
        save_path = osp.join(proj_path, output_dir, "predictions", split)
        det_path = osp.join(save_path, "detection")
        os.makedirs(det_path, exist_ok=True)

        if output_images:
            quality_res_path = osp.join(save_path, "quality_res")
            os.makedirs(quality_res_path, exist_ok=True)

        print(len(data_loaders[split]))
        for i, sample in tqdm(enumerate(data_loaders[split])):
            img, mask_2D = sample["image"], sample["valid_mask"]
            img = img.transpose(0, 1)
            img_2D = sample["image_2D"].numpy().squeeze()
            mask_2D = mask_2D.transpose(0, 1)

            # note: sample["homographies"] is bound to inv_homographies
            # (and vice versa); combine_heatmap below consumes the
            # inv_homographies binding
            inv_homographies, homographies = (
                sample["homographies"],
                sample["inv_homographies"],
            )
            img, mask_2D, homographies, inv_homographies = (
                img.to(device),
                mask_2D.to(device),
                homographies.to(device),
                inv_homographies.to(device),
            )
            # sample = test_set[i]
            name = sample["name"][0]
            fname_out = osp.join(det_path,
                                 "{}.npz".format(str(name).replace('/', '_')))
            if osp.isfile(fname_out):
                continue

            # pass through network
            heatmap = fe.run(img, onlyHeatmap=True, train=False)
            outputs = combine_heatmap(heatmap,
                                      inv_homographies,
                                      mask_2D,
                                      device=device)
            pts = fe.getPtsFromHeatmap(
                outputs.detach().cpu().squeeze())  # (x,y, prob)

            # subpixel prediction
            if config["model"]["subpixel"]["enable"]:
                fe.heatmap = outputs  # tensor [batch, 1, H, W]
                pts = fe.soft_argmax_points([pts])
                pts = pts[0]

            ## top K points
            pts = pts.transpose()
            if top_k and pts.shape[0] > top_k:
                # assumes pts come back sorted by confidence, so the first
                # top_k rows are the strongest detections
                pts = pts[:top_k, :]
                print("topK filter: ", pts.shape)

            ## save keypoints
            pred = {}
            pred.update({"pts": pts})

            ## - make directories
            np.savez_compressed(fname_out, **pred)

            ## output images for visualization labels
            if output_images:
                img_pts = draw_keypoints(img_2D * 255, pts.transpose())
                fname_out_det = osp.join(quality_res_path,
                                         str(name).replace('/', '_') + ".png")
                saveImg(img_pts, fname_out_det)
            count += 1
            print(f"{i + 1} out of {len(data_loaders[split])} done.")

        print("output pseudo ground truth, ", split.capitalize(), ": ", count)

    print("Done")
Example #5
class KittiOdoLoader(object):
    def __init__(self,
                 dataset_dir,
                 img_height=375,
                 img_width=1242,
                 cam_ids=['02'],
                 get_X=False,
                 get_pose=False,
                 get_sift=False,
                 get_SP=False,
                 sift_num=2000,
                 if_BF_matcher=False,
                 save_npy=True):
                 # depth_size_ratio=1):
        dir_path = Path(__file__).realpath().dirname()

        self.dataset_dir = Path(dataset_dir)
        self.img_height = img_height
        self.img_width = img_width
        self.cam_ids = cam_ids
        # assert self.cam_ids == ['02'], 'Support left camera only!'
        self.cid_to_num = {'00': 0, '01': 1, '02': 2, '03': 3}
        self.train_seqs = [0, 1, 2, 3, 4, 5, 6, 7, 8]
        self.test_seqs = [9, 10]
        # self.train_seqs = [4]
        # self.test_seqs = []
        # self.train_seqs = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        # self.test_seqs = []
        self.map_to_raw = {
            '00': '2011_10_03_drive_0027', '01': '2011_10_03_drive_0042',
            '02': '2011_10_03_drive_0034', '03': '2011_09_26_drive_0067',
            '04': '2011_09_30_drive_0016', '05': '2011_09_30_drive_0018',
            '06': '2011_09_30_drive_0020', '07': '2011_09_30_drive_0027',
            '08': '2011_09_30_drive_0028', '09': '2011_09_30_drive_0033',
            '10': '2011_09_30_drive_0034',
        }

        self.get_X = get_X
        self.get_pose = get_pose
        self.get_sift = get_sift
        self.get_SP = get_SP
        self.save_npy = save_npy
        if self.save_npy:
            logging.info('+++ Dumping as npy')
        else:
            logging.info('+++ Dumping as h5')
        if self.get_sift:
            self.sift_num = sift_num
            self.if_BF_matcher = if_BF_matcher
            self.sift = cv2.xfeatures2d.SIFT_create(nfeatures=self.sift_num, contrastThreshold=1e-5)
            # self.bf = cv2.BFMatcher(normType=cv2.NORM_L2)
            # FLANN_INDEX_KDTREE = 0
            # index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
            # search_params = dict(checks = 50)
            # self.flann = cv2.FlannBasedMatcher(index_params, search_params)
            # self.sift_matcher = self.bf if BF_matcher else self.flann

        self.scenes = {'train': [], 'test': []}
        if self.get_SP:
            self.prapare_SP()
        self.collect_train_folders()
        self.collect_test_folders()

    def prapare_SP(self):
        logging.info('Preparing SP inference.')
        with open(DEEPSFM_PATH + '/configs/superpoint_coco_train.yaml', 'r') as f:
            self.config_SP = yaml.load(f, Loader=yaml.FullLoader)
        nms_dist = self.config_SP['model']['nms']
        conf_thresh = self.config_SP['model']['detection_threshold']
        # nn_thresh is hard-coded here instead of reading
        # self.config_SP['model']['nn_thresh']
        nn_thresh = 1.0
        path = DEEPSFM_PATH + '/' + self.config_SP['pretrained']
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        self.fe = SuperPointFrontend_torch(weights_path=path,
                                           nms_dist=nms_dist,
                                           conf_thresh=conf_thresh,
                                           nn_thresh=nn_thresh,
                                           cuda=False,
                                           device=device)


    def collect_train_folders(self):
        for seq in self.train_seqs:
            seq_dir = os.path.join(self.dataset_dir, 'sequences', '%.2d' % seq)
            self.scenes['train'].append(seq_dir)

    def collect_test_folders(self):
        for seq in self.test_seqs:
            seq_dir = os.path.join(self.dataset_dir, 'sequences', '%.2d' % seq)
            self.scenes['test'].append(seq_dir)

    def collect_scene_from_drive(self, drive_path):
        train_scenes = []
        logging.info('Gathering info for %s...'%drive_path)
        for c in self.cam_ids:
            scene_data = {'cid': c, 'cid_num': self.cid_to_num[c], 'dir': Path(drive_path), 'rel_path': Path(drive_path).name + '_' + c}
            img_dir = os.path.join(drive_path, 'image_%d'%scene_data['cid_num'])
            scene_data['img_files'] = sorted(glob(img_dir + '/*.png'))
            scene_data['N_frames'] = len(scene_data['img_files'])
            assert scene_data['N_frames'] != 0, 'No file found for %s!'%drive_path
            scene_data['frame_ids'] = ['{:06d}'.format(i) for i in range(scene_data['N_frames'])]

            img_shape = None
            zoom_xy = None
            show_zoom_info = True
            for idx in tqdm(range(scene_data['N_frames'])):
                img, zoom_xy, _ = self.load_image(scene_data, idx, show_zoom_info)
                show_zoom_info = False
                if img is None:
                    if idx == 0:
                        logging.warning('0 images in %s. Skipped.'%drive_path)
                        return []
                    continue  # skip missing frames rather than crash on img.shape
                if img_shape is not None:
                    assert img_shape == img.shape, 'Inconsistent image shape in seq %s!'%drive_path
                else:
                    img_shape = img.shape
            # print(img_shape)
            scene_data['calibs'] = {'im_shape': [img_shape[0], img_shape[1]], 'zoom_xy': zoom_xy, 'rescale': zoom_xy != (1., 1.)}
            # Get geo params from the RAW dataset calibs
            P_rect_ori_dict = self.get_P_rect(scene_data, scene_data['calibs'])
            intrinsics = P_rect_ori_dict[c][:,:3]
            calibs_rects = self.get_rect_cams(intrinsics, P_rect_ori_dict['02'])

            drive_in_raw = self.map_to_raw[drive_path[-2:]]
            date = drive_in_raw[:10]
            seq = drive_in_raw[-4:]
            calib_path_in_raw = Path(self.dataset_dir)/'raw'/date
            imu2velo_dict = read_calib_file(calib_path_in_raw/'calib_imu_to_velo.txt')
            velo2cam_dict = read_calib_file(calib_path_in_raw/'calib_velo_to_cam.txt')
            cam2cam_dict = read_calib_file(calib_path_in_raw/'calib_cam_to_cam.txt')
            velo2cam_mat = transform_from_rot_trans(velo2cam_dict['R'], velo2cam_dict['T'])
            imu2velo_mat = transform_from_rot_trans(imu2velo_dict['R'], imu2velo_dict['T'])
            cam_2rect_mat = transform_from_rot_trans(cam2cam_dict['R_rect_00'], np.zeros(3))
            scene_data['calibs'].update({'K': intrinsics, 'P_rect_ori_dict': P_rect_ori_dict, 'cam_2rect': cam_2rect_mat, 'velo2cam': velo2cam_mat})
            scene_data['calibs'].update(calibs_rects)

            # Get pose
            poses = np.genfromtxt(self.dataset_dir/'poses'/'{}.txt'.format(drive_path[-2:])).astype(np.float32).reshape(-1, 3, 4)
            assert scene_data['N_frames']==poses.shape[0], 'scene_data[N_frames]!=poses.shape[0], %d!=%d'%(scene_data['N_frames'], poses.shape[0])
            scene_data['poses'] = poses

            scene_data['Rt_cam2_gt'] = scene_data['calibs']['Rtl_gt']

            train_scenes.append(scene_data)
        return train_scenes

    def construct_sample(self, scene_data, idx, frame_id, show_zoom_info):
        img, zoom_xy, img_ori = self.load_image(scene_data, idx, show_zoom_info)
        # print(img.shape, img_ori.shape)
        sample = {"img":img, "id":frame_id}
        if self.get_X:
            velo = load_velo(scene_data, idx)
            if velo is None:
                logging.error('0 velo in %s. Skipped.'%scene_data['dir'])
            velo_homo = utils_misc.homo_np(velo)
            val_idxes, X_rect, X_cam0 = rectify(velo_homo, scene_data['calibs']) # list, [N, 3]
            sample['X_cam2_vis'] = X_rect[val_idxes].astype(np.float32)
            sample['X_cam0_vis'] = X_cam0[val_idxes].astype(np.float32)
        if self.get_pose:
            sample['pose'] = scene_data['poses'][idx].astype(np.float32)
        if self.get_sift:
            # logging.info('Getting sift for frame %d/%d.'%(idx, scene_data['N_frames']))
            kp, des = self.sift.detectAndCompute(img_ori, None) ## IMPORTANT: normalize these points
            x_all = np.array([p.pt for p in kp])
            # print(zoom_xy)
            x_all = (x_all * np.array([[zoom_xy[0], zoom_xy[1]]])).astype(np.float32)
            # print(x_all.shape, np.amax(x_all, axis=0), np.amin(x_all, axis=0))
            if x_all.shape[0] != self.sift_num:
                choice = crop_or_pad_choice(x_all.shape[0], self.sift_num, shuffle=True)
                x_all = x_all[choice]
                des = des[choice]
            sample['sift_kp'] = x_all
            sample['sift_des'] = des
        if self.get_SP:
            img_ori_gray = cv2.cvtColor(img_ori, cv2.COLOR_RGB2GRAY)
            img = torch.from_numpy(img_ori_gray).float().unsqueeze(0).unsqueeze(0) / 255.
            pts, desc, _, heatmap = self.fe.run(img)
            pts = pts[0].T # [N, 3]
            pts[:, :2] = (pts[:, :2] * np.array([[zoom_xy[0], zoom_xy[1]]])).astype(np.float32)
            desc = desc[0].T # [N, 256]
            sample['SP_kp'] = pts
            sample['SP_des'] = desc
        return sample
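
    # A minimal sketch of consuming a sample (hypothetical loader and
    # inputs; keys exist only when the corresponding get_* flag is set):
    #
    #     sample = loader.construct_sample(scene_data, idx=0,
    #                                      frame_id='000000',
    #                                      show_zoom_info=False)
    #     sample["img"]     # resized RGB image
    #     sample["pose"]    # (3, 4) ground-truth pose
    #     sample["SP_kp"]   # (N, 3) SuperPoint keypoints (x, y, confidence)
    #     sample["SP_des"]  # (N, 256) descriptors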

    def dump_drive(self, args, drive_path, split, scene_data=None):
        assert split in ['train', 'test']
        if scene_data is None:
            train_scenes = self.collect_scene_from_drive(drive_path)
            if not train_scenes:
                logging.warning('Empty scene data for %s. Skipped.'%drive_path)
                return
            assert len(train_scenes)==1, 'More than one camera not supported! %d'%len(train_scenes)
            scene_data = train_scenes[0]

        dump_dir = Path(args.dump_root)/scene_data['rel_path']
        dump_dir.mkdir_p()
        intrinsics = scene_data['calibs']['K']
        dump_cam_file = dump_dir/'cam'
        np.save(dump_cam_file+'.npy', intrinsics.astype(np.float32))
        dump_Rt_cam2_gt_file = dump_dir/'Rt_cam2_gt'
        np.save(dump_Rt_cam2_gt_file, scene_data['Rt_cam2_gt'].astype(np.float32))
        poses_file = dump_dir/'poses'
        poses = []

        logging.info('Dumping %d samples to %s...'%(scene_data['N_frames'], dump_dir))
        sample_name_list = []
        # sift_des_list = []
        for idx in tqdm(range(scene_data['N_frames'])):
            frame_id = scene_data['frame_ids'][idx]
            assert int(frame_id)==idx
            sample = self.construct_sample(scene_data, idx, frame_id, show_zoom_info=False)

            img, frame_nb = sample["img"], sample["id"]
            dump_img_file = dump_dir/'{}.jpg'.format(frame_nb)
            scipy.misc.imsave(dump_img_file, img)
            if "pose" in sample.keys():
                poses.append(sample["pose"].astype(np.float32))
            if "X_cam0_vis" in sample.keys():
                dump_X_cam0_file = dump_dir/'X_cam0_{}'.format(frame_nb)
                dump_X_cam2_file = dump_dir/'X_cam2_{}'.format(frame_nb)
                if self.save_npy:
                    np.save(dump_X_cam0_file+'.npy', sample["X_cam0_vis"])
                    np.save(dump_X_cam2_file+'.npy', sample["X_cam2_vis"])
                else:
                    # bundle both point clouds into a single h5 per frame
                    saveh5({"X_cam0_vis": sample["X_cam0_vis"],
                            "X_cam2_vis": sample["X_cam2_vis"]},
                           dump_dir/'X_{}.h5'.format(frame_nb))
            if "sift_kp" in sample.keys():
                dump_sift_file = dump_dir/'sift_{}'.format(frame_nb)
                if self.save_npy:
                    np.save(dump_sift_file+'.npy', np.hstack((sample['sift_kp'], sample['sift_des'])))
                else:
                    saveh5({'sift_kp': sample['sift_kp'], 'sift_des': sample['sift_des']}, dump_sift_file+'.h5')
                # sift_des_list.append(sample['sift_des'])
            if "SP_kp" in sample.keys():
                dump_sift_file = dump_dir/'SP_{}'.format(frame_nb)
                if self.save_npy:
                    np.save(dump_sift_file+'.npy', np.hstack((sample['SP_kp'], sample['SP_des'])))
                    # print(sample['SP_kp'].shape, sample['SP_des'].shape)
                else:
                    pass

            sample_name_list.append('%s %s'%(dump_dir[-5:], frame_nb))

        # Get all poses
        if "pose" in sample.keys():
            if len(poses) != 0:
                # np.savetxt(poses_file, np.array(poses).reshape(-1, 16), fmt='%.20e')
                if self.save_npy:
                    np.save(poses_file+'.npy', np.stack(poses).reshape(-1, 3, 4))
                else:
                    saveh5({"poses": np.array(poses).reshape(-1, 3, 4)}, poses_file+'.h5')

        # Get SIFT matches
        if self.get_sift:
            delta_ijs = [1, 2, 3, 5, 8, 10]
            # delta_ijs = [1]
            num_tasks = len(delta_ijs)
            num_workers = min(len(delta_ijs), default_number_of_process)
            # num_workers = 1
            logging.info('Getting SIFT matches on %d workers for delta_ijs = %s'%(num_workers, ' '.join(str(e) for e in delta_ijs)))

            with ProcessPool(max_workers=num_workers) as pool:
                tasks = pool.map(dump_sift_match_idx, delta_ijs, [scene_data['N_frames']]*num_tasks, \
                    [dump_dir]*num_tasks, [self.save_npy]*num_tasks, [self.if_BF_matcher]*num_tasks)
                try:
                    for _ in tqdm(tasks.result(), total=num_tasks):
                        pass
                except KeyboardInterrupt as e:
                    tasks.cancel()
                    raise e

        # Get SP matches
        if self.get_SP:
            delta_ijs = [1, 2, 3, 5, 8, 10]
            nn_threshes = [0.7, 1.0]
            # delta_ijs = [1]
            num_tasks = len(delta_ijs)
            num_workers = min(len(delta_ijs), default_number_of_process)
            # num_workers = 1
            logging.info('Getting SP matches on %d workers for delta_ijs = %s'%(num_workers, ' '.join(str(e) for e in delta_ijs)))

            with ProcessPool(max_workers=num_workers) as pool:
                tasks = pool.map(dump_SP_match_idx, delta_ijs, [scene_data['N_frames']]*num_tasks, \
                    [dump_dir]*num_tasks, [self.save_npy]*num_tasks, [nn_threshes]*num_tasks)
                try:
                    for _ in tqdm(tasks.result(), total=num_tasks):
                        pass
                except KeyboardInterrupt as e:
                    tasks.cancel()
                    raise e

            # for delta_ij in delta_ijs:
            #     dump_match_idx(delta_ij, scene_data['N_frames'], sift_des_list, dump_dir, self.save_npy, self.if_BF_matcher)

        if len(dump_dir.files('*.jpg')) < 2:
            dump_dir.rmtree()

        return sample_name_list

    def load_image(self, scene_data, tgt_idx, show_zoom_info=True):
        img_file = scene_data['dir']/'image_{}'.format(scene_data['cid_num'])/scene_data['frame_ids'][tgt_idx]+'.png'
        if not img_file.isfile():
            logging.warning('Image %s not found!'%img_file)
            return None, None, None
        img_ori = scipy.misc.imread(img_file)
        if [self.img_height, self.img_width] == [img_ori.shape[0], img_ori.shape[1]]:
            return img_ori, (1., 1.), img_ori
        else:
            zoom_y = self.img_height/img_ori.shape[0]
            zoom_x = self.img_width/img_ori.shape[1]
            if show_zoom_info:
                logging.warning('[%s] Zooming the image (H%d, W%d) with zoom_yH=%f, zoom_xW=%f to (H%d, W%d).'%\
                    (img_file, img_ori.shape[0], img_ori.shape[1], zoom_y, zoom_x, self.img_height, self.img_width))
            img = scipy.misc.imresize(img_ori, (self.img_height, self.img_width))
            return img, (zoom_x, zoom_y), img_ori

    def get_P_rect(self, scene_data, calibs, get_2cam_dict=True, cid='02'):
        # calib_file = scene_data['dir'].parent/'calib_cam_to_cam.txt'
        calib_file = scene_data['dir']/'calib.txt'
        if get_2cam_dict:
            P_rect = {}
            for cid in ['00', '01', '02', '03']:
                P_rect[cid], _ = read_odo_calib_file(calib_file, cid=self.cid_to_num[cid])
                if calibs['rescale']:
                    P_rect[cid] = scale_P(P_rect[cid], calibs['zoom_xy'][0], calibs['zoom_xy'][1])
            return P_rect
        # single-camera path: `cid` selects the camera (defaults to the
        # left color camera '02')
        P_rect, _ = read_odo_calib_file(calib_file, cid=self.cid_to_num[cid])
        if calibs['rescale']:
            P_rect = scale_P(P_rect, calibs['zoom_xy'][0], calibs['zoom_xy'][1])
        return P_rect

    def get_rect_cams(self, K, P_rect_20):
        Ml_gt = np.matmul(np.linalg.inv(K), P_rect_20)
        tl_gt = Ml_gt[:, 3:4]
        Rl_gt = Ml_gt[:, :3]
        Rtl_gt = np.vstack((np.hstack((Rl_gt, tl_gt)), np.array([0., 0., 0., 1.], dtype=np.float32)))
        calibs_rects = {'Rtl_gt': Rtl_gt}
        return calibs_rects
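
get_rect_cams factors the rectified projection as P_rect_20 = K @ [R|t]; a minimal sketch checking that relation with made-up values (K and the baseline t are hypothetical, KITTI-like numbers):

import numpy as np

K = np.array([[718.856, 0., 607.193],
              [0., 718.856, 185.216],
              [0., 0., 1.]])
t = np.array([[-0.537], [0.], [0.]])  # hypothetical baseline
P_rect_20 = K @ np.hstack((np.eye(3), t))

Ml = np.linalg.inv(K) @ P_rect_20  # same factorization as get_rect_cams
assert np.allclose(Ml[:, :3], np.eye(3))  # rotation part
assert np.allclose(Ml[:, 3:4], t)         # translation part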