Code Example #1
    def _pre_transform_fragments(self):
        """
        apply pre_transform on fragments (ply) and save the results
        """
        out_dir = osp.join(self.processed_dir, 'test', 'fragment')
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)

        # table mapping fragment numbers to fragment info
        self.table = dict()
        list_scene = os.listdir(osp.join(self.raw_dir, "test"))
        for scene_path in list_scene:
            fragment_dir = osp.join(self.raw_dir, "test", scene_path)

            # skip plain files; only scene directories contain fragments
            if osp.isfile(fragment_dir):
                continue
            list_fragment_path = sorted(
                [f for f in os.listdir(fragment_dir) if 'pcd' in f])
            for f_p in list_fragment_path:
                fragment_path = osp.join(fragment_dir, f_p)
                out_dir = osp.join(self.processed_dir, "test", 'fragment',
                                   scene_path)
                makedirs(out_dir)
                out_path = osp.join(out_dir,
                                    'fragment_{:06d}.pt'.format(find_int(f_p)))
                pos = torch.from_numpy(BasePCRBTest.read_pcd(fragment_path)[0])
                data = Data(pos=pos)
                if self.pre_transform is not None:
                    data = self.pre_transform(data)
                torch.save(data, out_path)
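Every example on this page guards its work with files_exist and creates output folders with makedirs. Neither helper is shown in the snippets; a minimal sketch of the assumed semantics, modelled on the torch_geometric helpers these projects import, would be:

    import os
    import os.path as osp

    def files_exist(files):
        # an empty list counts as "nothing exists", so processing proceeds
        return len(files) != 0 and all(osp.exists(f) for f in files)

    def makedirs(path):
        # create the folder (and any missing parents), tolerating an existing one
        os.makedirs(osp.expanduser(osp.normpath(path)), exist_ok=True)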
Code Example #2
    def _compute_matches_between_fragments(self):
        ind = 0
        out_dir = osp.join(self.processed_dir, "test", "matches")
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)

        list_scene = os.listdir(osp.join(self.raw_dir, "test"))
        for scene in list_scene:
            # skip plain files such as the pair logs; scenes are directories
            if osp.isfile(osp.join(self.raw_dir, "test", scene)):
                continue
            path_log = osp.join(self.raw_dir, "test", scene + "_global.txt")
            list_pair = BasePCRBTest.parse_pair_files(path_log)
            for pair in list_pair:
                path1 = osp.join(
                    self.processed_dir, "test", 'fragment', scene,
                    'fragment_{:06d}.pt'.format(find_int(pair["source_name"])))
                path2 = osp.join(
                    self.processed_dir, "test", 'fragment', scene,
                    'fragment_{:06d}.pt'.format(find_int(pair["target_name"])))
                data1 = torch.load(path1)
                data2 = torch.load(path2)
                match = compute_overlap_and_matches(data1, data2,
                                                    self.max_dist_overlap)
                match['path_source'] = path1
                match['path_target'] = path2
                match['name_source'] = pair["source_name"]
                match['name_target'] = pair["target_name"]
                match['scene'] = scene
                match['trans'] = pair["trans"]
                out_path = osp.join(self.processed_dir, "test", 'matches',
                                    'matches{:06d}.npy'.format(ind))
                np.save(out_path, match)
                ind += 1
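compute_overlap_and_matches is not shown on this page. Judging from how its result is consumed (match['pair'] is indexed into both clouds in Code Example #11, and match['overlap'] is thresholded in Code Example #17), it pairs up points of the two fragments that lie within max_dist_overlap of each other and reports an overlap ratio; the variant in Code Example #5 also accepts a ground-truth transform. A hypothetical, simplified sketch of that computation, not the torch-points3d implementation:

    import numpy as np
    from scipy.spatial import cKDTree

    def compute_overlap_and_matches(data1, data2, max_dist_overlap):
        # hypothetical sketch: nearest-neighbour pairs below a distance threshold
        pos1 = data1.pos.numpy()
        pos2 = data2.pos.numpy()
        dist, ind = cKDTree(pos2).query(pos1, k=1)
        mask = dist < max_dist_overlap
        # index pairs (into data1, into data2) that are close enough
        pair = np.stack([np.nonzero(mask)[0], ind[mask]], axis=1)
        # fraction of each cloud that has a close counterpart
        overlap = [mask.mean(), len(np.unique(ind[mask])) / len(pos2)]
        return {"pair": pair, "overlap": overlap}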
Code Example #3
 def _pre_transform_fragment(self, mod):
     """
     apply pre_transform to the raw fragments and save the results under fragment
     """
     out_dir = osp.join(self.processed_dir, mod, 'fragment')
     if files_exist([out_dir]):  # pragma: no cover
         return
     makedirs(out_dir)
     for scene_path in os.listdir(osp.join(self.raw_dir, mod)):
         # TODO list the right sequences.
         list_seq = [
             f for f in os.listdir(osp.join(self.raw_dir, mod, scene_path))
             if 'seq' in f
         ]
         for seq in list_seq:
             in_dir = osp.join(self.processed_dir, mod, 'raw_fragment',
                               scene_path, seq)
             out_dir = osp.join(self.processed_dir, mod, 'fragment',
                                scene_path, seq)
             makedirs(out_dir)
             list_fragment_path = sorted(
                 [f for f in os.listdir(in_dir) if 'fragment' in f])
             for path in list_fragment_path:
                 data = torch.load(osp.join(in_dir, path))
                 if self.pre_transform is not None:
                     data = self.pre_transform(data)
                 torch.save(data, osp.join(out_dir, path))
Code Example #4
 def download(self):
     folder = osp.join(self.raw_dir, "test")
     if files_exist([folder]):  # pragma: no cover
         log.warning("already downloaded {}".format("test"))
         return
     else:
         makedirs(folder)
     log.info("Download elements in the file {}...".format(folder))
     for name, url in self.DATASETS:
         req = requests.get(url)
         with open(osp.join(folder, name + ".zip"), "wb") as archive:
             archive.write(req.content)
         with ZipFile(osp.join(folder, name + ".zip"), "r") as zip_obj:
             log.info("extracting dataset {}".format(name))
             zip_obj.extractall(osp.join(folder, name))
             log.info("converting to PCD")
             asl_to_pcd(osp.join(folder, name))
         file_not_to_remove = glob.glob(osp.join(folder, name, "*.pcd"))
         filelist = glob.glob(osp.join(folder, name, "*"))
         for file_to_remove in filelist:
             if file_to_remove not in file_not_to_remove:
                 os.remove(file_to_remove)
         os.remove(osp.join(folder, name + ".zip"))
     self.download_pairs(folder)
Code Example #5
    def _compute_matches_between_fragments(self):
        ind = 0
        out_dir = osp.join(self.processed_dir, "test", "matches")
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)

        list_scene = os.listdir(osp.join(self.raw_dir, "test"))
        for scene in list_scene:
            path_log = osp.join(self.raw_dir, "test", scene, "gt.log")
            list_pair_num, list_mat = read_gt_log(path_log)
            for i, pair in enumerate(list_pair_num):
                path1 = osp.join(self.processed_dir, "test", 'fragment', scene,
                                 'fragment_{:06d}.pt'.format(pair[0]))
                path2 = osp.join(self.processed_dir, "test", 'fragment', scene,
                                 'fragment_{:06d}.pt'.format(pair[1]))
                data1 = torch.load(path1)
                data2 = torch.load(path2)
                match = compute_overlap_and_matches(
                    data1,
                    data2,
                    self.max_dist_overlap,
                    trans_gt=torch.from_numpy(np.linalg.inv(list_mat[i])).to(
                        data1.pos.dtype))
                match['path_source'] = path1
                match['path_target'] = path2
                match['name_source'] = str(pair[0])
                match['name_target'] = str(pair[1])
                match['scene'] = scene
                out_path = osp.join(self.processed_dir, "test", 'matches',
                                    'matches{:06d}.npy'.format(ind))
                np.save(out_path, match)
                ind += 1
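read_gt_log supplies the pair list for Code Example #5. Assuming the standard Redwood/3DMatch gt.log layout (a header line carrying the two fragment ids, followed by the four rows of a 4x4 transform), a minimal sketch of such a parser:

    import numpy as np

    def read_gt_log(path):
        # minimal sketch of a Redwood-style gt.log parser
        list_pair, list_mat = [], []
        with open(path, "r") as f:
            lines = [line.split() for line in f if line.strip()]
        for k in range(0, len(lines), 5):
            list_pair.append([int(lines[k][0]), int(lines[k][1])])
            list_mat.append(np.array(lines[k + 1:k + 5], dtype=np.float64))
        return list_pair, list_mat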
Code Example #6
    def _create_fragment(self, mod):
        r"""
        create fragments from the rgbd frames ie a partial reconstruction of
        the scene with some frames(usually 50).
        We will only use the first sequence for each scene
        """

        print("Create fragment from RGBD frames...")
        if files_exist([osp.join(self.processed_dir, mod,
                                 'raw_fragment')]):  # pragma: no cover
            log.warning("the fragments on mode {} already exists".format(mod))
            return
        for scene_path in os.listdir(osp.join(self.raw_dir, mod)):
            # TODO list the right sequences.
            list_seq = [
                f for f in os.listdir(osp.join(self.raw_dir, mod, scene_path))
                if 'seq' in f
            ]
            for seq in list_seq:
                frames_dir = osp.join(self.raw_dir, mod, scene_path, seq)
                out_dir = osp.join(self.processed_dir, mod, 'raw_fragment',
                                   scene_path, seq)
                makedirs(out_dir)
                path_intrinsic = osp.join(self.raw_dir, mod, scene_path,
                                          'camera-intrinsics.txt')
                list_path_frames = sorted([
                    osp.join(frames_dir, f) for f in os.listdir(frames_dir)
                    if 'png' in f and 'depth' in f
                ])
                # list_path_color = sorted([osp.join(frames_dir, f)
                #                          for f in os.listdir(frames_dir)
                #                          if 'png' in f and 'color' in f])
                list_path_trans = sorted([
                    osp.join(frames_dir, f) for f in os.listdir(frames_dir)
                    if 'pose' in f and 'txt' in f
                ])
                # compute each fragment and save it
                if not self.is_fine:
                    rgbd2fragment_rough(list_path_frames,
                                        path_intrinsic,
                                        list_path_trans,
                                        out_dir,
                                        self.num_frame_per_fragment,
                                        pre_transform=None)
                else:
                    assert len(list_path_frames) == len(list_path_trans), \
                        "For the sequence {}, the number of frames and the " \
                        "number of poses differ".format(frames_dir)
                    rgbd2fragment_fine(list_path_frames,
                                       path_intrinsic,
                                       list_path_trans,
                                       out_dir,
                                       self.num_frame_per_fragment,
                                       voxel_size=self.tsdf_voxel_size,
                                       pre_transform=None,
                                       depth_thresh=self.depth_thresh,
                                       limit_size=self.limit_size)
Code Example #7
 def download(self):
     # download the raw RGBD files for the train and validation splits
     folder = osp.join(self.raw_dir, self.mode)
     if files_exist([folder]):  # pragma: no cover
         log.warning("already downloaded {}".format(self.mode))
         return
     log.info("Download elements in the file {}...".format(folder))
     for url in self.dict_urls[self.mode]:
         path = download_url(url, folder, self.verbose)
         extract_zip(path, folder, self.verbose)
         os.unlink(path)
Code Example #8
File: basetest.py Project: zx991201/torch-points3d
    def _pre_transform_fragments(self):
        """
        apply pre_transform on fragments (ply) and save the results
        """
        out_dir = osp.join(self.processed_dir, "test", "fragment")
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)

        # table mapping fragment numbers to fragment info
        self.table = dict()
        list_scene = os.listdir(osp.join(self.raw_dir, "test"))
        for scene_path in list_scene:

            pose_path = osp.join(self.raw_dir, "test",
                                 "pose_{}.csv".format(scene_path))

            fragment_dir = osp.join(self.raw_dir, "test", scene_path)

            if osp.isfile(fragment_dir):
                continue
            list_fragment_path = sorted(
                [f for f in os.listdir(fragment_dir) if "pcd" in f])
            for i, f_p in enumerate(list_fragment_path):
                fragment_path = osp.join(fragment_dir, f_p)
                out_dir = osp.join(self.processed_dir, "test", "fragment",
                                   scene_path)
                makedirs(out_dir)
                if self.is_name_path_int:
                    out_path = osp.join(
                        out_dir, "fragment_{:06d}.pt".format(find_int(f_p)))
                else:
                    out_path = osp.join(out_dir, "{}.pt".format(f_p[:-4]))
                pos = torch.from_numpy(
                    BasePCRBTest.read_pcd(fragment_path)[0][:, :3]).float()
                data = Data(pos=pos)
                if self.pre_transform is not None:
                    data = self.pre_transform(data)
                if osp.exists(pose_path):
                    ind = find_int(f_p)
                    df = pd.read_csv(pose_path)
                    center = torch.tensor(
                        [[df[' T03'][ind], df[' T13'][ind],
                          df[' T23'][ind]]]).float().unsqueeze(0)

                    ind_sensors, _ = dense_knn(data.pos.unsqueeze(0).float(),
                                               center,
                                               k=1)
                    data.ind_sensors = ind_sensors[0][0]
                else:
                    log.warn("No censors data")

                torch.save(data, out_path)
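The dense_knn call above only serves to locate the fragment point nearest to the sensor position read from the pose CSV. Assuming dense_knn takes batched (B, N, 3) clouds and returns the indices of the k nearest neighbours, an illustrative pure-PyTorch equivalent, reusing center and data from the snippet, would be:

    # illustrative equivalent of the dense_knn(..., k=1) call above
    dist = torch.cdist(center, data.pos.unsqueeze(0).float())  # (1, 1, N)
    data.ind_sensors = dist.argmin(dim=-1)[0][0]  # index of the nearest point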
Code Example #9
File: basetest.py Project: zx991201/torch-points3d
    def _pre_transform_fragments_ply(self):
        """
        apply pre_transform on fragments (ply) and save the results
        """
        out_dir = osp.join(self.processed_dir, "test", "fragment")
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)
        ind = 0
        # table mapping fragment numbers to fragment info
        self.table = dict()

        for scene_path in os.listdir(osp.join(self.raw_dir, "test")):

            fragment_dir = osp.join(self.raw_dir, "test", scene_path)
            list_fragment_path = sorted(
                [f for f in os.listdir(fragment_dir) if "ply" in f])

            for f_p in list_fragment_path:
                fragment_path = osp.join(fragment_dir, f_p)
                # use a separate name so out_dir keeps pointing at the
                # top-level fragment folder where table.json is written below
                scene_out_dir = osp.join(self.processed_dir, "test",
                                         "fragment", scene_path)
                makedirs(scene_out_dir)
                out_path = osp.join(
                    scene_out_dir, "fragment_{:06d}.pt".format(find_int(f_p)))
                # read ply file
                with open(fragment_path, "rb") as f:
                    data = PlyData.read(f)
                pos = [
                    torch.tensor(data["vertex"][axis])
                    for axis in ["x", "y", "z"]
                ]
                pos = torch.stack(pos, dim=-1)
                data = Data(pos=pos)
                if self.pre_transform is not None:
                    data = self.pre_transform(data)
                torch.save(data, out_path)
                self.table[ind] = {
                    "in_path": fragment_path,
                    "scene_path": scene_path,
                    "fragment_name": f_p,
                    "out_path": out_path,
                }
                ind += 1

        # save the mapping table as JSON
        with open(osp.join(out_dir, "table.json"), "w") as f:
            json.dump(self.table, f)
Code Example #10
    def _pre_transform_fragments_ply(self):
        """
        apply pre_transform on fragments (ply) and save the results
        """
        out_dir = osp.join(self.processed_dir, 'test', 'fragment')
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)
        ind = 0
        # table mapping fragment numbers to fragment info
        self.table = dict()

        for scene_path in os.listdir(osp.join(self.raw_dir, "test")):

            fragment_dir = osp.join(self.raw_dir, "test", scene_path)
            list_fragment_path = sorted(
                [f for f in os.listdir(fragment_dir) if 'ply' in f])

            for f_p in list_fragment_path:
                fragment_path = osp.join(fragment_dir, f_p)
                # use a separate name so out_dir keeps pointing at the
                # top-level fragment folder where table.json is written below
                scene_out_dir = osp.join(self.processed_dir, "test",
                                         'fragment', scene_path)
                makedirs(scene_out_dir)
                out_path = osp.join(
                    scene_out_dir, 'fragment_{:06d}.pt'.format(find_int(f_p)))
                # read ply file
                with open(fragment_path, 'rb') as f:
                    data = PlyData.read(f)
                pos = [
                    torch.tensor(data['vertex'][axis])
                    for axis in ['x', 'y', 'z']
                ]
                pos = torch.stack(pos, dim=-1)
                data = Data(pos=pos)
                if self.pre_transform is not None:
                    data = self.pre_transform(data)
                torch.save(data, out_path)
                self.table[ind] = {
                    'in_path': fragment_path,
                    'scene_path': scene_path,
                    'fragment_name': f_p,
                    'out_path': out_path
                }
                ind += 1

        # save the mapping table as JSON
        with open(osp.join(out_dir, 'table.json'), 'w') as f:
            json.dump(self.table, f)
Code Example #11
    def _save_patches(self, mod):
        """
        save patch to load it offline for the training
        """
        p_extractor = PatchExtractor(self.radius_patch)
        out_dir = osp.join(self.processed_dir, mod, 'patches')
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)
        match_dir = osp.join(self.processed_dir, mod, 'matches')
        idx = 0
        for i in range(len(os.listdir(match_dir))):
            match = np.load(osp.join(match_dir, 'matches{:06d}.npy'.format(i)),
                            allow_pickle=True).item()

            for _ in range(self.num_random_pt):
                data_source = torch.load(match['path_source'])
                data_target = torch.load(match['path_target'])
                # pick a random matching pair and extract a patch around
                # each of its two points
                rand = np.random.randint(0, len(match['pair']))
                data_source = p_extractor(data_source, match['pair'][rand][0])
                data_target = p_extractor(data_target, match['pair'][rand][1])
                if self.pre_transform_patch is not None:
                    data_source = self.pre_transform_patch(data_source)
                    data_target = self.pre_transform_patch(data_target)
                # save the pair only when there is no pre_filter or when
                # both patches pass it
                if (self.pre_filter is None
                        or (self.pre_filter(data_source)
                            and self.pre_filter(data_target))):
                    torch.save(
                        data_source,
                        osp.join(out_dir,
                                 'patches_source{:06d}.pt'.format(idx)))
                    torch.save(
                        data_target,
                        osp.join(out_dir,
                                 'patches_target{:06d}.pt'.format(idx)))
                    idx += 1
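PatchExtractor is not shown either; from its use above it takes a Data object and a point index and returns the neighbourhood of that point within radius_patch. A plausible sketch under that assumption, not the torch-points3d class:

    import torch

    class PatchExtractor:
        # plausible sketch: crop the ball of radius `radius_patch`
        # centred on the point at index `ind`
        def __init__(self, radius_patch):
            self.radius_patch = radius_patch

        def __call__(self, data, ind):
            center = data.pos[ind]
            mask = torch.norm(data.pos - center, dim=1) < self.radius_patch
            data.pos = data.pos[mask]
            return data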
Code Example #12
    def download(self):
        folder = osp.join(self.raw_dir, "test")
        if files_exist([folder]):  # pragma: no cover
            log.warning("already downloaded {}".format("test"))
            return
        else:
            makedirs(folder)
        ftp = FTP('asrl3.utias.utoronto.ca')
        ftp.login()
        log.info("Download elements in the file {}...".format(folder))
        for name, url in self.DATASETS:
            zip_file = osp.join(folder, name + '.zip')
            log.info("Downloading dataset %s" % name)
            with open(zip_file, 'wb') as archive:
                ftp.retrbinary('RETR ' + url, archive.write)
            with ZipFile(zip_file, 'r') as zip_obj:
                log.info("Extracting dataset %s" % name)
                zip_obj.extractall(folder)
            with os.scandir(osp.join(folder, name)) as directory:
                log.info("Configuring dataset %s" % name)
                for entry in directory:
                    if entry.is_dir():
                        base_path = osp.join(entry.path, entry.name)
                        file_name = base_path + ".xyz"
                        ground_truth_name = base_path + ".gt"
                        pcd_file_name = entry.path + ".pcd"
                        pcd = open3d.io.read_point_cloud(
                            file_name,
                            format="xyz",
                            remove_nan_points=True,
                            remove_infinite_points=True,
                            print_progress=False)
                        ground_truth = numpy.loadtxt(ground_truth_name)
                        pcd.transform(ground_truth)
                        open3d.io.write_point_cloud(pcd_file_name,
                                                    pcd,
                                                    write_ascii=True,
                                                    compressed=False,
                                                    print_progress=False)
                        shutil.rmtree(entry.path)
            os.remove(zip_file)

        gdown.download(
            "https://drive.google.com/uc?id=1marTTFGjlDTb-MLj7pm5zV1u-0IS-xFc",
            osp.join(folder, "p2at_met", "box_map.pcd"),
            quiet=True)
        self.download_pairs(folder)
Code Example #13
    def download(self):
        folder = os.path.join(self.raw_dir, "test")
        if files_exist([folder]):  # pragma: no cover
            log.warning("already downloaded {}".format("test"))
            return
        else:
            makedirs(folder)
        log.info("Download elements in the file {}...".format(folder))
        for name, url in self.DATASETS:
            log.info(f'Downloading sequence {name}')
            filename = os.path.join(folder, name + ".zip")
            gdown.download(url, filename, quiet=False)
            with ZipFile(filename, 'r') as zip_obj:
                zip_obj.extractall(folder)
            os.remove(filename)

        self.download_pairs(folder)
Code Example #14
    def _pre_transform_fragment(self, mod):
        """
        read raw fragment, rotate the raw fragment using the calibration and the given pose,
        pre transform raw_fragment and save it into fragments
        """

        if files_exist([osp.join(self.processed_dir, mod,
                                 "fragment")]):  # pragma: no cover
            return

        in_dir = osp.join(self.raw_dir, "dataset")
        list_drive = self.dict_seq[mod]

        for drive in list_drive:
            out_dir = osp.join(self.processed_dir, mod, "fragment",
                               "{:02d}".format(drive))
            makedirs(out_dir)
            path_frames = osp.join(in_dir, "sequences", "{:02d}".format(drive),
                                   "velodyne")
            T_calib = read_calib_file(
                osp.join(in_dir, "sequences", "{:02d}".format(drive),
                         "calib.txt"))
            all_poses = np.genfromtxt(
                osp.join(in_dir, "refined_poses", "{:02d}.txt".format(drive)))
            list_name_frames = sorted(
                [f for f in os.listdir(path_frames) if "bin" in f])
            for i, name in enumerate(list_name_frames):

                pose = all_poses[i].reshape((3, 4))
                xyzr = np.fromfile(osp.join(path_frames, name),
                                   dtype=np.float32).reshape((-1, 4))
                # velodyne frame -> calibrated frame -> world frame
                xyzr[:, :3] = (xyzr[:, :3].dot(T_calib[:3, :3].T)
                               + T_calib[:3, 3])
                xyzr[:, :3] = xyzr[:, :3].dot(pose[:3, :3].T) + pose[:3, 3]
                # store position of the car to filter some frames
                pos_sensor = pose[:3, :3].dot(T_calib[:3, 3]) + pose[:3, 3]
                data = Data(
                    pos=torch.from_numpy(xyzr[:, :3]),
                    reflectance=torch.from_numpy(xyzr[:, 3]),
                    pos_sensor=torch.from_numpy(pos_sensor),
                )
                out_path = osp.join(out_dir, name.split(".")[0] + ".pt")
                if self.pre_transform is not None:
                    data = self.pre_transform(data)
                torch.save(data, out_path)
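The two in-place dot products above are just the composition of two rigid transforms applied to the raw velodyne points. Written with homogeneous 4x4 matrices (illustrative only; this assumes read_calib_file returns a full 4x4 matrix, which the [:3, :3] / [:3, 3] indexing suggests):

    import numpy as np

    T_pose = np.vstack([pose, [0.0, 0.0, 0.0, 1.0]])  # pad the 3x4 pose
    T = T_pose @ T_calib                              # world <- velodyne
    xyz_h = np.hstack([xyzr[:, :3], np.ones((len(xyzr), 1))])
    xyz_world = (T @ xyz_h.T).T[:, :3]                # same points as above
    pos_sensor = T[:3, 3]                             # matches pos_sensor above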
Code Example #15
 def download(self):
     """
     TODO: download the datasets
     """
     folder = osp.join(self.raw_dir, "test")
     if files_exist([folder]):  # pragma: no cover
         log.warning("already downloaded {}".format("test"))
         return
     else:
         makedirs(folder)
     log.info("Download elements in the file {}...".format(folder))
     req = requests.get(self.url)
     with open(osp.join(folder, self.name + ".zip"), "wb") as archive:
         archive.write(req.content)
     with ZipFile(osp.join(folder, self.name + ".zip"), "r") as zip_obj:
         log.info("extracting dataset {}".format(self.name))
         zip_obj.extractall(folder)
Code Example #16
 def download(self):
     folder_test = osp.join(self.raw_dir, "test")
     if files_exist([folder_test]):  # pragma: no cover
         log.warning("already downloaded {}".format("test"))
         return
     for url_raw in self.list_urls_test:
         url = url_raw.split("\n")[0]
         path = download_url(url, folder_test)
         extract_zip(path, folder_test)
         log.info(path)
         folder = path.split(".zip")[0]
         os.unlink(path)
         path_eval = download_url(url.split(".zip")[0] + "-evaluation.zip", folder)
         extract_zip(path_eval, folder)
         os.unlink(path_eval)
         folder_eval = path_eval.split(".zip")[0]
         for f in os.listdir(folder_eval):
             os.rename(osp.join(folder_eval, f), osp.join(folder, f))
         shutil.rmtree(folder_eval)
Code Example #17
    def _compute_matches_between_fragments(self, mod):

        out_dir = osp.join(self.processed_dir, mod, 'matches')
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)

        for scene_path in os.listdir(osp.join(self.raw_dir, mod)):

            list_seq = sorted([
                f for f in os.listdir(osp.join(self.raw_dir, mod, scene_path))
                if 'seq' in f
            ])
            for seq in list_seq:
                log.info("{}, {}".format(scene_path, seq))
                fragment_dir = osp.join(self.processed_dir, mod, 'fragment',
                                        scene_path, seq)
                list_fragment_path = sorted([
                    osp.join(fragment_dir, f) for f in os.listdir(fragment_dir)
                    if 'fragment' in f
                ])
                log.info("compute_overlap_and_matches")
                ind = 0
                for path1 in list_fragment_path:
                    for path2 in list_fragment_path:
                        if path1 < path2:
                            out_path = osp.join(
                                out_dir, 'matches{:06d}.npy'.format(ind))

                            match = compute_overlap_and_matches(
                                path1, path2, self.max_dist_overlap)
                            if self.verbose:
                                log.info("{} {} overlap={}".format(
                                    match['path_source'],
                                    match['path_target'],
                                    match['overlap']))
                            overlap = np.max(match['overlap'])
                            if (self.min_overlap_ratio < overlap <
                                    self.max_overlap_ratio):
                                np.save(out_path, match)
                                ind += 1
Code Example #18
    def _compute_matches_between_fragments(self, mod):
        out_dir = osp.join(self.processed_dir, mod, 'matches')
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)
        ind = 0
        list_drive = self.dict_seq[mod]
        for drive in list_drive:
            path_fragment = osp.join(self.processed_dir, mod, "fragment",
                                     "{:02d}".format(drive))
            list_name_frames = sorted(
                [f for f in os.listdir(path_fragment) if "pt" in f])

            # pre_compute specific pair
            log.info("Compute the pairs")
            if self.min_dist is not None:
                pair_time_frame = compute_spaced_time_frame(
                    list_name_frames, path_fragment, self.min_dist)
            else:
                pair_time_frame = [
                    (i, j) for i in range(len(list_name_frames))
                    for j in range(len(list_name_frames))
                    if 0 < j - i < self.max_time_distance
                ]
            log.info("Compute the matches")
            for i, j in pair_time_frame:
                out_path = osp.join(out_dir, 'matches{:06d}.npy'.format(ind))
                path1 = osp.join(path_fragment, list_name_frames[i])
                path2 = osp.join(path_fragment, list_name_frames[j])
                data1 = torch.load(path1)
                data2 = torch.load(path2)
                match = compute_overlap_and_matches(data1, data2,
                                                    self.max_dist_overlap)
                match['path_source'] = path1
                match['path_target'] = path2
                match['name_source'] = i
                match['name_target'] = j
                match['scene'] = drive
                np.save(out_path, match)
                ind += 1