Example #1
    def _pre_transform_fragment(self, mod):
        """
        Apply pre_transform to the raw fragments and save the results into
        the fragment directory.
        """
        out_dir = osp.join(self.processed_dir, mod, 'fragment')
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)
        for scene_path in os.listdir(osp.join(self.raw_dir, mod)):
            # TODO list the right sequences.
            list_seq = [
                f for f in os.listdir(osp.join(self.raw_dir, mod, scene_path))
                if 'seq' in f
            ]
            for seq in list_seq:
                in_dir = osp.join(self.processed_dir, mod, 'raw_fragment',
                                  scene_path, seq)
                out_dir = osp.join(self.processed_dir, mod, 'fragment',
                                   scene_path, seq)
                makedirs(out_dir)
                list_fragment_path = sorted(
                    [f for f in os.listdir(in_dir) if 'fragment' in f])
                for path in list_fragment_path:
                    data = torch.load(osp.join(in_dir, path))
                    if self.pre_transform is not None:
                        data = self.pre_transform(data)
                    torch.save(data, osp.join(out_dir, path))
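
Here, self.pre_transform can be any callable that takes a Data object and returns one. A minimal sketch of such a callable, using a hypothetical centering transform (CenterPos is an illustration, not part of the codebase):

import torch
from torch_geometric.data import Data


class CenterPos(object):
    """Hypothetical pre_transform: center the point positions at the origin."""

    def __call__(self, data):
        data.pos = data.pos - data.pos.mean(dim=0, keepdim=True)
        return data


# usage: mirrors the load -> transform -> save loop above
data = Data(pos=torch.rand(100, 3))
data = CenterPos()(data)
torch.save(data, 'fragment_000000.pt')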
Example #2
    def _create_fragment(self, mod):
        r"""
        create fragments from the rgbd frames ie a partial reconstruction of
        the scene with some frames(usually 50).
        We will only use the first sequence for each scene
        """

        print("Create fragment from RGBD frames...")
        if files_exist([osp.join(self.processed_dir, mod,
                                 'raw_fragment')]):  # pragma: no cover
            log.warning("the fragments on mode {} already exists".format(mod))
            return
        for scene_path in os.listdir(osp.join(self.raw_dir, mod)):
            # TODO list the right sequences.
            list_seq = [
                f for f in os.listdir(osp.join(self.raw_dir, mod, scene_path))
                if 'seq' in f
            ]
            for seq in list_seq:
                frames_dir = osp.join(self.raw_dir, mod, scene_path, seq)
                out_dir = osp.join(self.processed_dir, mod, 'raw_fragment',
                                   scene_path, seq)
                makedirs(out_dir)
                path_intrinsic = osp.join(self.raw_dir, mod, scene_path,
                                          'camera-intrinsics.txt')
                list_path_frames = sorted([
                    osp.join(frames_dir, f) for f in os.listdir(frames_dir)
                    if 'png' in f and 'depth' in f
                ])
                # list_path_color = sorted([osp.join(frames_dir, f)
                #                          for f in os.listdir(frames_dir)
                #                          if 'png' in f and 'color' in f])
                list_path_trans = sorted([
                    osp.join(frames_dir, f) for f in os.listdir(frames_dir)
                    if 'pose' in f and 'txt' in f
                ])
                # compute each fragment and save it
                if not self.is_fine:
                    rgbd2fragment_rough(list_path_frames,
                                        path_intrinsic,
                                        list_path_trans,
                                        out_dir,
                                        self.num_frame_per_fragment,
                                        pre_transform=None)
                else:
                    assert len(list_path_frames) == len(list_path_trans), \
                        "For the sequence {}, the number of frames " \
                        "and the number of poses differ".format(frames_dir)
                    rgbd2fragment_fine(list_path_frames,
                                       path_intrinsic,
                                       list_path_trans,
                                       out_dir,
                                       self.num_frame_per_fragment,
                                       voxel_size=self.tsdf_voxel_size,
                                       pre_transform=None,
                                       depth_thresh=self.depth_thresh,
                                       limit_size=self.limit_size)
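
Both rgbd2fragment_rough and rgbd2fragment_fine consume the sorted frame and pose lists in groups of num_frame_per_fragment. A minimal sketch of that grouping, assuming consecutive chunks (the chunk helper below is an illustration, not part of the codebase):

def chunk(paths, num_frame_per_fragment):
    """Yield consecutive groups of frame paths, one group per fragment."""
    for i in range(0, len(paths), num_frame_per_fragment):
        yield paths[i:i + num_frame_per_fragment]


list_path_frames = ['frame-000000.depth.png', 'frame-000001.depth.png',
                    'frame-000002.depth.png']
for fragment_id, frames in enumerate(chunk(list_path_frames, 2)):
    print(fragment_id, frames)  # fragment 0 gets 2 frames, fragment 1 gets 1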
Example #3
    def _compute_points_on_fragments(self, mod):
        """
        compute descriptors on fragments and save fragments with points on a pt file.
        """
        out_dir = osp.join(self.processed_dir, mod, 'fragment')
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)
        ind = 0
        # table mapping each fragment number to its metadata
        self.table = dict()
        tot = 0
        for scene_path in os.listdir(osp.join(self.raw_dir, mod)):

            fragment_dir = osp.join(self.raw_dir, mod, scene_path)
            list_fragment_path = sorted([
                osp.join(fragment_dir, f) for f in os.listdir(fragment_dir)
                if 'ply' in f
            ])

            for i, fragment_path in enumerate(list_fragment_path):
                out_path = osp.join(out_dir, 'fragment_{:06d}.pt'.format(ind))

                # read ply file
                with open(fragment_path, 'rb') as f:
                    data = PlyData.read(f)
                pos = [
                    torch.tensor(data['vertex'][axis])
                    for axis in ['x', 'y', 'z']
                ]
                pos = torch.stack(pos, dim=-1)

                # compute keypoints indices
                data = Data(pos=pos)
                if self.pre_transform is not None:
                    data = self.pre_transform(data)
                data = self.detector(data)
                torch.save(data, out_path)
                num_points = getattr(data, 'keypoints', data.pos).shape[0]
                tot += num_points
                self.table[ind] = {
                    'in_path': fragment_path,
                    'scene_path': scene_path,
                    'out_path': out_path,
                    'num_points': num_points
                }
                ind += 1
        self.table['total_num_points'] = tot

        # save the table to disk as JSON
        with open(osp.join(out_dir, 'table.txt'), 'w') as f:
            json.dump(self.table, f)
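
The table written above can later be read back to map a global fragment index to its files. One detail worth showing: json.dump serializes the integer keys as strings, so lookups on reload need str(ind). A small sketch, where the 'processed/test/fragment' path is a hypothetical example:

import json
import os.path as osp

out_dir = 'processed/test/fragment'  # hypothetical example path
with open(osp.join(out_dir, 'table.txt'), 'r') as f:
    table = json.load(f)

# integer keys were serialized as strings by json.dump
entry = table[str(0)]
print(entry['in_path'], entry['num_points'])
print('total points:', table['total_num_points'])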
Example #4
    def _pre_transform_fragments_ply(self):
        """
        Apply pre_transform to the fragments (.ply files) and save the results.
        """
        out_dir = osp.join(self.processed_dir, 'fragment')
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)
        ind = 0
        # table mapping each fragment number to its metadata
        self.table = dict()

        for scene_path in os.listdir(osp.join(self.raw_dir, "raw_fragment")):

            fragment_dir = osp.join(self.raw_dir,
                                    "raw_fragment",
                                    scene_path)
            list_fragment_path = sorted([f
                                         for f in os.listdir(fragment_dir)
                                         if 'ply' in f])

            for i, f_p in enumerate(list_fragment_path):
                fragment_path = osp.join(fragment_dir, f_p)
                out_path = osp.join(out_dir, 'fragment_{:06d}.pt'.format(ind))

                # read ply file
                with open(fragment_path, 'rb') as f:
                    data = PlyData.read(f)
                pos = [
                    torch.tensor(data['vertex'][axis])
                    for axis in ['x', 'y', 'z']
                ]
                pos = torch.stack(pos, dim=-1)
                data = Data(pos=pos)
                if self.pre_transform is not None:
                    data = self.pre_transform(data)

                # compute keypoint indices on randomly sampled points
                if self.num_random_pt > 0:
                    detector = RandomDetector(self.num_random_pt)
                    data = detector(data)
                torch.save(data, out_path)
                self.table[ind] = {'in_path': fragment_path,
                                   'scene_path': scene_path,
                                   'fragment_name': f_p,
                                   'out_path': out_path}
                ind += 1

        # save the table to disk as JSON
        with open(osp.join(out_dir, 'table.json'), 'w') as f:
            json.dump(self.table, f)
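
RandomDetector is imported from the same codebase and is not shown here. Judging from how _compute_points_on_fragments reads data.keypoints back above, a plausible stand-in samples num_random_pt random indices; the sketch below is an assumption, not the library's implementation:

import torch
from torch_geometric.data import Data


class RandomDetectorSketch(object):
    """Hypothetical stand-in: attach num_points random keypoint indices."""

    def __init__(self, num_points):
        self.num_points = num_points

    def __call__(self, data):
        # sample indices with replacement from the fragment's points
        data.keypoints = torch.randint(0, data.pos.shape[0], (self.num_points,))
        return data


data = RandomDetectorSketch(64)(Data(pos=torch.rand(1000, 3)))
print(data.keypoints.shape)  # torch.Size([64])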
Example #5
    def _save_patches(self, mod):
        """
        save patch to load it offline for the training
        """
        p_extractor = PatchExtractor(self.radius_patch)
        out_dir = osp.join(self.processed_dir, mod, 'patches')
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)
        match_dir = osp.join(self.processed_dir, mod, 'matches')
        idx = 0
        for i in range(len(os.listdir(match_dir))):
            match = np.load(osp.join(match_dir, 'matches{:06d}.npy'.format(i)),
                            allow_pickle=True).item()

            for _ in range(self.num_random_pt):
                data_source = torch.load(match['path_source'])
                data_target = torch.load(match['path_target'])
                rand = np.random.randint(0, len(match['pair']))
                data_source = p_extractor(data_source, match['pair'][rand][0])
                data_target = p_extractor(data_target, match['pair'][rand][1])
                if self.pre_transform_patch is not None:
                    data_source = self.pre_transform_patch(data_source)
                    data_target = self.pre_transform_patch(data_target)
                if self.pre_filter is not None:
                    if (self.pre_filter(data_source)
                            and self.pre_filter(data_target)):

                        torch.save(
                            data_source,
                            osp.join(out_dir,
                                     'patches_source{:06d}.pt'.format(idx)))
                        torch.save(
                            data_target,
                            osp.join(out_dir,
                                     'patches_target{:06d}.pt'.format(idx)))
                        idx += 1
                else:
                    torch.save(
                        data_source,
                        osp.join(out_dir,
                                 'patches_source{:06d}.pt'.format(idx)))
                    torch.save(
                        data_target,
                        osp.join(out_dir,
                                 'patches_target{:06d}.pt'.format(idx)))
                    idx += 1
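
PatchExtractor is likewise imported from the codebase. The sketch below captures the role it plays here, under the assumption that it crops a ball of radius_patch around the point at the given index (PatchExtractorSketch is an illustration, not the actual class):

import torch
from torch_geometric.data import Data


class PatchExtractorSketch(object):
    """Hypothetical stand-in: crop a ball of radius_patch around one point."""

    def __init__(self, radius_patch):
        self.radius_patch = radius_patch

    def __call__(self, data, idx):
        center = data.pos[idx]
        dist = torch.norm(data.pos - center, dim=1)
        mask = dist < self.radius_patch
        return Data(pos=data.pos[mask])


patch = PatchExtractorSketch(0.3)(Data(pos=torch.rand(1000, 3)), 0)
print(patch.pos.shape)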
Example #6
    def _compute_matches_between_fragments(self, mod):
        """
        Compute the overlap and matches between every pair of fragments of a
        sequence and save the pairs whose maximum overlap lies between
        min_overlap_ratio and max_overlap_ratio.
        """
        out_dir = osp.join(self.processed_dir, mod, 'matches')
        if files_exist([out_dir]):  # pragma: no cover
            return
        makedirs(out_dir)

        for scene_path in os.listdir(osp.join(self.raw_dir, mod)):

            list_seq = sorted([
                f for f in os.listdir(osp.join(self.raw_dir, mod, scene_path))
                if 'seq' in f
            ])
            for seq in list_seq:
                log.info("{}, {}".format(scene_path, seq))
                fragment_dir = osp.join(self.processed_dir, mod, 'fragment',
                                        scene_path, seq)
                list_fragment_path = sorted([
                    osp.join(fragment_dir, f) for f in os.listdir(fragment_dir)
                    if 'fragment' in f
                ])
                log.info("compute_overlap_and_matches")
                ind = 0
                for path1 in list_fragment_path:
                    for path2 in list_fragment_path:
                        if path1 < path2:
                            out_path = osp.join(
                                out_dir, 'matches{:06d}.npy'.format(ind))

                            match = compute_overlap_and_matches(
                                path1, path2, self.max_dist_overlap)
                            if self.verbose:
                                log.info("{} {} overlap={}".format(
                                    match['path_source'],
                                    match['path_target'],
                                    match['overlap']))
                            max_overlap = np.max(match['overlap'])
                            if (self.min_overlap_ratio < max_overlap
                                    < self.max_overlap_ratio):
                                np.save(out_path, match)
                                ind += 1
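
compute_overlap_and_matches is part of the registration utilities and is not reproduced here. As a rough sketch of the underlying idea, assuming 'overlap' measures the fraction of points in one fragment that have a neighbor within max_dist_overlap in the other (the function below is an illustration, not the actual implementation):

import numpy as np
from scipy.spatial import cKDTree


def overlap_ratio_sketch(pos_source, pos_target, max_dist_overlap):
    """Fraction of source points with a target neighbor within max_dist_overlap."""
    tree = cKDTree(pos_target)
    dist, _ = tree.query(pos_source, k=1, distance_upper_bound=max_dist_overlap)
    # query returns inf for points with no neighbor inside the bound
    return float(np.mean(np.isfinite(dist)))


pos_source = np.random.rand(500, 3)
pos_target = pos_source + 0.001 * np.random.randn(500, 3)
print(overlap_ratio_sketch(pos_source, pos_target, max_dist_overlap=0.01))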