Example #1
    def __getitem__(self, idx):
        if idx < 0 or idx >= len(self):
            # raise IndexError rather than returning None so that iteration
            # over the dataset (and default collation) terminates correctly
            raise IndexError(idx)
        sparse_img = Image.open(str(self.sparse_depth_paths[idx]))
        gt_img = Image.open(str(self.gt_paths[idx]))

        # convert to meters
        # TODO float16 or float32
        sparse_img = np.array(sparse_img, dtype=np.float32) / self.norm_factor
        gt_img = np.array(gt_img, dtype=np.float32) / self.norm_factor
        sparse_img = self._crop_img(sparse_img)
        gt_img = self._crop_img(gt_img)

        # convert to torch tensors with a leading channel dimension
        sparse_img = utils.to_torch(sparse_img[None])
        gt_img = utils.to_torch(gt_img[None])

        if self.load_rgb:
            rgb_img = self._read_rgb_image(str(self.rgb_paths[idx]))

        result_dict = {
            keys.SPARSE_DEPTH: sparse_img,
            keys.ANNOTATED_DEPTH: gt_img,
        }

        if self.load_rgb:
            result_dict[keys.ALIGNED_RGB] = rgb_img
        return result_dict
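
Every example on this page funnels NumPy data through a small `utils.to_torch` helper (with `utils.to_numpy` as its inverse). The project's actual implementation is not shown here; a minimal, hypothetical sketch of what the call sites above appear to assume is:

import numpy as np
import torch

def to_torch(np_array):
    # hypothetical sketch: wrap a NumPy array as a float32 torch tensor
    return torch.from_numpy(np.ascontiguousarray(np_array)).float()

def to_numpy(tensor):
    # inverse helper: detach from the graph and move to CPU before converting
    return tensor.detach().cpu().numpy()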
Example #2
    def get_frame_space_scaling_homography(self):
        # map the canonical frame corners to pixel coordinates
        src_4pts = utils.to_torch(utils.FULL_CANON4PTS_NP())
        dest_4pts = utils.to_torch(
            np.array([[0, 0], [0, self.frame_h],
                      [self.frame_w, self.frame_h], [self.frame_w, 0]],
                     dtype=np.float32))
        # get_perspective_transform expects batched inputs, hence [None]
        scaling_transformation = utils.get_perspective_transform(
            src_4pts[None], dest_4pts[None])
        scaling_transformation = utils.to_numpy(scaling_transformation[0])
        return scaling_transformation
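
`utils.get_perspective_transform` is not defined on this page; judging by the call site it presumably plays the role of OpenCV's `cv2.getPerspectiveTransform`, solving for the 3x3 matrix that maps one quadrilateral onto another. A rough, hypothetical single-image equivalent for a 1280x720 frame (the corner ordering is an assumption):

import cv2
import numpy as np

src = np.array([[-0.5, -0.5], [-0.5, 0.5],
                [0.5, 0.5], [0.5, -0.5]], dtype=np.float32)  # canonical corners
dst = np.array([[0, 0], [0, 720],
                [1280, 720], [1280, 0]], dtype=np.float32)   # pixel corners
H = cv2.getPerspectiveTransform(src, dst)  # 3x3 scaling homography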
Example #3
def get_default_canon4pts(batch_size, canon4pts_type: str):
    if canon4pts_type == 'lower':
        lower_canon4pts = utils.LOWER_CANON4PTS_NP()
        lower_canon4pts = np.tile(lower_canon4pts, (batch_size, 1, 1))
        lower_canon4pts = utils.to_torch(lower_canon4pts)
        return lower_canon4pts
    elif canon4pts_type == 'full':
        full_canon4pts = utils.FULL_CANON4PTS_NP()
        full_canon4pts = np.tile(full_canon4pts, (batch_size, 1, 1))
        full_canon4pts = utils.to_torch(full_canon4pts)
        return full_canon4pts
    else:
        raise ValueError('unknown canon4pts type: {0}'.format(canon4pts_type))
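
For instance, the 'full' canonical points are tiled along the batch dimension (shapes assume the canonical point arrays are (4, 2), as the assert in Example #6 below implies):

canon4pts = get_default_canon4pts(batch_size=8, canon4pts_type='full')
print(canon4pts.shape)  # torch.Size([8, 4, 2])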
Example #4

def np_img_to_torch_img(np_img):
    '''Convert a NumPy image to a bilinear-samplable torch image.
    NumPy uses Height x Width x Channels;
    torch uses Channels x Height x Width.

    Arguments:
        np_img {np.ndarray} -- image in (B, H, W, C), (H, W, C), or (H, W) layout
    '''
    if len(np_img.shape) == 4 and np_img.shape[3] in (1, 3):
        # batched HWC -> batched CHW
        return utils.to_torch(np.transpose(np_img, (0, 3, 1, 2)))
    elif len(np_img.shape) == 3 and np_img.shape[2] in (1, 3):
        # HWC -> CHW
        return utils.to_torch(np.transpose(np_img, (2, 0, 1)))
    elif len(np_img.shape) == 2:
        # single-channel (H, W) image needs no transpose
        return utils.to_torch(np_img)
    else:
        raise ValueError('cannot process image with shape {0}'.format(np_img.shape))
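
A quick shape check of the layout conversion (assumes the `to_torch` sketch above):

img_hwc = np.zeros((480, 640, 3), dtype=np.float32)  # H x W x C
img_chw = np_img_to_torch_img(img_hwc)
print(img_chw.shape)  # torch.Size([3, 480, 640])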
Example #5
    def get_warped_tmp_by_id(self, data_id):
        homo_mat = self.get_homography_by_id(data_id)
        # warp the template into frame space with the retrieved homography
        warped_tmp = warp.warp_image(self.template_torch,
                                     utils.to_torch(homo_mat),
                                     out_shape=(self.frame_h, self.frame_w))
        warped_tmp = utils.torch_img_to_np_img(warped_tmp[0])
        return warped_tmp
Example #6
def get_four_corners(homo_mat, canon4pts=None):
    '''
    Calculate the 4 corners after transformation, from frame to template,
    assuming the original 4 corners of the frame are [+-0.5, +-0.5].
    Note: this function supports batch processing.

    Arguments:
        homo_mat {torch.Tensor} -- homography, shape: (B, 3, 3) or (3, 3)
        canon4pts {torch.Tensor} -- optional canonical corners, shape: (4, 2)

    Returns:
        xy_warped -- torch.Size([B, 2, 4])
    '''
    # promote a single homography to a batch of one
    if homo_mat.shape == (3, 3):
        homo_mat = homo_mat[None]
    assert homo_mat.shape[1:] == (3, 3)
    if canon4pts is None:
        canon4pts = utils.to_torch(utils.FULL_CANON4PTS_NP())
    assert canon4pts.shape == (4, 2)
    x, y = canon4pts[:, 0], canon4pts[:, 1]
    # append ones for homogeneous coordinates
    xy = torch.stack([x, y, torch.ones_like(x)])
    # warp points to model coordinates
    xy_warped = torch.matmul(homo_mat, xy)  # H.bmm(xy)
    xy_warped, z_warped = xy_warped.split(2, dim=1)
    xy_warped = xy_warped / (z_warped + 1e-8)
    return xy_warped
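
A quick sanity check (usage sketch): with the identity homography, the warped corners are just the canonical corners.

homo = torch.eye(3)[None]         # (1, 3, 3) identity homography
corners = get_four_corners(homo)  # torch.Size([1, 2, 4])
# with the default FULL_CANON4PTS these are the [+-0.5, +-0.5] corners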
Example #7
    def get_image_by_index(self, index):
        img = self.raw_data.root.frames[index]
        img = img[..., [2, 1, 0]]  # BGR -> RGB
        img = img / 255.0
        img = utils.to_torch(img).permute(2, 0, 1)  # HWC -> CHW
        if self.opt.need_single_image_normalization:
            img = image_utils.normalize_single_image(img)
        return img
Example #8
    def _read_rgb_image(self, rgb_path: str):
        rgb_img = Image.open(rgb_path)
        # normalize the color to [-1, 1]
        rgb_img = (np.array(rgb_img, dtype=np.float32) / 255.0) * 2.0 - 1.0
        rgb_img = self._crop_img(rgb_img)
        rgb_img = utils.to_torch(np.transpose(rgb_img, (2, 0, 1)))  # HWC -> CHW
        rgb_img = image_utils.normalize_single_image(rgb_img)
        return rgb_img
Example #9

    def get_homography_by_index(self, index):
        homography = self.raw_data.root.homographies[index]
        homography = utils.to_torch(homography)
        # normalize so that the bottom-right entry is 1
        homography = homography / homography[2:3, 2:3]
        return homography
Example #10

def squash_image(image):
    '''Squash image values to the range [0, 1].
    '''
    if isinstance(image, np.ndarray):
        return np.interp(image, (image.min(), image.max()), (0, 1))
    elif isinstance(image, torch.Tensor):
        # np.interp operates on NumPy arrays, so round-trip through NumPy
        image_np = utils.to_numpy(image)
        image_np = np.interp(image_np, (image_np.min(), image_np.max()), (0, 1))
        return utils.to_torch(image_np)
    else:
        raise ValueError('unsupported data type: {0}'.format(type(image)))
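
For example, values are rescaled linearly between the array's minimum and maximum:

arr = np.array([[-2.0, 0.0], [2.0, 6.0]], dtype=np.float32)
print(squash_image(arr))  # [[0.0, 0.25], [0.5, 1.0]]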
Example #11
    def load_template(self):
        self.template_np = imageio.imread(self.template_path,
                                          pilmode='RGB') / 255.0
        # reuse the already-loaded image instead of reading the file twice
        self.template_torch = utils.to_torch(self.template_np).permute(2, 0, 1)
        # collapse RGB to a single grayscale channel
        self.template_torch = torch.mean(self.template_torch, 0, keepdim=True)
        # append normalized coordinate channels in [-1, 1]
        x_coord, y_coord = torch.meshgrid([
            torch.linspace(-1, 1, steps=self.template_torch.shape[-2]),
            torch.linspace(-1, 1, steps=self.template_torch.shape[-1])
        ])
        x_coord = x_coord.to(self.template_torch.device)[None]
        y_coord = y_coord.to(self.template_torch.device)[None]
        self.template_torch = torch.cat(
            [self.template_torch, x_coord, y_coord], dim=0)
Example #12
    def __getitem__(self, idx):
        sparse_img = Image.open(str(self.sparse_depth_paths[idx]))
        gt_img = Image.open(str(self.gt_paths[idx]))
        sparse_nn_depth, sparse_confidence_map = self._read_nn_filled_image(
            str(self.sparse_nn_depth_paths[idx]))
        gt_nn_depth, gt_confidence_map = self._read_nn_filled_image(
            str(self.gt_nn_paths[idx]))
        # convert to meters
        sparse_img = np.array(sparse_img, dtype=np.float32) / self.norm_factor
        gt_img = np.array(gt_img, dtype=np.float32) / self.norm_factor

        sparse_img = self._crop_img(sparse_img)
        gt_img = self._crop_img(gt_img)

        # convert to torch tensors with a leading channel dimension
        sparse_img = utils.to_torch(sparse_img[None])
        gt_img = utils.to_torch(gt_img[None])
        sparse_nn_depth = utils.to_torch(sparse_nn_depth[None])
        gt_nn_depth = utils.to_torch(gt_nn_depth[None])
        # normalize confidence map to [-1, 1]
        sparse_confidence_map = (utils.to_torch(sparse_confidence_map[None]) /
                                 255.0) * 2.0 - 1.0
        gt_confidence_map = (utils.to_torch(gt_confidence_map[None]) /
                             255.0) * 2.0 - 1.0

        result_dict = {
            keys.SPARSE_DEPTH: sparse_img,
            keys.NN_FILLED_SPARSE_DEPTH: sparse_nn_depth,
            keys.NN_FILLED_SPARSE_CONFIDENCE: sparse_confidence_map,
            keys.ANNOTATED_DEPTH: gt_img,
            keys.NN_FILLED_ANNOTATED_DEPTH: gt_nn_depth,
            keys.NN_FILLED_ANNOTATED_CONFIDENCE: gt_confidence_map,
        }
        if self.load_rgb:
            rgb_img = self._read_rgb_image(str(self.rgb_paths[idx]))
            result_dict[keys.ALIGNED_RGB] = rgb_img

        # random flip augmentation
        if random.random() > self.thres:
            result_dict = self.flip_result_dict(result_dict)

        return result_dict
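
Because each sample tensor already carries a leading channel dimension of 1, default collation stacks samples into (B, 1, H, W) batches. A hypothetical usage sketch, where `dataset` stands in for an instance of the class above:

from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=8, shuffle=True)
for batch in loader:
    sparse = batch[keys.SPARSE_DEPTH]   # (8, 1, H, W)
    gt = batch[keys.ANNOTATED_DEPTH]    # (8, 1, H, W)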
Example #13
    def get_homography_by_index(self, index):
        homography = self.raw_data.root.homographies[index]
        homography = utils.to_torch(homography)
        # normalize so that the bottom-right entry is 1
        homography = homography / homography[2:3, 2:3]
        return homography
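
Examples #9 and #13 both divide the homography by its bottom-right entry. A homography is defined only up to scale, so this picks a canonical representative without changing the mapping:

H = np.array([[2.0, 0.0, 0.0],
              [0.0, 2.0, 0.0],
              [0.0, 0.0, 2.0]])
# H and H / H[2, 2] map points identically in homogeneous coordinates
H_normalized = H / H[2, 2]  # the identity matrix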