def test_jit(self):
    @torch.jit.script
    def op_script(points_3d, camera_matrix):
        return kornia.project_points(points_3d, camera_matrix)

    points_3d = torch.zeros(1, 3)
    camera_matrix = torch.eye(3).expand(1, -1, -1)
    actual = op_script(points_3d, camera_matrix)
    expected = kornia.project_points(points_3d, camera_matrix)
    assert_allclose(actual, expected)
def test_unproject_and_project(self, device, dtype):
    depth = 2 * torch.tensor(
        [[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]], device=device, dtype=dtype
    )
    camera_matrix = torch.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], device=device, dtype=dtype)
    points3d = kornia.depth_to_3d(depth, camera_matrix)
    points2d = kornia.project_points(points3d.permute(0, 2, 3, 1), camera_matrix[:, None, None])
    points2d_expected = kornia.create_meshgrid(4, 3, False, device=device).to(dtype=dtype)
    assert_close(points2d, points2d_expected, atol=1e-4, rtol=1e-4)
def test_project_and_unproject(self):
    point_3d = torch.tensor([[10., 2., 30.]])
    depth = point_3d[..., -1:]
    camera_matrix = torch.tensor([[
        [2746., 0., 991.],
        [0., 2748., 619.],
        [0., 0., 1.],
    ]])
    point_2d = kornia.project_points(point_3d, camera_matrix)
    point_3d_hat = kornia.unproject_points(point_2d, depth, camera_matrix)
    assert_allclose(point_3d, point_3d_hat)
def test_unproject_and_project(self):
    point_2d = torch.tensor([[0., 0.]])
    depth = torch.tensor([[2.]])
    camera_matrix = torch.tensor([
        [1., 0., 0.],
        [0., 1., 0.],
        [0., 0., 1.],
    ])
    point_3d = kornia.unproject_points(point_2d, depth, camera_matrix)
    point_2d_hat = kornia.project_points(point_3d, camera_matrix)
    assert_allclose(point_2d, point_2d_hat)
def warp_frame_depth(
        image_src: torch.Tensor,
        depth_dst: torch.Tensor,
        src_trans_dst: torch.Tensor,
        camera_matrix: torch.Tensor,
        normalize_points: bool = False,
        sampling_mode: str = 'bilinear') -> torch.Tensor:
    # TAKEN FROM KORNIA LIBRARY
    if not isinstance(image_src, torch.Tensor):
        raise TypeError(f"Input image_src type is not a torch.Tensor. Got {type(image_src)}.")

    if not len(image_src.shape) == 4:
        raise ValueError(f"Input image_src must have a shape (B, D, H, W). Got: {image_src.shape}")

    if not isinstance(depth_dst, torch.Tensor):
        raise TypeError(f"Input depth_dst type is not a torch.Tensor. Got {type(depth_dst)}.")

    if not (len(depth_dst.shape) == 4 and depth_dst.shape[-3] == 1):
        raise ValueError(f"Input depth_dst must have a shape (B, 1, H, W). Got: {depth_dst.shape}")

    if not isinstance(src_trans_dst, torch.Tensor):
        raise TypeError(f"Input src_trans_dst type is not a torch.Tensor. "
                        f"Got {type(src_trans_dst)}.")

    if not (len(src_trans_dst.shape) == 3 and src_trans_dst.shape[-2:] == (4, 4)):
        raise ValueError(f"Input src_trans_dst must have a shape (B, 4, 4). "
                         f"Got: {src_trans_dst.shape}.")

    if not isinstance(camera_matrix, torch.Tensor):
        raise TypeError(f"Input camera_matrix type is not a torch.Tensor. "
                        f"Got {type(camera_matrix)}.")

    if not (len(camera_matrix.shape) == 3 and camera_matrix.shape[-2:] == (3, 3)):
        raise ValueError(f"Input camera_matrix must have a shape (B, 3, 3). "
                         f"Got: {camera_matrix.shape}.")

    # unproject the destination depth map to 3d points in the camera frame
    points_3d_dst: torch.Tensor = kornia.depth_to_3d(depth_dst, camera_matrix, normalize_points)  # Bx3xHxW
    points_3d_dst = points_3d_dst.permute(0, 2, 3, 1)  # BxHxWx3

    # transform the 3d points from the destination frame to the source frame
    points_3d_src = kornia.transform_points(src_trans_dst[:, None], points_3d_dst)  # BxHxWx3
    points_3d_src[:, :, :, 2] = torch.relu(points_3d_src[:, :, :, 2])

    # project back to pixels
    camera_matrix_tmp: torch.Tensor = camera_matrix[:, None, None]  # Bx1x1x3x3
    points_2d_src: torch.Tensor = kornia.project_points(points_3d_src, camera_matrix_tmp)  # BxHxWx2

    # normalize points between [-1, 1]
    height, width = depth_dst.shape[-2:]
    points_2d_src_norm: torch.Tensor = kornia.normalize_pixel_coordinates(points_2d_src, height, width)  # BxHxWx2

    return torch.nn.functional.grid_sample(image_src, points_2d_src_norm, align_corners=True, mode=sampling_mode)
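# A minimal usage sketch for warp_frame_depth above. The shapes and values here are
# illustrative assumptions (identity pose and intrinsics), not taken from any project.
import torch
import kornia

batch_size, height, width = 1, 32, 48
image_src = torch.rand(batch_size, 3, height, width)        # B x C x H x W source image
depth_dst = torch.ones(batch_size, 1, height, width)        # B x 1 x H x W destination depth
src_trans_dst = torch.eye(4).expand(batch_size, -1, -1)     # B x 4 x 4 rigid transform (dst -> src)
camera_matrix = torch.eye(3).expand(batch_size, -1, -1)     # B x 3 x 3 pinhole intrinsics

warped = warp_frame_depth(image_src, depth_dst, src_trans_dst, camera_matrix)
assert warped.shape == (batch_size, 3, height, width)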
def forward(self, frame_id, point_id):
    # indexing: this is the same as torch.where  TODO
    indexKF = torch.where(frame_id == self.idxKF)[1]
    indexMP = torch.where(point_id == self.idxMP)[1]
    # In the test set, tKF is the Twc inverse. We may need to store something else in Memory.
    points = (self.tKF[indexKF] @ self.tMPhomo[indexMP].unsqueeze(-1)).squeeze(-1)
    Pc = kn.convert_points_from_homogeneous(points)
    return kn.project_points(Pc, self.K)
def test_jit(self, device, dtype):
    @torch.jit.script
    def op_script(points_3d, camera_matrix):
        return kornia.project_points(points_3d, camera_matrix)

    points_3d = torch.zeros(1, 3, device=device, dtype=dtype)
    camera_matrix = torch.eye(3, device=device, dtype=dtype).expand(1, -1, -1)
    actual = op_script(points_3d, camera_matrix)
    expected = kornia.project_points(points_3d, camera_matrix)
    assert_allclose(actual, expected, atol=1e-4, rtol=1e-4)
def test_unproject_and_project(self, device, dtype):
    point_2d = torch.tensor([[0., 0.]], device=device, dtype=dtype)
    depth = torch.tensor([[2.]], device=device, dtype=dtype)
    camera_matrix = torch.tensor([
        [1., 0., 0.],
        [0., 1., 0.],
        [0., 0., 1.],
    ], device=device, dtype=dtype)
    point_3d = kornia.unproject_points(point_2d, depth, camera_matrix)
    point_2d_hat = kornia.project_points(point_3d, camera_matrix)
    assert_allclose(point_2d, point_2d_hat, atol=1e-4, rtol=1e-4)
def get_non_differentiable_rectangle_depth_estimation(reference_pose_torch, measurement_pose_torch,
                                                      previous_depth_torch, full_K_torch,
                                                      half_K_torch, original_width, original_height):
    batch_size, _, _ = reference_pose_torch.shape
    half_width = int(original_width / 2)
    half_height = int(original_height / 2)

    # relative transform from the measurement frame into the reference frame
    trans = torch.bmm(torch.inverse(reference_pose_torch), measurement_pose_torch)

    # unproject the previous depth map to 3d points and move them into the reference frame
    points_3d_src = kornia.depth_to_3d(previous_depth_torch, full_K_torch, normalize_points=False)
    points_3d_src = points_3d_src.permute(0, 2, 3, 1)
    points_3d_dst = kornia.transform_points(trans[:, None], points_3d_src)
    points_3d_dst = points_3d_dst.view(batch_size, -1, 3)

    # sort points by depth (largest first); the per-pixel deduplication below keeps the first occurrence
    z_values = points_3d_dst[:, :, -1]
    z_values = torch.relu(z_values)
    sorting_indices = torch.argsort(z_values, descending=True)
    z_values = torch.gather(z_values, dim=1, index=sorting_indices)

    sorting_indices_for_points = torch.stack([sorting_indices] * 3, dim=-1)
    points_3d_dst = torch.gather(points_3d_dst, dim=1, index=sorting_indices_for_points)

    # project into the half-resolution image and keep only points that land inside it
    projections = torch.round(kornia.project_points(points_3d_dst, half_K_torch.unsqueeze(1))).long()
    is_valid_below = (projections[:, :, 0] >= 0) & (projections[:, :, 1] >= 0)
    is_valid_above = (projections[:, :, 0] < half_width) & (projections[:, :, 1] < half_height)
    is_valid = is_valid_below & is_valid_above

    depth_hypothesis = torch.zeros(size=(batch_size, 1, half_height, half_width)).cuda()
    for projection_index in range(0, batch_size):
        valid_points_zs = z_values[projection_index][is_valid[projection_index]]
        valid_projections = projections[projection_index][is_valid[projection_index]]
        i_s = valid_projections[:, 1]
        j_s = valid_projections[:, 0]
        # deduplicate per pixel: np.unique keeps the first occurrence of each flattened (i, j) index
        ij_combined = i_s * half_width + j_s
        _, ij_combined_unique_indices = np.unique(ij_combined.cpu().numpy(), return_index=True)
        ij_combined_unique_indices = torch.from_numpy(ij_combined_unique_indices).long().cuda()
        i_s = i_s[ij_combined_unique_indices]
        j_s = j_s[ij_combined_unique_indices]
        valid_points_zs = valid_points_zs[ij_combined_unique_indices]
        # scatter the kept depths into the half-resolution hypothesis map
        torch.index_put_(depth_hypothesis[projection_index, 0], (i_s, j_s), valid_points_zs)
    return depth_hypothesis
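# An illustrative call with assumed shapes and identity poses/intrinsics. The helper
# allocates CUDA tensors internally, so this sketch only executes when a GPU is present.
import torch

if torch.cuda.is_available():
    b, h, w = 1, 64, 96
    reference_pose = torch.eye(4, device='cuda').repeat(b, 1, 1)    # B x 4 x 4 camera pose
    measurement_pose = torch.eye(4, device='cuda').repeat(b, 1, 1)  # B x 4 x 4 camera pose
    previous_depth = torch.ones(b, 1, h, w, device='cuda')          # B x 1 x H x W depth map
    full_K = torch.eye(3, device='cuda').repeat(b, 1, 1)            # intrinsics at full resolution
    half_K = full_K.clone()                                         # intrinsics at half resolution (assumed pre-scaled)

    hypothesis = get_non_differentiable_rectangle_depth_estimation(
        reference_pose, measurement_pose, previous_depth, full_K, half_K, w, h)
    assert hypothesis.shape == (b, 1, h // 2, w // 2)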
def test_unproject_and_project(self, device):
    depth = 2 * torch.tensor([[[
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
        [1., 1., 1.],
    ]]]).to(device)
    camera_matrix = torch.tensor([[
        [1., 0., 0.],
        [0., 1., 0.],
        [0., 0., 1.],
    ]]).to(device)
    points3d = kornia.depth_to_3d(depth, camera_matrix)
    points2d = kornia.project_points(points3d.permute(0, 2, 3, 1), camera_matrix[:, None, None])
    points2d_expected = kornia.create_meshgrid(4, 3, False).to(device)
    assert_allclose(points2d, points2d_expected)
def op_script(points_3d, camera_matrix):
    return kornia.project_points(points_3d, camera_matrix)
def test_smoke_batch_multi(self):
    point_3d = torch.zeros(2, 4, 3)
    camera_matrix = torch.eye(3).expand(2, 4, -1, -1)
    point_2d = kornia.project_points(point_3d, camera_matrix)
    assert point_2d.shape == (2, 4, 2)
def test_smoke(self):
    point_3d = torch.zeros(1, 3)
    camera_matrix = torch.eye(3).expand(1, -1, -1)
    point_2d = kornia.project_points(point_3d, camera_matrix)
    assert point_2d.shape == (1, 2)
def test_smoke_batch(self, device):
    point_3d = torch.zeros(2, 3).to(device)
    camera_matrix = torch.eye(3).expand(2, -1, -1).to(device)
    point_2d = kornia.project_points(point_3d, camera_matrix)
    assert point_2d.shape == (2, 2)