Example #1
    def test_unproject_normalized(self, device):
        # this is for default normalize_points=False
        depth = 2 * torch.tensor([[[
            [1., 1., 1.],
            [1., 1., 1.],
            [1., 1., 1.],
            [1., 1., 1.],
        ]]]).to(device)

        camera_matrix = torch.tensor([[
            [1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.],
        ]]).to(device)

        points3d_expected = torch.tensor([[[
            [0., 2., 4.],
            [0., 2., 4.],
            [0., 2., 4.],
            [0., 2., 4.],
        ], [
            [0., 0., 0.],
            [2., 2., 2.],
            [4., 4., 4.],
            [6., 6., 6.],
        ], [
            [2., 2., 2.],
            [2., 2., 2.],
            [2., 2., 2.],
            [2., 2., 2.],
        ]]]).to(device)

        points3d = kornia.depth_to_3d(
            depth, camera_matrix)  # default is normalize_points=False
        assert_allclose(points3d, points3d_expected)
Example #2
    def test_unproject(self, device):
        depth = 2 * torch.tensor([[[
            [1., 1., 1.],
            [1., 1., 1.],
            [1., 1., 1.],
            [1., 1., 1.],
        ]]]).to(device)

        camera_matrix = torch.tensor([[
            [1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.],
        ]]).to(device)

        points3d_expected = torch.tensor([[[
            [0.0000, 1.4142, 1.7889],
            [0.0000, 1.1547, 1.6330],
            [0.0000, 0.8165, 1.3333],
            [0.0000, 0.6030, 1.0690],
        ], [
            [0.0000, 0.0000, 0.0000],
            [1.4142, 1.1547, 0.8165],
            [1.7889, 1.6330, 1.3333],
            [1.8974, 1.8091, 1.6036],
        ], [
            [2.0000, 1.4142, 0.8944],
            [1.4142, 1.1547, 0.8165],
            [0.8944, 0.8165, 0.6667],
            [0.6325, 0.6030, 0.5345],
        ]]]).to(device)

        points3d = kornia.depth_to_3d(depth, camera_matrix)
        assert_allclose(points3d, points3d_expected)
Example #3
    def test_shapes(self, batch_size, device, dtype):
        depth = torch.rand(batch_size, 1, 3, 4, device=device, dtype=dtype)
        camera_matrix = torch.rand(batch_size,
                                   3,
                                   3,
                                   device=device,
                                   dtype=dtype)

        points3d = kornia.depth_to_3d(depth, camera_matrix)
        assert points3d.shape == (batch_size, 3, 3, 4)
Example #4
    def test_unproject_and_project(self, device, dtype):
        depth = 2 * torch.tensor(
            [[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]], device=device, dtype=dtype
        )

        camera_matrix = torch.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], device=device, dtype=dtype)

        points3d = kornia.depth_to_3d(depth, camera_matrix)
        points2d = kornia.project_points(points3d.permute(0, 2, 3, 1), camera_matrix[:, None, None])
        points2d_expected = kornia.create_meshgrid(4, 3, False, device=device).to(dtype=dtype)
        assert_close(points2d, points2d_expected, atol=1e-4, rtol=1e-4)
Example #5
def warp_frame_depth(
        image_src: torch.Tensor,
        depth_dst: torch.Tensor,
        src_trans_dst: torch.Tensor,
        camera_matrix: torch.Tensor,
        normalize_points: bool = False,
        sampling_mode: str = 'bilinear') -> torch.Tensor:
    # TAKEN FROM KORNIA LIBRARY
    if not isinstance(image_src, torch.Tensor):
        raise TypeError(f"Input image_src type is not a torch.Tensor. Got {type(image_src)}.")

    if not len(image_src.shape) == 4:
        raise ValueError(f"Input image_src must have a shape (B, D, H, W). Got: {image_src.shape}")

    if not isinstance(depth_dst, torch.Tensor):
        raise TypeError(f"Input depth_dst type is not a torch.Tensor. Got {type(depth_dst)}.")

    if not (len(depth_dst.shape) == 4 and depth_dst.shape[-3] == 1):
        raise ValueError(f"Input depth_dst must have a shape (B, 1, H, W). Got: {depth_dst.shape}")

    if not isinstance(src_trans_dst, torch.Tensor):
        raise TypeError(f"Input src_trans_dst type is not a torch.Tensor. "
                        f"Got {type(src_trans_dst)}.")

    if not (len(src_trans_dst.shape) == 3 and src_trans_dst.shape[-2:] == (4, 4)):
        raise ValueError(f"Input src_trans_dst must have a shape (B, 4, 4). "
                         f"Got: {src_trans_dst.shape}.")

    if not isinstance(camera_matrix, torch.Tensor):
        raise TypeError(f"Input camera_matrix type is not a torch.Tensor. "
                        f"Got {type(camera_matrix)}.")

    if not (len(camera_matrix.shape) == 3 and camera_matrix.shape[-2:] == (3, 3)):
        raise ValueError(f"Input camera_matrix must have a shape (B, 3, 3). "
                         f"Got: {camera_matrix.shape}.")
    # unproject the destination depth map to 3D points in the destination camera frame
    points_3d_dst: torch.Tensor = kornia.depth_to_3d(depth_dst, camera_matrix, normalize_points)  # Bx3xHxW

    # rearrange to BxHxWx3 for transform_points
    points_3d_dst = points_3d_dst.permute(0, 2, 3, 1)  # BxHxWx3

    # apply transformation to the 3d points
    points_3d_src = kornia.transform_points(src_trans_dst[:, None], points_3d_dst)  # BxHxWx3
    points_3d_src[:, :, :, 2] = torch.relu(points_3d_src[:, :, :, 2])  # clamp points behind the camera to z = 0

    # project back to pixels
    camera_matrix_tmp: torch.Tensor = camera_matrix[:, None, None]  # Bx1x1x3x3
    points_2d_src: torch.Tensor = kornia.project_points(points_3d_src, camera_matrix_tmp)  # BxHxWx2

    # normalize pixel coordinates to the range [-1, 1] expected by grid_sample
    height, width = depth_dst.shape[-2:]
    points_2d_src_norm: torch.Tensor = kornia.normalize_pixel_coordinates(points_2d_src, height, width)  # BxHxWx2

    return torch.nn.functional.grid_sample(image_src, points_2d_src_norm, align_corners=True, mode=sampling_mode)
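
A minimal usage sketch of warp_frame_depth (not part of the original example; the shapes and values below are illustrative): warp a source image into the destination view given the destination depth map, the relative pose and the pinhole intrinsics.

import torch
import kornia

image_src = torch.rand(1, 3, 4, 3)         # BxDxHxW source image
depth_dst = 2.0 * torch.ones(1, 1, 4, 3)   # Bx1xHxW destination depth
src_trans_dst = torch.eye(4)[None]         # Bx4x4 destination -> source transform
camera_matrix = torch.eye(3)[None]         # Bx3x3 pinhole intrinsics

image_dst = warp_frame_depth(image_src, depth_dst, src_trans_dst, camera_matrix)
assert image_dst.shape == image_src.shape  # warped image keeps the source layout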
Example #6
def get_differentiable_square_depth_estimation(reference_pose_torch,
                                               measurement_pose_torch,
                                               previous_depth_torch,
                                               full_K_torch,
                                               half_K_torch,
                                               original_image_size,
                                               device):
    batch_size, _, _ = full_K_torch.size()
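    # render from an identity pose; the sign flips below reorient the x/y axes
    # to match the PyTorch3D camera convention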
    R_render = torch.eye(3, dtype=torch.float, device=device)
    T_render = torch.zeros(3, dtype=torch.float, device=device)
    R_render = torch.stack(batch_size * [R_render], dim=0)
    T_render = torch.stack(batch_size * [T_render], dim=0)
    R_render[:, 0, 0] *= -1
    R_render[:, 1, 1] *= -1

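    # relative pose between the two views; unproject the previous depth map,
    # transform the 3D points with that pose and wrap them in a point cloud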
    trans = torch.bmm(torch.inverse(reference_pose_torch), measurement_pose_torch)
    points_3d_src = kornia.depth_to_3d(previous_depth_torch, full_K_torch, normalize_points=False)
    points_3d_src = points_3d_src.permute(0, 2, 3, 1)
    points_3d_dst = kornia.transform_points(trans[:, None], points_3d_src).view(batch_size, -1, 3)
    point_cloud_p3d = structures.Pointclouds(points=points_3d_dst, features=None)

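    # convert the half-resolution intrinsics to PyTorch3D's NDC focal length
    # and principal point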
    width_normalizer = original_image_size / 4.0
    height_normalizer = original_image_size / 4.0
    px_ndc = (half_K_torch[:, 0, 2] - width_normalizer) / width_normalizer
    py_ndc = (half_K_torch[:, 1, 2] - height_normalizer) / height_normalizer
    fx_ndc = half_K_torch[:, 0, 0] / width_normalizer
    fy_ndc = half_K_torch[:, 1, 1] / height_normalizer

    principal_point = torch.stack([px_ndc, py_ndc], dim=-1)
    focal_length = torch.stack([fx_ndc, fy_ndc], dim=-1)

    cameras = renderer.SfMPerspectiveCameras(focal_length=focal_length,
                                             principal_point=principal_point,
                                             R=R_render,
                                             T=T_render,
                                             device=device)

    raster_settings = renderer.PointsRasterizationSettings(
        image_size=int(original_image_size / 2.0),
        radius=0.02,
        points_per_pixel=3)

    depth_renderer = renderer.PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
    rendered_depth = torch.min(depth_renderer(point_cloud_p3d).zbuf, dim=-1)[0]
    depth_hypothesis = torch.relu(rendered_depth).unsqueeze(1)
    return depth_hypothesis
Example #7
def get_non_differentiable_rectangle_depth_estimation(reference_pose_torch,
                                                      measurement_pose_torch,
                                                      previous_depth_torch,
                                                      full_K_torch,
                                                      half_K_torch,
                                                      original_width,
                                                      original_height):
    batch_size, _, _ = reference_pose_torch.shape
    half_width = int(original_width / 2)
    half_height = int(original_height / 2)

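    # relative pose between the views; unproject the previous depth map at full
    # resolution and transform the resulting 3D points into the other view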
    trans = torch.bmm(torch.inverse(reference_pose_torch), measurement_pose_torch)
    points_3d_src = kornia.depth_to_3d(previous_depth_torch, full_K_torch, normalize_points=False)
    points_3d_src = points_3d_src.permute(0, 2, 3, 1)
    points_3d_dst = kornia.transform_points(trans[:, None], points_3d_src)

    points_3d_dst = points_3d_dst.view(batch_size, -1, 3)

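    # clamp negative depths to zero and order the points by descending depth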
    z_values = points_3d_dst[:, :, -1]
    z_values = torch.relu(z_values)
    sorting_indices = torch.argsort(z_values, descending=True)
    z_values = torch.gather(z_values, dim=1, index=sorting_indices)

    sorting_indices_for_points = torch.stack([sorting_indices] * 3, dim=-1)
    points_3d_dst = torch.gather(points_3d_dst, dim=1, index=sorting_indices_for_points)

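    # project with the half-resolution intrinsics and keep only the points that
    # fall inside the half-resolution image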
    projections = torch.round(kornia.project_points(points_3d_dst, half_K_torch.unsqueeze(1))).long()
    is_valid_below = (projections[:, :, 0] >= 0) & (projections[:, :, 1] >= 0)
    is_valid_above = (projections[:, :, 0] < half_width) & (projections[:, :, 1] < half_height)
    is_valid = is_valid_below & is_valid_above

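    # write one depth value per projected pixel into the half-resolution depth map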
    depth_hypothesis = torch.zeros(size=(batch_size, 1, half_height, half_width)).cuda()
    for projection_index in range(0, batch_size):
        valid_points_zs = z_values[projection_index][is_valid[projection_index]]
        valid_projections = projections[projection_index][is_valid[projection_index]]
        i_s = valid_projections[:, 1]
        j_s = valid_projections[:, 0]
        ij_combined = i_s * half_width + j_s
        _, ij_combined_unique_indices = np.unique(ij_combined.cpu().numpy(), return_index=True)
        ij_combined_unique_indices = torch.from_numpy(ij_combined_unique_indices).long().cuda()
        i_s = i_s[ij_combined_unique_indices]
        j_s = j_s[ij_combined_unique_indices]
        valid_points_zs = valid_points_zs[ij_combined_unique_indices]
        torch.index_put_(depth_hypothesis[projection_index, 0], (i_s, j_s), valid_points_zs)
    return depth_hypothesis
Example #8
    def test_unproject_normalized(self, device, dtype):
        # this is for normalize_points=True
        depth = 2 * torch.tensor([[[
            [1., 1., 1.],
            [1., 1., 1.],
            [1., 1., 1.],
            [1., 1., 1.],
        ]]], device=device, dtype=dtype)

        camera_matrix = torch.tensor([[
            [1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.],
        ]], device=device, dtype=dtype)

        points3d_expected = torch.tensor([[[
            [0.0000, 1.4142, 1.7889],
            [0.0000, 1.1547, 1.6330],
            [0.0000, 0.8165, 1.3333],
            [0.0000, 0.6030, 1.0690],
        ], [
            [0.0000, 0.0000, 0.0000],
            [1.4142, 1.1547, 0.8165],
            [1.7889, 1.6330, 1.3333],
            [1.8974, 1.8091, 1.6036],
        ], [
            [2.0000, 1.4142, 0.8944],
            [1.4142, 1.1547, 0.8165],
            [0.8944, 0.8165, 0.6667],
            [0.6325, 0.6030, 0.5345],
        ]]], device=device, dtype=dtype)

        points3d = kornia.depth_to_3d(depth,
                                      camera_matrix,
                                      normalize_points=True)
        assert_allclose(points3d, points3d_expected, atol=1e-4, rtol=1e-4)
Example #9
    def test_unproject_and_project(self, device):
        depth = 2 * torch.tensor([[[
            [1., 1., 1.],
            [1., 1., 1.],
            [1., 1., 1.],
            [1., 1., 1.],
        ]]]).to(device)

        camera_matrix = torch.tensor([[
            [1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.],
        ]]).to(device)

        points3d = kornia.depth_to_3d(depth, camera_matrix)
        points2d = kornia.project_points(points3d.permute(0, 2, 3, 1),
                                         camera_matrix[:, None, None])
        points2d_expected = kornia.create_meshgrid(4, 3, False).to(device)
        assert_allclose(points2d, points2d_expected)
Example #10
    def test_unproject_denormalized(self, device, dtype):
        # this is for default normalize_points=False
        depth = 2 * torch.tensor(
            [[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]], device=device, dtype=dtype
        )

        camera_matrix = torch.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], device=device, dtype=dtype)

        points3d_expected = torch.tensor(
            [
                [
                    [[0.0, 2.0, 4.0], [0.0, 2.0, 4.0], [0.0, 2.0, 4.0], [0.0, 2.0, 4.0]],
                    [[0.0, 0.0, 0.0], [2.0, 2.0, 2.0], [4.0, 4.0, 4.0], [6.0, 6.0, 6.0]],
                    [[2.0, 2.0, 2.0], [2.0, 2.0, 2.0], [2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
                ]
            ],
            device=device,
            dtype=dtype,
        )

        points3d = kornia.depth_to_3d(depth, camera_matrix)  # default is normalize_points=False
        assert_close(points3d, points3d_expected, atol=1e-4, rtol=1e-4)
Example #11
    def test_shapes_broadcast(self, device, batch_size):
        depth = torch.rand(batch_size, 1, 3, 4).to(device)
        camera_matrix = torch.rand(1, 3, 3).to(device)

        points3d = kornia.depth_to_3d(depth, camera_matrix)
        assert points3d.shape == (batch_size, 3, 3, 4)
Example #12
    def test_smoke(self, device):
        depth = torch.rand(1, 1, 3, 4).to(device)
        camera_matrix = torch.rand(1, 3, 3).to(device)

        points3d = kornia.depth_to_3d(depth, camera_matrix)
        assert points3d.shape == (1, 3, 3, 4)
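
All of the examples above share the same call convention: kornia.depth_to_3d takes a Bx1xHxW depth map and a Bx3x3 camera matrix and returns a Bx3xHxW tensor of (x, y, z) coordinates, with normalize_points=False by default. A minimal self-contained sketch (illustrative values, not taken from the original examples):

import torch
import kornia

depth = torch.ones(1, 1, 4, 3)      # Bx1xHxW depth map
camera_matrix = torch.eye(3)[None]  # Bx3x3 pinhole intrinsics

points3d = kornia.depth_to_3d(depth, camera_matrix)  # default normalize_points=False
print(points3d.shape)  # torch.Size([1, 3, 4, 3])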