Example 1
    def test_random_motion_blur(self, same_on_batch, return_transform, p,
                                device, dtype):
        f = RandomMotionBlur(kernel_size=(3, 5),
                             angle=(10, 30),
                             direction=0.5,
                             same_on_batch=same_on_batch,
                             return_transform=return_transform,
                             p=p)
        torch.manual_seed(0)
        batch_size = 2
        input = torch.randn(1, 3, 5, 6).repeat(batch_size, 1, 1, 1)

        output = f(input)

        if return_transform:
        assert len(output) == 2, (
            f"must return a length 2 tuple if return_transform is True. Got {len(output)}.")
            identity = kornia.eye_like(3, input)
            output, mat = output
            assert_allclose(mat, identity, rtol=1e-4, atol=1e-4)

        if same_on_batch:
            assert_allclose(output[0], output[1], rtol=1e-4, atol=1e-4)
        elif p == 0:
            assert_allclose(output, input, rtol=1e-4, atol=1e-4)
        else:
            assert not torch.allclose(
                output[0], output[1], rtol=1e-4, atol=1e-4)

        assert output.shape == torch.Size([batch_size, 3, 5, 6])
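
The same_on_batch branch above is the core contract: one set of sampled parameters shared across the whole batch. A standalone sketch of that check, reusing the constructor arguments from the test (assuming only that RandomMotionBlur keeps this signature):

import torch
from kornia.augmentation import RandomMotionBlur

# with same_on_batch=True every element shares one parameter sample,
# so duplicated inputs must produce identical outputs
aug = RandomMotionBlur(kernel_size=(3, 5), angle=(10, 30), direction=0.5,
                       same_on_batch=True, p=1.0)
x = torch.randn(1, 3, 5, 6).repeat(2, 1, 1, 1)
y = aug(x)
assert torch.allclose(y[0], y[1])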
Example 2
    def test_random_equalize(self, device, dtype):
        f = RandomEqualize3D(p=1.0, return_transform=True)
        f1 = RandomEqualize3D(p=0.0, return_transform=True)
        f2 = RandomEqualize3D(p=1.0)
        f3 = RandomEqualize3D(p=0.0)

        bs, channels, depth, height, width = 1, 3, 6, 10, 10

        inputs3d = self.build_input(channels, depth, height, width, bs, device=device, dtype=dtype)

        row_expected = torch.tensor(
            [0.0000, 0.11764, 0.2353, 0.3529, 0.4706, 0.5882, 0.7059, 0.8235, 0.9412, 1.0000],
            device=device,
            dtype=dtype,
        )
        expected = self.build_input(channels, depth, height, width, bs=1, row=row_expected, device=device, dtype=dtype)

        identity = kornia.eye_like(4, expected)

        assert_allclose(f(inputs3d)[0], expected, rtol=1e-4, atol=1e-4)
        assert_allclose(f(inputs3d)[1], identity, rtol=1e-4, atol=1e-4)
        assert_allclose(f1(inputs3d)[0], inputs3d, rtol=1e-4, atol=1e-4)
        assert_allclose(f1(inputs3d)[1], identity, rtol=1e-4, atol=1e-4)
        assert_allclose(f2(inputs3d), expected, rtol=1e-4, atol=1e-4)
        assert_allclose(f3(inputs3d), inputs3d, rtol=1e-4, atol=1e-4)
Example 3
    def test_batch_random_equalize(self, device, dtype):
        f = RandomEqualize3D(p=1.0)
        f1 = RandomEqualize3D(p=0.0)

        bs, channels, depth, height, width = 2, 3, 6, 10, 10

        inputs3d = self.build_input(channels,
                                    depth,
                                    height,
                                    width,
                                    bs,
                                    device=device,
                                    dtype=dtype)

        row_expected = torch.tensor([
            0.0000, 0.11764, 0.2353, 0.3529, 0.4706, 0.5882, 0.7059, 0.8235,
            0.9412, 1.0000
        ])
        expected = self.build_input(channels,
                                    depth,
                                    height,
                                    width,
                                    bs,
                                    row=row_expected,
                                    device=device,
                                    dtype=dtype)

        identity = kornia.eye_like(4, expected)  # 2 x 4 x 4

        assert_close(f(inputs3d), expected, rtol=1e-4, atol=1e-4)
        assert_close(f.transform_matrix, identity, rtol=1e-4, atol=1e-4)
        assert_close(f1(inputs3d), inputs3d, rtol=1e-4, atol=1e-4)
        assert_close(f1.transform_matrix, identity, rtol=1e-4, atol=1e-4)
Example 4
    def test_dirty_points_and_gradcheck(self, batch_size, device, dtype):
        # generate input data: the seed tensor gives eye_like the batch size,
        # device and dtype; the result is a random diagonal homography
        # normalized so that H[:, 2, 2] == 1
        points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
        H = kornia.eye_like(3, points_src)
        H = H * 0.3 * torch.rand_like(H)
        H = H / H[:, 2:3, 2:3]

        points_src = 100. * torch.rand(
            batch_size, 20, 2, device=device, dtype=dtype)
        points_dst = kornia.transform_points(H, points_src)

        # making last point an outlier
        points_dst[:, -1, :] += 20

        weights = torch.ones(batch_size, 20, device=device, dtype=dtype)

        # compute transform from source to target
        dst_homo_src = find_homography_dlt_iterated(points_src, points_dst,
                                                    weights, 0.5, 10)

        assert_allclose(kornia.transform_points(dst_homo_src,
                                                points_src[:, :-1]),
                        points_dst[:, :-1],
                        rtol=1e-3,
                        atol=1e-3)
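
One subtlety in the data generation above: multiplying eye_like elementwise by a random matrix keeps only the diagonal, so H is a random diagonal (scale-only) homography rather than a general one. A quick check of that property:

import torch
import kornia

pts = torch.rand(2, 10, 2)
H = kornia.eye_like(3, pts)        # batch of 3x3 identities
H = H * 0.3 * torch.rand_like(H)   # elementwise: off-diagonals stay zero
off_diag = H - torch.diag_embed(torch.diagonal(H, dim1=-2, dim2=-1))
assert torch.count_nonzero(off_diag) == 0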
Example 5
 def test_cardinality(self, device, dtype, batch_shape, out_shape):
     batch_size, channels, height, width = batch_shape
     h_out, w_out = out_shape
     img_b = torch.rand(batch_size, channels, height, width, device=device, dtype=dtype)
     H_ab = kornia.eye_like(3, img_b)
     img_a = kornia.warp_perspective(img_b, H_ab, (h_out, w_out))
     assert img_a.shape == (batch_size, channels, h_out, w_out)
Example 6
 def test_jit(self, device, dtype):
     img = torch.rand(1, 2, 3, 4, device=device, dtype=dtype)
     H_ab = kornia.eye_like(3, img)
     args = (img, H_ab, (4, 5))
     op = kornia.warp_perspective
     op_jit = torch.jit.script(op)
     assert_allclose(op(*args), op_jit(*args))
Example 7
 def test_shape(self, batch_size, eye_size, device, dtype):
     B, N = batch_size, eye_size
     image = torch.rand(B, 3, 4, 4, device=device, dtype=dtype)
     identity = kornia.eye_like(N, image)
     assert identity.shape == (B, N, N)
     assert identity.device == image.device
     assert identity.dtype == image.dtype
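
The behavior exercised here (per-sample identity on the seed's device and dtype) is easy to reproduce in plain torch. A minimal sketch with a hypothetical helper name, not kornia's actual implementation:

import torch

def eye_like_sketch(n: int, tensor: torch.Tensor) -> torch.Tensor:
    # one n x n identity per batch element, on the seed's device/dtype
    eye = torch.eye(n, device=tensor.device, dtype=tensor.dtype)
    return eye[None].repeat(tensor.shape[0], 1, 1)

x = torch.rand(5, 3, 4, 4)
assert eye_like_sketch(3, x).shape == (5, 3, 3)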
Example 8
def compute_perspective_transformation(
        input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:
    r"""Compute the applied transformation matrix :math: `(*, 3, 3)`.

    Args:
        input (torch.Tensor): Tensor to be transformed with shape (H, W), (C, H, W), (B, C, H, W).
        params (Dict[str, torch.Tensor]):
            - params['start_points']: Tensor containing [top-left, top-right, bottom-right,
              bottom-left] of the original image with shape Bx4x2.
            - params['end_points']: Tensor containing [top-left, top-right, bottom-right,
              bottom-left] of the transformed image with shape Bx4x2.

    Returns:
        torch.Tensor: The applied transformation matrix :math:`(*, 3, 3)`
    """
    input = _transform_input(input)
    _validate_input_dtype(
        input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])
    perspective_transform: torch.Tensor = get_perspective_transform(
        params['start_points'], params['end_points']).type_as(input)

    # start from a per-sample identity; it is immediately replaced by the
    # computed perspective transform
    transform: torch.Tensor = K.eye_like(3, input)
    transform = perspective_transform

    return transform
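
For context, get_perspective_transform solves for the 3x3 homography mapping four point correspondences. A minimal sketch with a pure (+1, +1) shift, where the solution should be the identity plus a translation column:

import torch
from kornia.geometry import get_perspective_transform

start = torch.tensor([[[0.0, 0.0], [3.0, 0.0], [3.0, 3.0], [0.0, 3.0]]])
end = start + 1.0                          # shift every corner by (1, 1)
M = get_perspective_transform(start, end)  # 1 x 3 x 3

expected = torch.eye(3)[None].clone()
expected[..., :2, -1] = 1.0                # translation column
assert torch.allclose(M, expected, atol=1e-4)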
Example 9
 def test_jit(self, device, dtype):
     points = torch.ones(1, 2, 2, device=device, dtype=dtype)
     transform = kornia.eye_like(3, points)
     op = kornia.geometry.transform_points
     op_script = torch.jit.script(op)
     actual = op_script(transform, points)
     expected = op(transform, points)
     assert_close(actual, expected, atol=1e-4, rtol=1e-4)
Example 10
    def test_uniform_dist(self, device, dtype):
        input1 = torch.linspace(0, 255, 10, device=device, dtype=dtype).unsqueeze(0)
        input2 = torch.linspace(0, 255, 10, device=device, dtype=dtype).unsqueeze(0)
        bins = torch.linspace(0, 255, 10, device=device, dtype=dtype)
        bandwidth = torch.tensor(2 * 0.4 ** 2, device=device, dtype=dtype)

        pdf = TestHistogram2d.fcn(input1, input2, bins, bandwidth)
        ans = 0.1 * kornia.eye_like(10, pdf)
        assert_close(ans, pdf)
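
The expected value 0.1 * I(10) encodes that identical inputs put all joint mass on the diagonal, split evenly over the 10 bins, so the joint pdf sums to one. A standalone sketch, assuming TestHistogram2d.fcn wraps kornia.enhance.histogram2d with the signature exercised above:

import torch
import kornia

x = torch.linspace(0, 255, 10).unsqueeze(0)   # 1 x 10 samples
bins = torch.linspace(0, 255, 10)             # 10 bin centers
bandwidth = torch.tensor(2 * 0.4 ** 2)

pdf = kornia.enhance.histogram2d(x, x, bins, bandwidth)  # 1 x 10 x 10
assert torch.allclose(pdf.sum(), torch.tensor(1.0), atol=1e-4)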
Example 11
 def test_gradcheck(self, device, dtype):
     batch_size, channels, height, width = 1, 2, 3, 4
     img_b = torch.rand(batch_size, channels, height, width, device=device, dtype=dtype)
     H_ab = kornia.eye_like(3, img_b)
     img_b = utils.tensor_to_gradcheck_var(img_b)  # to var
      # TODO(dmytro/edgar): figure out why gradients don't propagate for the transform
     H_ab = utils.tensor_to_gradcheck_var(H_ab, requires_grad=False)  # to var
     assert gradcheck(kornia.warp_perspective, (img_b, H_ab, (height, width),),
                      raise_exception=True)
Example 12
    def get_transformation_matrix(
            self,
            input: torch.Tensor,
            params: Optional[List[ParamItem]] = None,
            recompute: bool = False) -> Optional[torch.Tensor]:
        """Compute the transformation matrix according to the provided parameters.

        Args:
            input: the input tensor.
            params: params for the sequence.
            recompute: if to recompute the transformation matrix according to the params.
                default: False.
        """
        if params is None:
            raise NotImplementedError("requires params to be provided.")
        named_modules: Iterator[Tuple[
            str, nn.Module]] = self.get_forward_sequence(params)

        # accumulated transformation matrix; stays None until the first
        # contributing module is reached
        res_mat: Optional[torch.Tensor] = None
        for (_, module), param in zip(named_modules,
                                      params if params is not None else []):
            if isinstance(module, (_AugmentationBase, )) and not isinstance(
                    module, (MixAugmentationBase, )):
                to_apply = param.data['batch_prob']  # type: ignore
                ori_shape = input.shape
                try:
                    input = module.transform_tensor(input)
                except ValueError:
                    # Ignore error for 5-dim video
                    pass
                # Standardize shape
                if recompute:
                    mat: torch.Tensor = kornia.eye_like(3, input)
                    mat[to_apply] = module.compute_transformation(
                        input[to_apply], param.data)  # type: ignore
                else:
                    mat = torch.as_tensor(module._transform_matrix,
                                          device=input.device,
                                          dtype=input.dtype)
                res_mat = mat if res_mat is None else mat @ res_mat
                input = module.transform_output_tensor(input, ori_shape)
                if module.keepdim and ori_shape != input.shape:
                    res_mat = res_mat.squeeze()
            elif isinstance(module, (ImageSequential, )):
                # If not augmentationSequential
                if isinstance(module,
                              (kornia.augmentation.AugmentationSequential,
                               )) and not recompute:
                    mat = torch.as_tensor(module._transform_matrix,
                                          device=input.device,
                                          dtype=input.dtype)
                else:
                    mat = module.get_transformation_matrix(
                        input, param.data)  # type: ignore
                res_mat = mat if res_mat is None else mat @ res_mat
        return res_mat
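
The accumulation rule res_mat = mat @ res_mat composes transforms so that the most recently visited module is applied last. A toy sketch of that convention with two translations:

import torch

A = torch.eye(3)
A[0, 2] = 1.0   # translate x by 1
B = torch.eye(3)
B[1, 2] = 2.0   # translate y by 2

res = B @ A     # "apply A, then B"
point = torch.tensor([0.0, 0.0, 1.0])  # homogeneous origin
assert torch.equal(res @ point, torch.tensor([1.0, 2.0, 1.0]))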
Example 13
def get_projective_transform(center: torch.Tensor, angles: torch.Tensor,
                             scales: torch.Tensor) -> torch.Tensor:
    r"""Calculate the projection matrix for a 3D rotation.

    .. warning::
        This API signature is experimental and might change in the future.

    The function computes the projection matrix given the center and angles per axis.

    Args:
        center: center of the rotation (x,y,z) in the source with shape :math:`(B, 3)`.
        angles: angle axis vector containing the rotation angles in degrees in the form
            of (rx, ry, rz) with shape :math:`(B, 3)`. Internally it calls Rodrigues to compute
            the rotation matrix from axis-angle.
        scales: scale factor for x-y-z-directions with shape :math:`(B, 3)`.

    Returns:
        the projection matrix of 3D rotation with shape :math:`(B, 3, 4)`.

    .. note::
        This function is often used in conjunction with :func:`warp_affine3d`.
    """
    if not (len(center.shape) == 2 and center.shape[-1] == 3):
        raise AssertionError(center.shape)
    if not (len(angles.shape) == 2 and angles.shape[-1] == 3):
        raise AssertionError(angles.shape)
    if center.device != angles.device:
        raise AssertionError(center.device, angles.device)
    if center.dtype != angles.dtype:
        raise AssertionError(center.dtype, angles.dtype)

    # create rotation matrix
    angle_axis_rad: torch.Tensor = K.deg2rad(angles)
    rmat: torch.Tensor = K.angle_axis_to_rotation_matrix(
        angle_axis_rad)  # Bx3x3
    scaling_matrix: torch.Tensor = K.eye_like(3, rmat)
    scaling_matrix = scaling_matrix * scales.unsqueeze(dim=1)
    rmat = rmat @ scaling_matrix.to(rmat)

    # define matrix to move forth and back to origin
    from_origin_mat = torch.eye(4)[None].repeat(rmat.shape[0], 1,
                                                1).type_as(center)  # Bx4x4
    from_origin_mat[..., :3, -1] += center

    to_origin_mat = _torch_inverse_cast(from_origin_mat)

    # append translation with zeros
    proj_mat = projection_from_Rt(rmat,
                                  torch.zeros_like(center)[..., None])  # Bx3x4

    # chain 4x4 transforms
    proj_mat = convert_affinematrix_to_homography3d(proj_mat)  # Bx4x4
    proj_mat = from_origin_mat @ proj_mat @ to_origin_mat

    return proj_mat[..., :3, :]  # Bx3x4
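
A quick sanity sketch of the identity case (zero angles, unit scales, rotation about the origin), which should reduce the whole chain to [I | 0]; this assumes the export path kornia.geometry.get_projective_transform:

import torch
import kornia

center = torch.zeros(1, 3)
angles = torch.zeros(1, 3)
scales = torch.ones(1, 3)

P = kornia.geometry.get_projective_transform(center, angles, scales)
assert P.shape == (1, 3, 4)
assert torch.allclose(P, torch.eye(3, 4).unsqueeze(0), atol=1e-6)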
Example 14
    def get_transformation_matrix(
        self, input: torch.Tensor, params: Optional[List[ParamItem]] = None,
    ) -> torch.Tensor:
        """Compute the transformation matrix according to the provided parameters."""
        if params is None:
            raise NotImplementedError("requires params to be provided.")
        named_modules: Iterator[Tuple[str, nn.Module]] = self.get_forward_sequence(params)

        res_mat: torch.Tensor = kornia.eye_like(3, input)
        for (_, module), param in zip(named_modules, params):
            if isinstance(module, (_AugmentationBase, MixAugmentationBase)):
                mat: torch.Tensor = kornia.eye_like(3, input)
                to_apply = param.data['batch_prob']  # type: ignore
                mat[to_apply] = module.compute_transformation(input[to_apply], param.data)  # type: ignore
                res_mat = mat @ res_mat
            elif isinstance(module, (ImageSequential,)):
                mat = module.get_transformation_matrix(input, param.data)  # type: ignore
                res_mat = mat @ res_mat
        return res_mat
Example 15
 def test_smoke(self, device, dtype):
     batch_size, channels, height, width = 1, 2, 3, 4
     img_b = torch.rand(batch_size,
                        channels,
                        height,
                        width,
                        device=device,
                        dtype=dtype)
     H_ab = kornia.eye_like(3, img_b)
     img_a = kornia.warp_perspective(img_b, H_ab, (height, width))
     assert_allclose(img_b, img_a)
Example 16
    def test_jit(self, device):
        @torch.jit.script
        def op_script(transform, points):
            return kornia.transform_points(transform, points)

        points = torch.ones(1, 2, 2, device=device)
        transform = kornia.eye_like(3, points)
        op = kornia.transform_points
        op_script = torch.jit.script(op)
        actual = op_script(transform, points)
        expected = op(transform, points)
        assert_allclose(actual, expected)
Example 17
    def test_translation(self, device, dtype):
        R1 = torch.tensor([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], device=device, dtype=dtype)

        t1 = torch.tensor([[[10.0], [0.0], [0.0]]]).type_as(R1)

        R2 = kornia.eye_like(3, R1)
        t2 = kornia.vec_like(3, t1)

        R_expected = R1.clone()
        t_expected = -t1

        R, t = epi.relative_camera_motion(R1, t1, R2, t2)
        assert_close(R_expected, R)
        assert_close(t_expected, t)
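
vec_like is the vector counterpart of eye_like: as used here, it yields one n x 1 zero vector per batch element, matching the seed's device and dtype (which is why, with R2 = I and t2 = 0, the test expects the relative translation -t1). A small sketch:

import torch
import kornia

t = torch.rand(4, 3, 1, dtype=torch.float64)
v = kornia.vec_like(3, t)
assert v.shape == (4, 3, 1)
assert v.dtype == t.dtype and torch.count_nonzero(v) == 0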
Example 18
    def test_translation(self, device, dtype):
        offset = 1.
        h, w = 3, 4

        img_b = torch.arange(float(h * w), device=device, dtype=dtype).view(1, 1, h, w)
        homo_ab = kornia.eye_like(3, img_b)
        homo_ab[..., :2, -1] += offset

        # a (+1, +1) shift moves the content one pixel down and to the right
        expected = torch.zeros_like(img_b)
        expected[..., 1:, 1:] = img_b[..., :2, :3]

        # Same as opencv: cv2.warpPerspective(kornia.tensor_to_image(img_b), homo_ab[0].numpy(), (w, h))
        img_a = kornia.warp_perspective(img_b, homo_ab, (h, w))
        assert_allclose(img_a, expected, atol=1e-4, rtol=1e-4)
Example 19
    def test_translation_normalized(self, device, dtype):
        offset = 1.0
        h, w = 3, 4

        img_b = torch.arange(float(h * w), device=device,
                             dtype=dtype).view(1, 1, h, w)
        homo_ab = kornia.eye_like(3, img_b)
        homo_ab[..., :2, -1] += offset

        expected = torch.zeros_like(img_b)
        expected[..., 1:, 1:] = img_b[..., :2, :3]

        # Same as opencv: cv2.warpPerspective(kornia.tensor_to_image(img_b), homo_ab[0].numpy(), (w, h))
        img_a = kornia.geometry.transform.homography_warp(
            img_b, homo_ab, (h, w), normalized_homography=False)
        assert_close(img_a, expected, atol=1e-4, rtol=1e-4)
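
Since this test shares its homography and expected output with the previous warp_perspective test, the two ops should agree when normalized_homography=False. A direct cross-check of that equivalence:

import torch
import kornia

img = torch.arange(12.0).view(1, 1, 3, 4)
homo = kornia.eye_like(3, img)
homo[..., :2, -1] += 1.0

a = kornia.warp_perspective(img, homo, (3, 4))
b = kornia.geometry.transform.homography_warp(
    img, homo, (3, 4), normalized_homography=False)
assert torch.allclose(a, b, atol=1e-4)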
Example 20
def test_warp_perspective_gradcheck(device, dtype):
    # gradcheck needs double precision, so the incoming dtype is overridden
    H, W = 5, 5
    patch = torch.rand(1, 1, H, W, device=device, dtype=torch.float64,
                       requires_grad=True)
    M = kornia.eye_like(3, patch)
    assert gradcheck(kornia.warp_perspective, (patch, M, (H, W)),
                     raise_exception=True)
Example 21
    def test_clean_points(self, batch_size, device, dtype):
        # generate input data
        points_src = torch.rand(batch_size, 10, 2, device=device, dtype=dtype)
        H = kornia.eye_like(3, points_src)
        H = H * 0.3 * torch.rand_like(H)
        H = H / H[:, 2:3, 2:3]

        points_dst = kornia.transform_points(H, points_src)
        weights = torch.ones(batch_size, 10, device=device, dtype=dtype)

        # compute transform from source to target
        dst_homo_src = find_homography_dlt(points_src, points_dst, weights)

        assert_allclose(kornia.transform_points(dst_homo_src, points_src),
                        points_dst,
                        rtol=1e-3,
                        atol=1e-4)
Example 22
    def compute_transformation(self, input: Tensor,
                               params: Dict[str, Tensor]) -> Tensor:
        yaw: Tensor = params["yaw"].to(input)
        pitch: Tensor = params["pitch"].to(input)
        roll: Tensor = params["roll"].to(input)

        center: Tensor = _compute_tensor_center3d(input)
        rotation_mat: Tensor = _compute_rotation_matrix3d(
            yaw, pitch, roll, center.expand(yaw.shape[0], -1))

        # rotation_mat is B x 3 x 4 and we need a B x 4 x 4 matrix
        trans_mat: Tensor = kornia.eye_like(4, input)
        trans_mat[:, 0] = rotation_mat[:, 0]
        trans_mat[:, 1] = rotation_mat[:, 1]
        trans_mat[:, 2] = rotation_mat[:, 2]

        return trans_mat
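
The pattern here (and in the next example) seeds a batch of 4x4 identities with eye_like and overwrites the top three rows, which leaves the homogeneous row [0, 0, 0, 1] intact. A compact sketch:

import torch
import kornia

affine = torch.rand(2, 3, 4)        # batch of 3x4 affine matrices
homog = kornia.eye_like(4, affine)  # batch of 4x4 identities
homog[:, :3, :] = affine            # last row stays [0, 0, 0, 1]
assert torch.allclose(homog[:, 3], torch.tensor([0.0, 0.0, 0.0, 1.0]).expand(2, 4))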
Example 23
    def test_project(self, num_points, device, dtype):

        intrinsics, _, world_points, img_points = self._get_test_data(
            num_points, device, dtype)

        pred_world_to_cam = kornia.geometry.solve_pnp_dlt(
            world_points, img_points, intrinsics)

        pred_world_to_cam_4x4 = kornia.eye_like(4, pred_world_to_cam)
        pred_world_to_cam_4x4[:, :3, :] = pred_world_to_cam

        repeated_intrinsics = intrinsics.unsqueeze(1).repeat(
            1, num_points, 1, 1)
        pred_img_points = self._project_to_image(world_points,
                                                 pred_world_to_cam_4x4,
                                                 repeated_intrinsics)

        assert_close(pred_img_points, img_points, atol=1e-3, rtol=1e-3)
Example 24
    def test_fill_padding_translation(self, device, dtype):
        offset = 1.0
        h, w = 3, 4

        img_b = torch.arange(float(3 * h * w), device=device,
                             dtype=dtype).view(1, 3, h, w)
        homo_ab = kornia.eye_like(3, img_b)
        homo_ab[..., :2, -1] += offset

        # normally fill_value will also be converted to the right device and type in warp_perspective
        fill_value = torch.tensor([0.5, 0.2, 0.1], device=device, dtype=dtype)

        img_a = kornia.geometry.warp_perspective(img_b,
                                                 homo_ab, (h, w),
                                                 padding_mode="fill",
                                                 fill_value=fill_value)
        top_row_mean = img_a[..., :1, :].mean(dim=[0, 2, 3])
        first_col_mean = img_a[..., :1].mean(dim=[0, 2, 3])
        assert_close(top_row_mean, fill_value)
        assert_close(first_col_mean, fill_value)
Example 25
def compute_perspective_transformation3d(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:
    r"""Compute the perspective transformation matrix :math: `(*, 4, 4)`.

    Args:
        input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.
        params (Dict[str, torch.Tensor]):
            - params['start_points']: Tensor containing [top-left, top-right, bottom-right,
              bottom-left] of the original image with shape Bx8x3.
            - params['end_points']: Tensor containing [top-left, top-right, bottom-right,
              bottom-left] of the transformed image with shape Bx8x3.

    Returns:
        torch.Tensor: The perspective transformation matrix :math:`(*, 4, 4)`
    """
    perspective_transform: torch.Tensor = get_perspective_transform3d(
        params['start_points'], params['end_points']).to(input)

    # start from a per-sample identity; it is immediately replaced by the
    # computed perspective transform
    transform: torch.Tensor = K.eye_like(4, input)
    transform = perspective_transform

    return transform
Example 26
 def identity_matrix(self, input) -> torch.Tensor:
     """Return 4x4 identity matrix."""
     return kornia.eye_like(4, input)
Example 27
 def test_smoke(self, device, dtype):
     image = torch.rand(1, 3, 4, 4, device=device, dtype=dtype)
     identity = kornia.eye_like(3, image)
     assert identity.shape == (1, 3, 3)
     assert identity.device == image.device
     assert identity.dtype == image.dtype
Example 28
 def identity_matrix(self, input) -> Tensor:
     """Return 3x3 identity matrix."""
     return kornia.eye_like(3, input)