Example #1
    def __init__(self,
                 n_channels,
                 n_classes,
                 court_img,
                 target_size,
                 court_poi=None,
                 bilinear=True,
                 resnet_name='resnetreg50',
                 resnet_pretrained=None,
                 warp_with_nearest=False,
                 img2input=False):
        super(Reconstructor, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        self.img2input = img2input

        # UNet:
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        factor = 2 if bilinear else 1
        self.down4 = Down(512, 1024 // factor)
        self.up1 = Up(1024, 512 // factor, bilinear)
        self.up2 = Up(512, 256 // factor, bilinear)
        self.up3 = Up(256, 128 // factor, bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)

        # UNet regressor that outputs the first 3x3 transformation matrix:
        # self.conv_top = nn.Conv2d(1024 // factor, n_classes, kernel_size=1)
        # self.unet_reg = Reconstructor.make_regressor(n_classes)

        # ResNet regressor that outputs the second 3x3 transformation matrix:
        in_classes = n_classes+3 if img2input else n_classes
        self.resnet_reg = resnet(resnet_name, resnet_pretrained, in_classes)

        # The court template image and court points of interest.
        # This court template will be warped by the learnt transformation matrix:
        self.court_img = court_img
        self.court_poi = court_poi

        # STN warper:
        h, w = target_size[1], target_size[0]
        if warp_with_nearest is False:
            self.warper = kornia.HomographyWarper(h, w)
        else:
            # It seems mode='nearest' has a bug when used during training
            self.warper = kornia.HomographyWarper(h, w, mode='nearest')
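The model above relies on the UNet building blocks DoubleConv, Down, Up and OutConv, which are not shown. A minimal sketch of how DoubleConv and Down are commonly defined (an assumption based on the standard PyTorch UNet blocks, not this repository's own code):

import torch.nn as nn


class DoubleConv(nn.Module):
    """(Conv2d => BatchNorm2d => ReLU) applied twice."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.double_conv(x)


class Down(nn.Module):
    """Downscale with max-pooling, then apply DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels),
        )

    def forward(self, x):
        return self.maxpool_conv(x)

Up and OutConv follow the same pattern (upsampling followed by DoubleConv, and a final 1x1 convolution, respectively).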
Example #2
    def __init__(self, args, direc, N, scale_h, scale_w):
        super(warp_kornia, self).__init__()

        self.warper = dgm.HomographyWarper(int(1208 * scale_h),
                                           int(1920 * scale_w)).to(args.device)
        self.MyHomography = Homography(direc, N, scale_h,
                                       scale_w).to(args.device)
Example #3
    def __init__(self, n_channels, n_classes, template, target_size, bilinear=True):
        super(CourtReconstruction, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear

        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        factor = 2 if bilinear else 1
        self.down4 = Down(512, 1024 // factor)
        self.up1 = Up(1024, 512 // factor, bilinear)
        self.up2 = Up(512, 256 // factor, bilinear)
        self.up3 = Up(256, 128 // factor, bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)

        # Regressor for the 3x3 transformation matrix:
        self.conv_reg = nn.Conv2d(1024 // factor, 8, kernel_size=1)
        self.reg = nn.Sequential(
            nn.Linear(8 * 22 * 40, 32),
            nn.ReLU(True),
            nn.Linear(32, 3 * 3)
        )
        # Initialize the weights/bias with identity transformation:
        self.reg[-1].weight.data.zero_()
        self.reg[-1].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0, 0, 0, 1], dtype=torch.float))

        # Court template that will be warped by the learnt transformation matrix:
        self.template = template

        # Warper:
        h, w = target_size[1], target_size[0]
        self.warper = kornia.HomographyWarper(h, w)  # , mode='nearest'
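The forward pass that connects the encoder, the homography regressor and the warper is not included in the snippet. A hypothetical sketch of how it could look, assuming the flattened regressor output is reshaped to a (B, 3, 3) homography and used to warp the court template (an illustration only, not the repository's actual forward method):

    def forward(self, x):
        # UNet encoder
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)

        # regress the 3x3 homography from the bottleneck features
        theta = self.conv_reg(x5)
        theta = self.reg(theta.view(theta.size(0), -1)).view(-1, 3, 3)

        # warp the court template with the predicted homography
        template = self.template.expand(x.size(0), -1, -1, -1)
        warped_template = self.warper(template, theta)

        # UNet decoder for the segmentation logits
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        logits = self.outc(x)
        return logits, warped_template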
Example #4
    def test_gradcheck(self, batch_shape, device, dtype):
        # generate input data
        eye_size = 3  # identity 3x3

        # create a random source patch
        patch_src = torch.rand(batch_shape, device=device, dtype=dtype)
        patch_src = utils.tensor_to_gradcheck_var(patch_src)  # to var

        # create base homography
        batch_size, _, height, width = patch_src.shape
        dst_homo_src = utils.create_eye_batch(batch_size,
                                              eye_size,
                                              device=device,
                                              dtype=dtype)
        dst_homo_src = utils.tensor_to_gradcheck_var(
            dst_homo_src, requires_grad=False)  # to var

        # instantiate warper
        warper = kornia.HomographyWarper(height, width, align_corners=True)

        # evaluate function gradient
        assert gradcheck(warper, (patch_src, dst_homo_src), raise_exception=True)
Example #5
    def test_smoke(self, device):

        img_src_t: torch.Tensor = torch.rand(1, 3, 120, 120).to(device)
        img_dst_t: torch.Tensor = torch.rand(1, 3, 120, 120).to(device)

        init_homo: torch.Tensor = torch.from_numpy(
            np.array([[0.0415, 1.2731, -1.1731], [-0.9094, 0.5072, 0.4272],
                      [0.0762, 1.3981, 1.0646]])).float()

        height, width = img_dst_t.shape[-2:]
        warper = kornia.HomographyWarper(height, width)
        dst_homo_src = MyHomography(init_homo=init_homo).to(device)

        learning_rate = self.lr
        optimizer = optim.Adam(dst_homo_src.parameters(), lr=learning_rate)

        for _ in range(self.num_iterations):
            # warp the reference image to the destination with the current homography
            img_src_to_dst = warper(img_src_t, dst_homo_src())

            # compute the photometric loss
            loss = F.l1_loss(img_src_to_dst, img_dst_t)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            assert not bool(torch.isnan(dst_homo_src.homo.grad).any())
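MyHomography is not defined in the snippet. A minimal sketch of what the test assumes (a module holding the 3x3 homography as a learnable parameter and returning it with a batch dimension; the class name comes from the snippet, the body is an assumption):

import torch
import torch.nn as nn


class MyHomography(nn.Module):

    def __init__(self, init_homo: torch.Tensor) -> None:
        super().__init__()
        # the 3x3 homography is the parameter being optimized
        self.homo = nn.Parameter(init_homo.clone())

    def forward(self) -> torch.Tensor:
        return torch.unsqueeze(self.homo, dim=0)  # shape (1, 3, 3)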
Example #6
    def test_rotation(self, device, batch_shape):
        # create input data
        batch_size, channels, height, width = batch_shape
        patch_src = torch.rand(batch_size, channels, height, width).to(device)
        # rotation of 90deg
        dst_homo_src = torch.eye(3).to(device)
        dst_homo_src[..., 0, 0] = 0.0
        dst_homo_src[..., 0, 1] = 1.0
        dst_homo_src[..., 1, 0] = -1.0
        dst_homo_src[..., 1, 1] = 0.0
        dst_homo_src = dst_homo_src.expand(batch_size, -1, -1)

        # instantiate warper and warp from source to destination
        warper = kornia.HomographyWarper(height, width, align_corners=True)
        patch_dst = warper(patch_src, dst_homo_src)

        # check the corners
        assert_allclose(
            patch_src[..., 0, 0], patch_dst[..., 0, -1])
        assert_allclose(
            patch_src[..., 0, -1], patch_dst[..., -1, -1])
        assert_allclose(
            patch_src[..., -1, 0], patch_dst[..., 0, 0])
        assert_allclose(
            patch_src[..., -1, -1], patch_dst[..., -1, 0])
Example #7
    def test_translation(self, shape):
        # create input data
        offset = 2.  # in pixel
        height, width = shape
        patch_src = torch.rand(1, 1, height, width)
        dst_homo_src = utils.create_eye_batch(batch_size=1, eye_size=3)
        dst_homo_src[..., 0, 2] = offset / (width - 1)  # apply offset in x

        # instantiate warper and warp from source to destination
        warper = kornia.HomographyWarper(height, width)
        patch_dst = warper(patch_src, dst_homo_src)
        assert_allclose(patch_src[..., 1:], patch_dst[..., :-1])
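The division offset / (width - 1) above reflects that HomographyWarper works in normalized [-1, 1] coordinates rather than pixels. A small self-contained sketch of converting a pixel-space homography before warping (assuming the same kornia version as the snippets, where HomographyWarper is exposed at the top level):

import torch
import kornia

height, width = 2, 5
patch_src = torch.rand(1, 1, height, width)

# normalization matrix mapping pixel coordinates to [-1, 1]
N = torch.tensor([[2.0 / (width - 1), 0.0, -1.0],
                  [0.0, 2.0 / (height - 1), -1.0],
                  [0.0, 0.0, 1.0]]).unsqueeze(0)

H_pix = torch.eye(3).unsqueeze(0)      # homography in pixel coordinates (identity here)
H_norm = N @ H_pix @ torch.inverse(N)  # same homography in normalized coordinates

warper = kornia.HomographyWarper(height, width)
patch_dst = warper(patch_src, H_norm)  # matches patch_src for the identity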
Example #8
    def test_identity(self):
        # create input data
        height, width = 2, 5
        patch_src = torch.rand(1, 1, height, width)
        dst_homo_src = utils.create_eye_batch(batch_size=1, eye_size=3)

        # instantiate warper
        warper = kornia.HomographyWarper(height, width)

        # warp from source to destination
        patch_dst = warper(patch_src, dst_homo_src)
        assert_allclose(patch_src, patch_dst)
Example #9
    def test_homography_warper(self, batch_size, device, dtype):
        # generate input data
        height, width = 128, 64
        eye_size = 3  # identity 3x3

        # create checkerboard
        board = utils.create_checkerboard(height, width, 4)
        patch_src = torch.from_numpy(board).to(
            device=device,
            dtype=dtype).view(1, 1, height,
                              width).expand(batch_size, 1, height, width)

        # create base homography
        dst_homo_src = utils.create_eye_batch(batch_size,
                                              eye_size,
                                              device=device,
                                              dtype=dtype)

        # instantiate warper
        warper = kornia.HomographyWarper(height, width, align_corners=True)

        for i in range(self.num_tests):
            # generate homography noise
            homo_delta = torch.rand_like(dst_homo_src) * 0.3

            dst_homo_src_i = dst_homo_src + homo_delta

            # transform the points from dst to ref
            patch_dst = warper(patch_src, dst_homo_src_i)
            patch_dst_to_src = warper(patch_dst,
                                      _torch_inverse_cast(dst_homo_src_i))

            # same transform precomputing the grid
            warper.precompute_warp_grid(_torch_inverse_cast(dst_homo_src_i))
            patch_dst_to_src_precomputed = warper(patch_dst)
            assert (patch_dst_to_src_precomputed == patch_dst_to_src).all()

            # the projected patch should equal the initial one
            error = utils.compute_patch_error(patch_src, patch_dst_to_src,
                                              height, width)

            assert error.item() < self.threshold

            # check functional api
            patch_dst_to_src_functional = kornia.homography_warp(
                patch_dst,
                _torch_inverse_cast(dst_homo_src_i), (height, width),
                align_corners=True)

            assert_allclose(patch_dst_to_src,
                            patch_dst_to_src_functional,
                            atol=1e-4,
                            rtol=1e-4)
Example #10
    def test_translation(self, shape, device, dtype):
        # create input data
        offset = 2.0  # in pixel
        height, width = shape
        patch_src = torch.rand(1, 1, height, width, device=device, dtype=dtype)
        dst_homo_src = utils.create_eye_batch(batch_size=1, eye_size=3, device=device, dtype=dtype)
        dst_homo_src[..., 0, 2] = offset / (width - 1)  # apply offset in x

        # instantiate warper and warp from source to destination
        warper = kornia.HomographyWarper(height, width, align_corners=True)
        patch_dst = warper(patch_src, dst_homo_src)
        assert_close(patch_src[..., 1:], patch_dst[..., :-1], atol=1e-4, rtol=1e-4)
Example #11
    def test_identity(self, device, dtype):
        # create input data
        height, width = 2, 5
        patch_src = torch.rand(1, 1, height, width, device=device, dtype=dtype)
        dst_homo_src = utils.create_eye_batch(batch_size=1, eye_size=3, device=device, dtype=dtype)

        # instantiate warper
        warper = kornia.HomographyWarper(height, width, align_corners=True)

        # warp from source to destination
        patch_dst = warper(patch_src, dst_homo_src)
        assert_close(patch_src, patch_dst)
Example #12
    def forward(self, img_src, direc, scale):
        height, width = img_src.shape[-2:]

        self.warper = dgm.HomographyWarper(height, width)
        self.MyHomography = Homography(direc,
                                       height,
                                       width,
                                       N=img_src.shape[0],
                                       scale=scale)

        img_src_tensor = img_src / 255.

        warped_img_tensor = 255. * self.warper(img_src_tensor,
                                               self.MyHomography())
        return warped_img_tensor
Example #13
    def test_identity_resize(self, batch_shape):
        # create input data
        batch_size, channels, height, width = batch_shape
        patch_src = torch.rand(batch_size, channels, height, width)
        dst_homo_src = utils.create_eye_batch(batch_size, eye_size=3)

        # instantiate warper and warp from source to destination
        warper = kornia.HomographyWarper(height // 2, width // 2)
        patch_dst = warper(patch_src, dst_homo_src)

        # check the corners
        assert_allclose(patch_src[..., 0, 0], patch_dst[..., 0, 0])
        assert_allclose(patch_src[..., 0, -1], patch_dst[..., 0, -1])
        assert_allclose(patch_src[..., -1, 0], patch_dst[..., -1, 0])
        assert_allclose(patch_src[..., -1, -1], patch_dst[..., -1, -1])
Example #14
    def __init__(self, window_size=512, border=32):
        super(AlignLoss, self).__init__()
        self.window_size = window_size
        self.border = border

        self.warper = tgm.HomographyWarper(window_size,
                                           window_size,
                                           normalized_coordinates=True,
                                           mode="nearest")

        # NOTE: the attribute names and the wrapped criteria appear swapped:
        # `L1_criterion` holds an MSE (L2) loss and `L2_criterion` an L1 loss.
        self.L1_criterion = torch.nn.MSELoss()
        self.L1_criterion = self.L1_criterion.cuda()

        self.L2_criterion = torch.nn.L1Loss()
        self.L2_criterion = self.L2_criterion.cuda()
Example #15
    def test_identity_resize(self, batch_shape, device, dtype):
        # create input data
        batch_size, channels, height, width = batch_shape
        patch_src = torch.rand(batch_size, channels, height, width, device=device, dtype=dtype)
        dst_homo_src = utils.create_eye_batch(batch_size, eye_size=3, device=device, dtype=dtype)

        # instantiate warper and warp from source to destination
        warper = kornia.HomographyWarper(height // 2, width // 2, align_corners=True)
        patch_dst = warper(patch_src, dst_homo_src)

        # check the corners
        assert_close(patch_src[..., 0, 0], patch_dst[..., 0, 0], atol=1e-4, rtol=1e-4)
        assert_close(patch_src[..., 0, -1], patch_dst[..., 0, -1], atol=1e-4, rtol=1e-4)
        assert_close(patch_src[..., -1, 0], patch_dst[..., -1, 0], atol=1e-4, rtol=1e-4)
        assert_close(patch_src[..., -1, -1], patch_dst[..., -1, -1], atol=1e-4, rtol=1e-4)
Example #16
    def test_warp_grid_translation(self, shape, offset, device):
        # create input data
        height, width = shape
        dst_homo_src = utils.create_eye_batch(batch_size=1,
                                              eye_size=3).to(device)
        dst_homo_src[..., 0, 2] = offset  # apply offset in x

        # instantiate warper
        warper = kornia.HomographyWarper(height,
                                         width,
                                         normalized_coordinates=False)
        flow = warper.warp_grid(dst_homo_src)

        # the source grid plus the offset should equal the flow on the x-axis;
        # the y-axis remains the same.
        assert_allclose(warper.grid[..., 0].to(device) + offset, flow[..., 0])
        assert_allclose(warper.grid[..., 1].to(device), flow[..., 1])
Example #17
    def test_rotation(self, batch_shape):
        # create input data
        batch_size, channels, height, width = batch_shape
        patch_src = torch.rand(batch_size, channels, height, width)
        # rotation of 90deg
        dst_homo_src = utils.create_eye_batch(batch_size, 3)
        dst_homo_src[..., 0, 0] = 0.0
        dst_homo_src[..., 0, 1] = 1.0
        dst_homo_src[..., 1, 0] = -1.0
        dst_homo_src[..., 1, 1] = 0.0

        # instantiate warper and warp from source to destination
        warper = kornia.HomographyWarper(height, width)
        patch_dst = warper(patch_src, dst_homo_src)

        # check the corners
        assert_allclose(patch_src[..., 0, 0], patch_dst[..., 0, -1])
        assert_allclose(patch_src[..., 0, -1], patch_dst[..., -1, -1])
        assert_allclose(patch_src[..., -1, 0], patch_dst[..., 0, 0])
        assert_allclose(patch_src[..., -1, -1], patch_dst[..., -1, 0])
Example #18
    def test_homography_warper(self, batch_size, device_type):
        # generate input data
        height, width = 128, 64
        eye_size = 3  # identity 3x3
        device = torch.device(device_type)

        # create checkerboard
        board = utils.create_checkerboard(height, width, 4)
        patch_src = torch.from_numpy(board).view(1, 1, height, width).expand(
            batch_size, 1, height, width)
        patch_src = patch_src.to(device)

        # create base homography
        dst_homo_src = utils.create_eye_batch(batch_size, eye_size).to(device)

        # instantiate warper
        warper = kornia.HomographyWarper(height, width)

        for i in range(self.num_tests):
            # generate homography noise (all zeros here, so the perturbed
            # homography below stays the identity)
            homo_delta = torch.zeros_like(dst_homo_src)
            homo_delta[:, -1, -1] = 0.0

            dst_homo_src_i = dst_homo_src + homo_delta

            # transform the points from dst to ref
            patch_dst = warper(patch_src, dst_homo_src_i)
            patch_dst_to_src = warper(patch_dst, torch.inverse(dst_homo_src_i))

            # the projected patch should equal the initial one
            error = utils.compute_patch_error(patch_dst, patch_dst_to_src,
                                              height, width)

            assert error.item() < self.threshold

            # check functional api
            patch_dst_to_src_functional = kornia.homography_warp(
                patch_dst, torch.inverse(dst_homo_src_i), (height, width))

            assert utils.check_equal_torch(patch_dst_to_src,
                                           patch_dst_to_src_functional)
Example #19
def photometric_loss(H_norm, img_fix, img_move):
    b, c, h, w = img_fix.shape
    dev = img_fix.device

    # Since corner coordinates are assumed to lie in [-1, 1], the normalization matrix is given as follows:
    # N = torch.FloatTensor([[2/(w-1),0,-1],[0,2/(h-1),-1],[0,0,1]]).unsqueeze(0)
    # N_inv = torch.inverse(N)
    # # To compensate for negative movement, we should translate the image.
    # # T = torch.FloatTensor([[1,0,w],[0,1,h],[0,0,1]]).unsqueeze(0)

    # # The matrices should have the same batch size
    # N = torch.cat([N]*b).to(dev)
    # N_inv = torch.cat([N_inv]*b).to(dev)
    # # T = torch.cat([T]*b).to(dev)

    # # Denormalize
    # H = torch.einsum('bij,bjk->bik',N_inv,torch.einsum('bij,bjk->bik',H_norm,N))

    # # Compensate the translation
    # # H = torch.einsum('bij,bjk->bik',T,H)

    # # fixed image has only translation
    # # img_fix_transformed = kornia.warp_perspective(img_fix,T,(h*3,w*3))
    # img_fix_transformed = img_fix

    # # warp the second image
    # # img_move_transformed = kornia.warp_perspective(img_move,H,(h*3,w*3))
    # img_move_transformed = kornia.warp_perspective(img_move,H,(h,w))
    # # img_move_transformed[img_move_transformed != img_move_transformed]=0

    warper = kornia.HomographyWarper(h, w)
    img_move_transformed = warper(img_move, torch.inverse(H_norm))
    img_fix_transformed = img_fix

    # Compute L1 loss
    loss = F.l1_loss(img_move_transformed,
                     img_fix_transformed,
                     reduction='sum')

    return loss / (2 * h * w), img_move_transformed
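A minimal usage sketch of the loss above, assuming torch, kornia and torch.nn.functional as F are imported as in the snippet; an identity homography is passed, so the warped image equals img_move up to interpolation:

import torch

img_fix = torch.rand(2, 3, 64, 64)
img_move = torch.rand(2, 3, 64, 64)
H_norm = torch.eye(3).unsqueeze(0).repeat(2, 1, 1)  # identity: no motion

loss, img_warped = photometric_loss(H_norm, img_fix, img_move)
print(loss.item(), img_warped.shape)  # scalar loss and a (2, 3, 64, 64) tensor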
Example #20
def HomographyRegressionApp():
    # Training settings
    parser = argparse.ArgumentParser(
        description='Homography Regression with photometric loss.')
    parser.add_argument('--input-dir',
                        type=str,
                        required=True,
                        help='the path to the directory with the input data.')
    parser.add_argument('--output-dir',
                        type=str,
                        required=True,
                        help='the path to output the results.')
    parser.add_argument('--num-iterations',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='number of training iterations (default: 1000)')
    parser.add_argument('--lr',
                        type=float,
                        default=1e-3,
                        metavar='LR',
                        help='learning rate (default: 1e-3)')
    parser.add_argument('--cuda',
                        action='store_true',
                        default=False,
                        help='enables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=666,
                        metavar='S',
                        help='random seed (default: 666)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status',
    )
    parser.add_argument(
        '--log-interval-vis',
        type=int,
        default=100,
        metavar='N',
        help='how many batches to wait before visual logging training status',
    )
    args = parser.parse_args()

    # define the device to use for inference
    use_cuda = args.cuda and torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')

    torch.manual_seed(args.seed)

    # load the data
    img_src, _ = load_image(os.path.join(args.input_dir, 'img1.ppm'))
    img_dst, _ = load_image(os.path.join(args.input_dir, 'img2.ppm'))

    # instantiate the homography warper from `kornia`
    height, width = img_src.shape[-2:]
    warper = dgm.HomographyWarper(height, width)

    # create the homography as the parameter to be optimized
    dst_homo_src = MyHomography().to(device)

    # create optimizer
    optimizer = optim.Adam(dst_homo_src.parameters(), lr=args.lr)

    # main training loop

    for iter_idx in range(args.num_iterations):
        # send data to device
        img_src, img_dst = img_src.to(device), img_dst.to(device)

        # warp the reference image to the destination with the current homography
        img_src_to_dst = warper(img_src, dst_homo_src())

        # compute the photometric loss
        loss = F.l1_loss(img_src_to_dst, img_dst, reduction='none')

        # propagate the error just for a fixed window
        w_size = 100  # window size
        h_2, w_2 = height // 2, width // 2
        loss = loss[..., h_2 - w_size:h_2 + w_size, w_2 - w_size:w_2 + w_size]
        loss = torch.mean(loss)

        # compute gradient and update optimizer parameters
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if iter_idx % args.log_interval == 0:
            print(
                f'Train iteration: {iter_idx}/{args.num_iterations}\tLoss: {loss.item():.6}'
            )
            print(dst_homo_src.homo)

        def draw_rectangle(image, dst_homo_src):
            height, width = image.shape[:2]
            pts_src = torch.FloatTensor([[
                [-1, -1],  # top-left
                [1, -1],   # top-right
                [1, 1],    # bottom-right
                [-1, 1],   # bottom-left
            ]]).to(dst_homo_src.device)
            # transform points
            pts_dst = dgm.transform_points(torch.inverse(dst_homo_src),
                                           pts_src)

            def compute_factor(size):
                return 1.0 * size / 2

            def convert_coordinates_to_pixel(coordinates, factor):
                return factor * (coordinates + 1.0)

            # compute conversion factor
            x_factor = compute_factor(width - 1)
            y_factor = compute_factor(height - 1)
            pts_dst = pts_dst.cpu().squeeze().detach().numpy()
            pts_dst[..., 0] = convert_coordinates_to_pixel(pts_dst[..., 0], x_factor)
            pts_dst[..., 1] = convert_coordinates_to_pixel(pts_dst[..., 1], y_factor)

            # do the actual drawing
            for i in range(4):
                pt_i, pt_ii = tuple(pts_dst[i % 4]), tuple(pts_dst[(i + 1) % 4])
                image = cv2.line(image, pt_i, pt_ii, (255, 0, 0), 3)
            return image

        if iter_idx % args.log_interval_vis == 0:
            # merge warped and target image for visualization
            img_src_to_dst = warper(img_src, dst_homo_src())
            img_vis = 255.0 * 0.5 * (img_src_to_dst + img_dst)
            img_vis_np = dgm.utils.tensor_to_image(img_vis)
            image_draw = draw_rectangle(img_vis_np, dst_homo_src())
            # save warped image to disk
            file_name = os.path.join(args.output_dir, f'warped_{iter_idx}.png')
            cv2.imwrite(file_name, image_draw)
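load_image and MyHomography are defined elsewhere in the script. A minimal sketch of what the training loop assumes for MyHomography (a 3x3 parameter initialized to the identity and returned with a batch dimension; only the name comes from the snippet, the body is an assumption):

import torch
import torch.nn as nn


class MyHomography(nn.Module):

    def __init__(self) -> None:
        super().__init__()
        self.homo = nn.Parameter(torch.eye(3))

    def forward(self) -> torch.Tensor:
        return torch.unsqueeze(self.homo, dim=0)  # shape (1, 3, 3)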