Example No. 1
def test_warp_perspective_rotation(batch_shape, device):
    # generate input data
    batch_size, channels, height, width = batch_shape
    alpha = 0.5 * kornia.pi * torch.ones(batch_size).to(device)  # 90 deg rotation

    # create data patch
    patch = torch.rand(batch_shape).to(device)

    # create transformation (rotation)
    M = torch.eye(3, device=device).repeat(batch_size, 1, 1)  # Bx3x3
    M[:, 0, 0] = torch.cos(alpha)
    M[:, 0, 1] = -torch.sin(alpha)
    M[:, 1, 0] = torch.sin(alpha)
    M[:, 1, 1] = torch.cos(alpha)

    # apply transformation and inverse
    _, _, h, w = patch.shape
    patch_warped = kornia.warp_perspective(patch, M, dsize=(height, width), align_corners=True)
    patch_warped_inv = kornia.warp_perspective(
        patch_warped, torch.inverse(M), dsize=(height, width), align_corners=True)

    # generate mask to compute error
    mask = torch.ones_like(patch)
    mask_warped_inv = kornia.warp_perspective(
        kornia.warp_perspective(mask, M, dsize=(height, width), align_corners=True),
        torch.inverse(M),
        dsize=(height, width), align_corners=True)

    assert_allclose(mask_warped_inv * patch,
                    mask_warped_inv * patch_warped_inv)
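The rotation above is taken about the origin (top-left pixel), which is why the comparison is masked to the region that survives the round trip. A minimal sketch, not from the source and with illustrative shapes, of the same warp/inverse-warp round trip about the image center using kornia's rotation helpers:

import torch
import kornia

img = torch.rand(1, 1, 32, 32)
center = torch.tensor([[15.5, 15.5]])  # ((w - 1) / 2, (h - 1) / 2)
angle = 90. * torch.ones(1)
scale = torch.ones(1, 2)
aff = kornia.get_rotation_matrix2d(center, angle, scale)  # Bx2x3
H = kornia.convert_affinematrix_to_homography(aff)  # Bx3x3
out = kornia.warp_perspective(img, H, dsize=(32, 32))
back = kornia.warp_perspective(out, torch.inverse(H), dsize=(32, 32))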
Example No. 2
def homography_adaptation(image, probs, model, device, config):
    probs = probs.unsqueeze(0).unsqueeze(0)
    probs[probs.le(config['detection_threshold'])] = 0

    for i in range(config['homography_adaptation']['num']):
        flat_homography = sample_homography(
            image.shape[2:], **config['homography_adaptation']['homographies'])

        warped_image = kornia.warp_perspective(
            image,
            torch.tensor(flat2mat(flat_homography),
                         dtype=torch.float).to(device),
            dsize=(image.shape[2], image.shape[3]))
        warped_prob = model(warped_image)['probs'].unsqueeze(0).unsqueeze(0)

        unwarped_prob = kornia.warp_perspective(
            warped_prob,
            torch.tensor(flat2mat(invert_homography(flat_homography)),
                         dtype=torch.float).to(device),
            dsize=(image.shape[2], image.shape[3]))
        unwarped_prob[unwarped_prob.le(config['detection_threshold'])] = 0

        probs = torch.cat((probs, unwarped_prob))

    if config['homography_adaptation']['aggregation'] == 'sum':
        probs = probs.squeeze().sum(dim=0)

    if 'nms' in config and config['nms']:
        probs = non_maximum_supression(probs, config['nms'],
                                       config['iou_threshold'],
                                       config['top_k'])

    return probs
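A hedged sketch of the warp → detect → unwarp cycle at the heart of this function, with a fixed translation homography standing in for the repo-specific sample_homography()/flat2mat() helpers and a random map standing in for the detector output:

import torch
import kornia

image = torch.rand(1, 1, 64, 64)
H = torch.eye(3).unsqueeze(0)
H[:, 0, 2] = 4.0  # shift 4 px along x
warped = kornia.warp_perspective(image, H, dsize=(64, 64))
prob = torch.rand(1, 1, 64, 64)  # stand-in for model(warped)['probs']
unwarped = kornia.warp_perspective(prob, torch.inverse(H), dsize=(64, 64))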
Example No. 3
def main(args):
    if args.resume != "":
        model = HomographyModel.load_from_checkpoint(args.resume)
    else:
        model_dir = 'lightning_logs/version*'
        model_dir_list = sorted(glob.glob(model_dir))
        model_dir = model_dir_list[-1]
        model_path = osp.join(model_dir, "checkpoints", "*.ckpt")
        model_path_list = sorted(glob.glob(model_path))
        if len(model_path_list) > 0:
            model_path = model_path_list[-1]
            print(model_path)
            # model = HomographyModel.load_from_checkpoint(model_path)
            model = HomographyModel()  # test-only: a slimmed-down version of the class is redefined internally
            model_old = torch.load(model_path, map_location=lambda storage, loc: storage)
            # print(model_old.keys())
            # net.load_state_dict(torch.load('path/params.pkl'))
            model.load_state_dict(model_old['state_dict'])
            print("model loaded.")
        else:
            raise ValueError('No model checkpoint found to load!')

    model.eval()  # inference only, no training

    test_set = SyntheticDataset(args.test_path, rho=args.rho, filetype=args.filetype, pic_size=pic_size, patch_size=patch_size)

    # clear the previous output
    last_output = "figures/*"
    os.system("rm " + last_output)
    print('Cleared previous output.')
    for i in range(args.n):
        img_a, img_b, patch_a, patch_b, corners, delta = test_set[i]
        # after unsqueeze to save
        # tensors_to_gif(patch_a, patch_b, f"figures/input_{i}.gif")
        tensors_to_gif(img_a, img_b, f"figures/input_{i}.gif")

        # add a batch dimension
        img_a = img_a.unsqueeze(0)
        img_b = img_b.unsqueeze(0)
        patch_a = patch_a.unsqueeze(0)
        patch_b = patch_b.unsqueeze(0)
        corners = corners.unsqueeze(0)

        corners = corners - corners[:, 0].view(-1, 1, 2)
        delta_hat = model(patch_a, patch_b)
        corners_hat = corners + delta_hat
        # compute the homography h
        h = kornia.get_perspective_transform(corners, corners_hat)
        h_inv = torch.inverse(h)

        patch_b_hat = kornia.warp_perspective(img_a, h_inv, (patch_a.shape[-2], patch_a.shape[-1]))  # 128 was the original size; note: warp img_a, not patch_a
        img_b_hat = kornia.warp_perspective(img_a, h_inv, (img_a.shape[-2], img_a.shape[-1]))
        # save outputs

        tensors_to_gif(patch_b_hat[0], patch_b[0], f"figures/output_patch{i}.gif")
        tensors_to_gif(img_b_hat[0], img_b[0], f"figures/output_{i}.gif")
Example No. 4
    def stitch(self, images, Masks, ratio=0.8, reprojThresh=4.0):
        (image_left, image_center, image_right) = images
        (Maskleft_image, Maskcenter_image, Maskright_image) = Masks

        ##############EXECUTED ONLY ONE TIME##############
        if self.cachedHlc is None or self.cachedHrc is None:

            (kpsLeft, featuresLeft) = self.detectAndDescribe(image_left)
            (kpsCenter, featuresCenter) = self.detectAndDescribe(image_center)
            (kpsRight, featuresRight) = self.detectAndDescribe(image_right)

            M_left_center = self.matchKeypoints(kpsLeft, kpsCenter,
                                                featuresLeft, featuresCenter,
                                                ratio, reprojThresh)
            M_right_center = self.matchKeypoints(kpsRight, kpsCenter,
                                                 featuresRight, featuresCenter,
                                                 ratio, reprojThresh)

            #if M_left_center is None or M_right_center is None:
            #	return None
            self.cachedHlc = M_left_center[1]
            self.cachedHrc = M_right_center[1]

        ##################################################

        import kornia
        import torch

        result_width = 3200
        T = np.array(
            [[1.0, 0.0, (result_width / 2) - (images[0].shape[1] / 2)],
             [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])

        ## warp the original image by the found transform
        #data_warp: torch.tensor = kornia.warp_perspective(data.float(), M, dsize=(h, w))

        transformations = [
            self.cachedHlc,
            np.identity(3, dtype=np.float32), self.cachedHrc
        ]
        result = np.zeros(
            (Masks[0].shape[0], result_width, 3)).astype(np.float32)
        weights = np.zeros_like(result)

        for i in range(len(Masks)):
            # kornia.warp_perspective expects BxCxHxW tensors and a (h, w)
            # dsize (OpenCV uses (w, h)), so convert the numpy inputs and
            # convert the warped results back
            M = torch.from_numpy(np.dot(T, transformations[i])).float()[None]
            mask_t = kornia.image_to_tensor(Masks[i], keepdim=False).float()
            warp = kornia.tensor_to_image(
                kornia.warp_perspective(
                    mask_t, M,
                    (images[i].shape[0], result_width))).astype(np.float32)
            weight = kornia.tensor_to_image(
                kornia.warp_perspective(
                    torch.ones_like(mask_t), M,
                    (images[i].shape[0], result_width))).astype(np.float32)
            result = cv2.addWeighted(result, 1.0, warp, 1.0, 0.0)
            weights = cv2.addWeighted(weights, 1.0, weight, 1.0, 0.0)

        return np.uint8(result / weights)
Example No. 5
    def forward(self, x1, x2, h_matrix):
        # define the network structure
        y1, g1_1, g1_2, g1_3 = self.encoder1(x1)
        z1 = self._h_a1(y1)
        #print(z1.device)
        z1_hat, z1_likelihoods = self.entropy_bottleneck1(z1)
        gmm1 = self._h_s1(z1_hat)  # the three GMM parameters
        y1_hat, y1_likelihoods = self.gaussian1(y1, gmm1[0], gmm1[1],
                                                gmm1[2])  # sigma

        # #save
        # save_y1_hat = y1_hat.cpu().numpy()
        # data = {"y1_hat":save_y1_hat}
        # np.save('y1_hat.npy',data)
        #
        # raise ValueError("stop")
        # #end save

        x1_hat, g1_4, g1_5, g1_6 = self.decoder1(y1_hat)

        #############################################
        #encoder
        x1_warp = kornia.warp_perspective(x1, h_matrix,
                                          (x1.size()[-2], x1.size()[-1]))
        y2 = self.encoder2(x1_warp, x2)
        ##end encoder

        # hyper for pic2
        z2 = self._h_a2(y2)
        z2_hat, z2_likelihoods = self.entropy_bottleneck2(z2)
        gmm2 = self._h_s2(z2_hat, y1_hat)  # the three GMM parameters

        y2_hat, y2_likelihoods = self.gaussian2(y2, gmm2[0], gmm2[1],
                                                gmm2[2])  # temporary as well; the GMM handling is to be revised
        # end hyper for pic2

        ##decoder
        x1_hat_warp = kornia.warp_perspective(
            x1_hat, h_matrix, (x1_hat.size()[-2], x1_hat.size()[-1]))
        x2_hat = self.decoder2(y2_hat, x1_hat_warp)
        #end decoder
        # print(x1.size())

        return {
            'x1_hat': x1_hat,
            'x2_hat': x2_hat,
            'likelihoods': {
                'y1': y1_likelihoods,
                'y2': y2_likelihoods,
                'z1': z1_likelihoods,
                'z2': z2_likelihoods,
            }
        }
Example No. 6
    def forward(self, x1_hat, x2_hat, h_matrix):
        x1_hat_warp = kornia.warp_perspective(
            x1_hat, h_matrix, (x1_hat.size()[-2], x1_hat.size()[-1]))
        # end decoder
        # print(x1.size())
        h_inv = torch.inverse(h_matrix)
        x2_hat_warp = kornia.warp_perspective(
            x2_hat, h_inv, (x2_hat.size()[-2], x2_hat.size()[-1]))

        # after enhancement
        x1_hat2 = self.EH1(x1_hat, x2_hat_warp)
        x2_hat2 = self.EH2(x2_hat, x1_hat_warp)

        return {'x1_hat': x1_hat2, 'x2_hat': x2_hat2}
Example No. 7
    def __call__(self, sample: dict) -> dict:
        image = sample['image'].unsqueeze(0)

        if 'label' in sample.keys():
            label = sample['label'].float().unsqueeze(0).unsqueeze(0)

        B, _, H, W = image.shape
        rotated_image, transform = self.rotate(image)

        if 'label' in sample.keys():
            rotated_label = K.warp_perspective(label,
                                               transform,
                                               dsize=(H, W),
                                               flags='nearest')
            rotated_label = rotated_label.int()

            sample.update({
                'image': rotated_image[0, ...],
                'label': rotated_label[0, 0, ...],
            })

        else:
            sample.update({
                'image': rotated_image[0, ...],
            })

        return sample
Example No. 8
    def test_crop(self, batch_size, channels, device, dtype):
        # generate input data
        src_h, src_w = 3, 3
        dst_h, dst_w = 3, 3

        # [x, y] origin
        # top-left, top-right, bottom-right, bottom-left
        points_src = torch.tensor(
            [[[0, 0], [0, src_w - 1], [src_h - 1, src_w - 1], [src_h - 1, 0]]],
            device=device,
            dtype=dtype)

        # [x, y] destination
        # top-left, top-right, bottom-right, bottom-left
        points_dst = torch.tensor(
            [[[0, 0], [0, dst_w - 1], [dst_h - 1, dst_w - 1], [dst_h - 1, 0]]],
            device=device,
            dtype=dtype)

        # compute transformation between points
        dst_trans_src = kornia.get_perspective_transform(
            points_src, points_dst).expand(batch_size, -1, -1)

        # warp tensor
        patch = torch.tensor([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12],
                                [13, 14, 15, 16]]]],
                             device=device,
                             dtype=dtype).expand(batch_size, channels, -1, -1)

        expected = patch[..., :3, :3]

        # warp and assert
        patch_warped = kornia.warp_perspective(patch, dst_trans_src,
                                               (dst_h, dst_w))
        assert_close(patch_warped, expected)
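The test above exercises the crop-by-homography pattern. A minimal sketch, with illustrative shapes that are not part of the test, of cropping an arbitrary axis-aligned region the same way:

import torch
import kornia

img = torch.rand(1, 1, 5, 5)
# region corners in (x, y) order: top-left, top-right, bottom-right, bottom-left
src = torch.tensor([[[1., 1.], [3., 1.], [3., 3.], [1., 3.]]])
dst = torch.tensor([[[0., 0.], [2., 0.], [2., 2.], [0., 2.]]])
M = kornia.get_perspective_transform(src, dst)
crop = kornia.warp_perspective(img, M, dsize=(3, 3))  # 3x3 crop of the region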
Example No. 9
 def test_cardinality(self, device, dtype, batch_shape, out_shape):
     batch_size, channels, height, width = batch_shape
     h_out, w_out = out_shape
     img_b = torch.rand(batch_size, channels, height, width, device=device, dtype=dtype)
     H_ab = kornia.eye_like(3, img_b)
     img_a = kornia.warp_perspective(img_b, H_ab, (h_out, w_out))
     assert img_a.shape == (batch_size, channels, h_out, w_out)
Example No. 10
    def forward(self, images):
        batch_size = images.shape[0]

        data = []
        for i in range(6):
            img_warp = kornia.warp_perspective(
                images[:, i, :, :, :],
                self.M_matrices[i].unsqueeze(0).repeat(batch_size, 1, 1),
                dsize=(204, 306))
            img_warp = kornia.center_crop(img_warp, (192, 288))
            out = self.darknet(img_warp)
            data.append(out.unsqueeze(0))

        agg = torch.cat(data, dim=0)
        agg = torch.max(agg, dim=0)[0]
        agg = agg.view(agg.size(0), 1024, -1)
        agg = self.lin1(agg)

        boxes = agg.view(agg.size(0), -1)
        boxes = self.lin2(boxes)
        boxes = self.classifier(boxes)
        boxes = boxes.view(-1, self.feature_size, self.feature_size,
                           5 * self.num_bboxes)

        return boxes
Example No. 11
 def forward(self, x, M_matrices):
     data = []  # list to store all the feature maps from the multiple views
     # use a shared-weight CNN for the 6 views
     for i in range(6):
         #get a batch of *same* view images
         img_batch = x[:, i, :, :, :]  #torch.stack(x)[:,i,:,:,:]
         if self.BEV:  #perform BEV transform: M - (batch_size, 3, 3)
             img_warp = kornia.warp_perspective(
                 img_batch,
                 M_matrices[i].unsqueeze(0).repeat(len(x), 1, 1),
                 dsize=(219, 306))
             x1 = self.inc(img_warp)
         else:
             x1 = self.inc(img_batch)
         x2 = self.down1(x1)
         x3 = self.down2(x2)
         x4 = self.down3(x3)
         #x5 = self.down4(x4)
         data.append(x4.unsqueeze(0))
     data = torch.cat(data, dim=0)
     #max pool feature maps from multi-view.
     agg = torch.max(
         data, dim=0
     )[0]  #get the max values among 6 views, shape: [batch_size, 512, 16, 19]
     #interpolate up
     x = self.up1(agg)
     x = self.up2(x)
     #x = self.up3(x)
     x = self.up_map(x)  #last one to match output 800x800
     x = self.outc(x)
     return x
Example No. 12
    def _convert(self):

        src_img = kornia.image_to_tensor(self.src_img, keepdim=False)

        dst_h, dst_w = 800, 800

        # Compute perspective transform
        M = kornia.get_perspective_transform(self.points_src, self.points_dst)

        # Image to BEV transformation
        dst_img = kornia.warp_perspective(src_img.float(),
                                          M,
                                          dsize=(dst_h, dst_w),
                                          flags='bilinear',
                                          border_mode='zeros')

        # remove unwanted portion of BEV image. e.g for FRONT view dst point should not be higher than 450.
        if 'FRONT' in self.path:
            dst_img[:, :, 400:, :] = 0

        if 'BACK' in self.path:
            dst_img[:, :, :400, :] = 0

        if 'LEFT' in self.path:
            dst_img[:, :, :, 400:] = 0

        if 'RIGHT' in self.path:
            dst_img[:, :, :, :400] = 0

        dst_img = kornia.tensor_to_image(dst_img.byte())
        return dst_img
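A hedged sketch of the numpy-image/tensor round trip used above, with hypothetical source points in place of self.points_src/self.points_dst:

import numpy as np
import torch
import kornia

img_np = (np.random.rand(600, 800, 3) * 255).astype(np.uint8)  # HxWxC uint8
t = kornia.image_to_tensor(img_np, keepdim=False).float()  # 1x3x600x800
pts_src = torch.tensor([[[100., 100.], [700., 100.], [700., 500.], [100., 500.]]])
pts_dst = torch.tensor([[[0., 0.], [799., 0.], [799., 799.], [0., 799.]]])
M = kornia.get_perspective_transform(pts_src, pts_dst)
bev = kornia.warp_perspective(t, M, dsize=(800, 800))
out = kornia.tensor_to_image(bev.byte())  # back to HxWxC numpy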
Example No. 13
def warp(pert_tensor, bbox_src, bbox_dest):
    '''
    Input: pert_tensor : Tensor (1, 3, H, W) -- warp_perspective expects a batched 4-D input
           bbox_src and bbox_dest: (B, 4) boxes in (x, y, w, h) form
    Output: Tensor (B, 3, H, W)
    '''

    if isinstance(bbox_src, torch.Tensor):
        bbox_src = bbox_src.cpu()
        bbox_dest = bbox_dest.cpu()
    bbox_src = np.array(bbox_src).reshape(-1, 4)
    bbox_dest = np.array(bbox_dest).reshape(-1, 4)

    masks = list()
    for i in range(bbox_src.shape[0]):
        x, y, w, h = bbox_src[i]
        points_src = torch.FloatTensor([[
            [x, y],
            [x + w, y],
            [x, y + h],
            [x + w, y + h],
        ]])
        x, y, w, h = bbox_dest[i]
        points_dst = torch.FloatTensor([[
            [x, y],
            [x + w, y],
            [x, y + h],
            [x + w, y + h],
        ]])

        M = kornia.get_perspective_transform(points_src,
                                             points_dst).to(pert_tensor.device)
        size = pert_tensor.shape[-2:]
        masks.append(kornia.warp_perspective(pert_tensor, M, size))
    return torch.cat(masks)
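A hedged usage sketch for the function above, with hypothetical boxes in (x, y, w, h) form and the batched 4-D pert_tensor noted in the docstring:

import torch

pert = torch.rand(1, 3, 64, 64)  # batched perturbation
src = torch.tensor([[0., 0., 64., 64.]])  # full-frame source box
dst = torch.tensor([[16., 16., 32., 32.]])  # squeeze it into a 32x32 region
out = warp(pert, src, dst)  # -> (1, 3, 64, 64)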
Example No. 14
def photometric_loss(delta, img_a, patch_b, corners):
    corners_hat = corners + delta

    # in order to apply transform and center crop,
    # subtract points by top-left corner (corners[N, 0])
    # print("ori_corners:",corners[0:1])
    corners = corners - corners[:, 0].view(-1, 1, 2)  # key step: distinguishes the large-transform from the small-transform case

    h = kornia.get_perspective_transform(corners, corners_hat)

    h_inv = torch.inverse(h)  # invert the homography
    # patch_b_hat = kornia.warp_perspective(img_a, h_inv, (128, 128))
    patch_b_hat = kornia.warp_perspective(
        img_a, h_inv, (patch_b.shape[-2], patch_b.shape[-1]))
    # patch_b_hat2 = kornia.warp_perspective(img_a, h_inv, (img_a.shape[-2],img_a.shape[-1]))
    # print("corners:",corners[0:1])
    # print("corners_hat:",corners_hat[0:1])
    # print("H:",h[0:1])
    # print(img_a.size())
    # print(patch_b.size())
    # print(patch_b_hat.size())
    #
    # save_pic(img_a[0:1, :], "img_a.png")
    # save_pic(patch_b[0:1, :], "patch_b.png")
    # save_pic(patch_b_hat[0:1, :], "patch_b_hat.png")
    # save_pic(patch_b_hat2[0:1, :], "patch_b_hat2.png")
    # raise  ValueError("print size")

    return F.l1_loss(patch_b_hat, patch_b)
Example No. 15
    def forward(self, imgs, visualize=False):
        B, N, C, H, W = imgs.shape
        assert N == self.num_cam
        world_features = []
        imgs_result = []
        for cam in range(self.num_cam):
            img_feature = self.base_pt1(imgs[:, cam].to('cuda:1'))
            img_feature = self.base_pt2(img_feature.to('cuda:0'))
            img_feature = F.interpolate(img_feature,
                                        self.upsample_shape,
                                        mode='bilinear')
            img_res = self.img_classifier(img_feature.to('cuda:0'))
            imgs_result.append(img_res)
            proj_mat = self.proj_mats[cam].repeat([B, 1,
                                                   1]).float().to('cuda:0')
            # head, *foot*
            world_feature = kornia.warp_perspective(
                img_res[:, 1].unsqueeze(1).to('cuda:0'), proj_mat,
                self.reducedgrid_shape)
            if visualize:
                plt.imshow(img_res[0, 0].detach().cpu().numpy())
                plt.show()
                plt.imshow(world_feature[0, 0].detach().cpu().numpy())
                plt.show()
            world_features.append(world_feature.to('cuda:0'))

        world_features = torch.cat(
            world_features +
            [self.coord_map.repeat([B, 1, 1, 1]).to('cuda:0')],
            dim=1)
        map_result = self.map_classifier(world_features.to('cuda:0'))
        map_result = F.interpolate(map_result,
                                   self.reducedgrid_shape,
                                   mode='bilinear')
        return map_result, imgs_result
Example No. 16
def stitch(x, M_matrices, M_rotations, M_flip, label=True):
    #Preprocessing: image stitch
    data = []  # list to store all the feature maps from the multiple views
    for i in range(6):
        #get a batch of *same* view images
        img_batch = x[:, i, :, :, :]  # torch.stack(x)[:,i,:,:,:] #
        img_warp = kornia.warp_perspective(img_batch,
                                           M_matrices[i].unsqueeze(0).repeat(
                                               len(x), 1, 1),
                                           dsize=(219, 306))
        img_rotated = kornia.warp_affine(img_warp,
                                         M_rotations[i].unsqueeze(0).repeat(
                                             len(x), 1, 1),
                                         dsize=(219, 306))
        data.append(img_rotated)

    data = torch.cat(data, dim=0).view(6, len(x), 3, 219, 306)
    #max pool feature maps from multi-view:black canvas and ensemble
    h, w = 219, 306
    #print(h,w)
    agg = torch.zeros((x.shape[0], 3, 2 * h,
                       2 * w))  #[batch_size, 3 ,h, w], twice width/height
    if torch.cuda.is_available():
        agg = agg.cuda()
    #two bases: front and back view
    agg[:, :, 0:h, (w - w // 2):(w + w // 2)] = data[1]
    agg[:, :, h:, (w - w // 2):(w + w // 2)] = data[4]
    #top left
    agg[:, :, (0 + 55):(h + 55), (0 + 55):(w + 55)] = torch.max(
        data[0], agg[:, :, (0 + 55):(h + 55), (0 + 55):(w + 55)])
    #top right
    agg[:, :, (0 + 55):(h + 55), (w - 55):(-55)] = torch.max(
        data[2], agg[:, :, (0 + 55):(h + 55), (w - 55):(-55)])
    #bottom left
    agg[:, :, (h - 55):(-55), (0 + 55):(w + 55)] = torch.max(
        data[3], agg[:, :, (h - 55):(-55), (0 + 55):(w + 55)])
    #bottom right
    agg[:, :, (h - 55):(-55),
        (w - 55):(-55)] = torch.max(data[5], agg[:, :, (h - 55):(-55),
                                                 (w - 55):(-55)])

    #center-crop
    crop_fn = kornia.augmentation.CenterCrop(size=438)
    agg = crop_fn(agg)

    #flip 90 degree
    agg = kornia.warp_affine(agg,
                             M_flip.repeat(len(x), 1, 1),
                             dsize=(438, 438))

    #Normalize color
    if label:
        normalize = K.Normalize(torch.tensor([0.698, 0.718, 0.730]),
                                torch.tensor([0.322, 0.313, 0.308]))
    else:
        normalize = K.Normalize(torch.tensor([0.548, 0.597, 0.630]),
                                torch.tensor([0.339, 0.340, 0.342]))

    return normalize(agg)
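A hedged sketch of the per-view warp-then-rotate step, with illustrative stand-in matrices (the real M_matrices and M_rotations come from the dataset's calibration):

import torch
import kornia

view = torch.rand(2, 3, 256, 306)  # a batch of one camera view
M_persp = torch.eye(3).unsqueeze(0).repeat(2, 1, 1)  # stand-in perspective matrix
M_rot = kornia.get_rotation_matrix2d(torch.tensor([[152.5, 109.]]),
                                     torch.tensor([90.]),
                                     torch.ones(1, 2)).repeat(2, 1, 1)
warped = kornia.warp_perspective(view, M_persp, dsize=(219, 306))
rotated = kornia.warp_affine(warped, M_rot, dsize=(219, 306))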
Example No. 17
    def forward(self, x, M_matrices, M_rotations):
        #Preprocessing: image stitch
        data = []  # list to store all the feature maps from the multiple views
        for i in range(6):
            #get a batch of *same* view images
            img_batch = x[:, i, :, :, :]  #torch.stack(x)[:,i,:,:,:]
            img_warp = kornia.warp_perspective(
                img_batch,
                M_matrices[i].unsqueeze(0).repeat(len(x), 1, 1),
                dsize=(219, 306))
            img_rotated = kornia.warp_affine(
                img_warp,
                M_rotations[i].unsqueeze(0).repeat(len(x), 1, 1),
                dsize=(219, 306))
            data.append(img_rotated)

        data = torch.cat(data, dim=0).view(6, len(x), 3, 219, 306)
        #max pool feature maps from multi-view:black canvas and ensemble
        h, w = 219, 306
        #print(h,w)
        agg = torch.zeros((x.shape[0], 3, 2 * h,
                           2 * w))  #[batch_size, 3 ,h, w], twice width/height
        if torch.cuda.is_available():
            agg = agg.cuda()
        #two bases: front and back view
        agg[:, :, 0:h, (w - w // 2):(w + w // 2)] = data[1]
        agg[:, :, h:, (w - w // 2):(w + w // 2)] = data[4]
        #top left
        agg[:, :, (0 + 55):(h + 55), (0 + 55):(w + 55)] = torch.max(
            data[0], agg[:, :, (0 + 55):(h + 55), (0 + 55):(w + 55)])
        #top right
        agg[:, :, (0 + 55):(h + 55), (w - 55):(-55)] = torch.max(
            data[2], agg[:, :, (0 + 55):(h + 55), (w - 55):(-55)])
        #bottom left
        agg[:, :, (h - 55):(-55), (0 + 55):(w + 55)] = torch.max(
            data[3], agg[:, :, (h - 55):(-55), (0 + 55):(w + 55)])
        #bottom right
        agg[:, :, (h - 55):(-55), (w - 55):(-55)] = torch.max(
            data[5], agg[:, :, (h - 55):(-55), (w - 55):(-55)])

        #center-crop
        crop_fn = kornia.augmentation.CenterCrop(size=438)
        agg = crop_fn(agg)

        ###CNN: convolve down
        x1 = self.inc(agg)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)  # shape: [batch_size, 256, 255, 38]; scale factor around 8, so the 55-pixel shift becomes roughly 55/8 ≈ 7 pixels

        ###CNN: interpolate up
        x = self.up1(x4)
        x = self.up2(x)
        #x = self.up3(x)
        x = self.up_map(x)  #last one to match output 800x800
        x = self.outc(x)
        return x
Example No. 18
def test_warp_perspective_rotation(batch_shape, device):
    # generate input data
    batch_size, channels, height, width = batch_shape
    alpha = 0.5 * kornia.pi * torch.ones(batch_size).to(
        device)  # 90 deg rotation

    # create data patch
    patch = torch.rand(batch_shape).to(device)

    # create transformation (rotation)
    M = torch.eye(3, device=device).repeat(batch_size, 1, 1)  # Bx3x3
    M[:, 0, 0] = torch.cos(alpha)
    M[:, 0, 1] = -torch.sin(alpha)
    M[:, 1, 0] = torch.sin(alpha)
    M[:, 1, 1] = torch.cos(alpha)

    # apply transformation and inverse
    _, _, h, w = patch.shape
    patch_warped = kornia.warp_perspective(patch,
                                           M,
                                           dsize=(height, width),
                                           align_corners=True)
    patch_warped_inv = kornia.warp_perspective(patch_warped,
                                               torch.inverse(M),
                                               dsize=(height, width),
                                               align_corners=True)

    # generate mask to compute error
    mask = torch.ones_like(patch)
    mask_warped_inv = kornia.warp_perspective(
        kornia.warp_perspective(mask, M, dsize=(height, width), align_corners=True),
        torch.inverse(M),
        dsize=(height, width),
        align_corners=True)

    assert_allclose(mask_warped_inv * patch,
                    mask_warped_inv * patch_warped_inv)

    # evaluate function gradient
    patch = utils.tensor_to_gradcheck_var(patch)  # to var
    M = utils.tensor_to_gradcheck_var(M, requires_grad=False)  # to var
    assert gradcheck(kornia.warp_perspective,
                     (patch, M, (height, width)),
                     raise_exception=True)
Example No. 19
    def test_exception(self, device, dtype):
        img = torch.rand(1, 2, 3, 4, device=device, dtype=dtype)
        homo = torch.eye(3, device=device, dtype=dtype)[None]
        size = (4, 5)

        with pytest.raises(TypeError):
            assert kornia.warp_perspective(0., homo, size)

        with pytest.raises(TypeError):
            assert kornia.warp_perspective(img, 0., size)

        with pytest.raises(ValueError):
            img = torch.rand(2, 3, 4, device=device, dtype=dtype)
            assert kornia.warp_perspective(img, homo, size)

        with pytest.raises(ValueError):
            homo = torch.eye(2, 2, device=device, dtype=dtype)[None]
            assert kornia.warp_perspective(img, homo, size)
Example No. 20
    def test_crop_center_resize(self, device, dtype):
        # generate input data
        dst_h, dst_w = 4, 4

        # [x, y] origin
        # top-left, top-right, bottom-right, bottom-left
        points_src = torch.tensor([[
            [1, 1],
            [1, 2],
            [2, 2],
            [2, 1],
        ]],
                                  device=device,
                                  dtype=dtype)

        # [x, y] destination
        # top-left, top-right, bottom-right, bottom-left
        points_dst = torch.tensor([[
            [0, 0],
            [0, dst_w - 1],
            [dst_h - 1, dst_w - 1],
            [dst_h - 1, 0],
        ]],
                                  device=device,
                                  dtype=dtype)

        # compute transformation between points
        dst_trans_src = kornia.get_perspective_transform(
            points_src, points_dst)

        # warp tensor
        patch = torch.tensor([[[
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16],
        ]]],
                             device=device,
                             dtype=dtype)

        expected = torch.tensor([[[[5.1667, 5.6111, 6.0556, 6.5000],
                                   [6.9444, 7.3889, 7.8333, 8.2778],
                                   [8.7222, 9.1667, 9.6111, 10.0556],
                                   [10.5000, 10.9444, 11.3889, 11.8333]]]],
                                device=device,
                                dtype=dtype)

        # warp and assert
        patch_warped = kornia.warp_perspective(patch, dst_trans_src,
                                               (dst_h, dst_w))
        assert_allclose(patch_warped, expected, rtol=1e-4, atol=1e-4)

        # check jit
        patch_warped_jit = kornia.jit.warp_perspective(patch, dst_trans_src,
                                                       (dst_h, dst_w))
        assert_allclose(patch_warped, patch_warped_jit, rtol=1e-4, atol=1e-4)
Example No. 21
    def test_crop(self, batch_size, channels, device, dtype):
        # generate input data
        src_h, src_w = 3, 3
        dst_h, dst_w = 3, 3

        # [x, y] origin
        # top-left, top-right, bottom-right, bottom-left
        points_src = torch.tensor([[
            [0, 0],
            [0, src_w - 1],
            [src_h - 1, src_w - 1],
            [src_h - 1, 0],
        ]],
                                  device=device,
                                  dtype=dtype)

        # [x, y] destination
        # top-left, top-right, bottom-right, bottom-left
        points_dst = torch.tensor([[
            [0, 0],
            [0, dst_w - 1],
            [dst_h - 1, dst_w - 1],
            [dst_h - 1, 0],
        ]],
                                  device=device,
                                  dtype=dtype)

        # compute transformation between points
        dst_trans_src = kornia.get_perspective_transform(
            points_src, points_dst).expand(batch_size, -1, -1)

        # warp tensor
        patch = torch.tensor([[[
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16],
        ]]],
                             device=device,
                             dtype=dtype).expand(batch_size, channels, -1, -1)

        expected = torch.tensor(
            [[[[0.2500, 0.9167, 1.5833], [2.1667, 5.1667, 6.5000],
               [4.8333, 10.5000, 11.8333]]]],
            device=device,
            dtype=dtype).repeat(batch_size, channels, 1, 1)

        # warp and assert
        patch_warped = kornia.warp_perspective(patch, dst_trans_src,
                                               (dst_h, dst_w))
        assert_allclose(patch_warped, expected, rtol=1e-4, atol=1e-4)

        # check jit
        patch_warped_jit = kornia.jit.warp_perspective(patch, dst_trans_src,
                                                       (dst_h, dst_w))
        assert_allclose(patch_warped, patch_warped_jit, rtol=1e-4, atol=1e-4)
Example No. 22
 def sample(self, idx, points_source):
     r = self.dataloader._all_records.iloc[idx]
     image_id = r['Image']
     _device = points_source.device
     img = self.dataloader.read_image(image_id, r['flipped'])
     img = torchvision.transforms.functional.to_tensor(img).to(_device)
     px = points_source[:,0] * img.shape[1]
     py = points_source[:,1] * img.shape[2]
     p = torch.stack([px, py], dim=1)
     M = kornia.get_perspective_transform(p.unsqueeze(0), self.points_dest.to(_device))
     return kornia.warp_perspective(img.unsqueeze(0), M, dsize=(self.h, self.w))[0]
Example No. 23
 def test_smoke(self, device, dtype):
     batch_size, channels, height, width = 1, 2, 3, 4
     img_b = torch.rand(batch_size,
                        channels,
                        height,
                        width,
                        device=device,
                        dtype=dtype)
     H_ab = kornia.eye_like(3, img_b)
     img_a = kornia.warp_perspective(img_b, H_ab, (height, width))
     assert_allclose(img_b, img_a)
Example No. 24
    def test_rotation_inverse(self, device, dtype):
        h, w = 4, 4
        img_b = torch.rand(1, 1, h, w, device=device, dtype=dtype)

        # create rotation matrix of 90deg (anti-clockwise)
        center = torch.tensor([[w - 1, h - 1]], device=device, dtype=dtype) / 2
        scale = torch.ones((1, 2), device=device, dtype=dtype)
        angle = 90. * torch.ones(1, device=device, dtype=dtype)
        aff_ab = kornia.get_rotation_matrix2d(center, angle, scale)
        # Same as opencv: cv2.getRotationMatrix2D(((w-1)/2,(h-1)/2), 90., 1.)

        H_ab = kornia.convert_affinematrix_to_homography(aff_ab)  # Bx3x3

        # warp the tensor
        # Same as opencv: cv2.warpPerspective(kornia.tensor_to_image(img_b), H_ab[0].numpy(), (w, h))
        img_a = kornia.warp_perspective(img_b, H_ab, (h, w))

        # invert the transform
        H_ba = torch.inverse(H_ab)
        img_b_hat = kornia.warp_perspective(img_a, H_ba, (h, w))
        assert_allclose(img_b_hat, img_b, rtol=1e-4, atol=1e-4)
Example No. 25
    def get_warped_3WW(self, photos_63hw, return_up=False, offset=5):
        img_warp_63WW = torch.zeros(6, 3, n_W, n_W)
        for idx, (M, photo_3hw) in enumerate(
                zip(self.CAM_matrices_6_3_3, photos_63hw)):
            img_warp_63WW[idx] = kornia.warp_perspective(self._clip_photo(
                photo_3hw[None], return_up=return_up, offset=offset),
                                                         torch.Tensor(M),
                                                         dsize=(n_W, n_W))[0]

        count_3WW = (img_warp_63WW > 0).sum(0)
        mean_img_warp_3WW = img_warp_63WW.sum(0) / (1e-6 + count_3WW)
        return torch.flip(mean_img_warp_3WW,
                          (1, 2)) if return_up else mean_img_warp_3WW
Example No. 26
def photometric_loss(delta, img_a, patch_b, corners):
    corners_hat = corners + delta

    # in order to apply transform and center crop,
    # subtract points by top-left corner (corners[N, 0])
    corners = corners - corners[:, 0].view(-1, 1, 2)

    h = kornia.get_perspective_transform(corners, corners_hat)

    h_inv = torch.inverse(h)
    patch_b_hat = kornia.warp_perspective(img_a, h_inv, (128, 128))

    return F.l1_loss(patch_b_hat, patch_b)
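A hedged usage sketch with hypothetical shapes: patch_b is cut from img_a at the given corners and the predicted delta is zero, so the loss should come out near zero:

import torch
import torch.nn.functional as F
import kornia

img_a = torch.rand(2, 1, 240, 320)
patch_b = img_a[..., 32:160, 32:160]  # a 128x128 crop
corners = torch.tensor([[[32., 32.], [160., 32.],
                         [160., 160.], [32., 160.]]]).repeat(2, 1, 1)
delta = torch.zeros_like(corners)
loss = photometric_loss(delta, img_a, patch_b, corners)  # ~0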
Example No. 27
    def test_crop_center_resize(self, device, dtype):
        # generate input data
        dst_h, dst_w = 4, 4

        # [x, y] origin
        # top-left, top-right, bottom-right, bottom-left
        points_src = torch.tensor([[
            [1, 1],
            [1, 2],
            [2, 2],
            [2, 1],
        ]],
                                  device=device,
                                  dtype=dtype)

        # [x, y] destination
        # top-left, top-right, bottom-right, bottom-left
        points_dst = torch.tensor([[
            [0, 0],
            [0, dst_w - 1],
            [dst_h - 1, dst_w - 1],
            [dst_h - 1, 0],
        ]],
                                  device=device,
                                  dtype=dtype)

        # compute transformation between points
        dst_trans_src = kornia.get_perspective_transform(
            points_src, points_dst)

        # warp tensor
        patch = torch.tensor([[[
            [1, 2, 3, 4],
            [5, 6, 7, 8],
            [9, 10, 11, 12],
            [13, 14, 15, 16],
        ]]],
                             device=device,
                             dtype=dtype)

        expected = torch.tensor([[[[6.0000, 6.3333, 6.6667, 7.0000],
                                   [7.3333, 7.6667, 8.0000, 8.3333],
                                   [8.6667, 9.0000, 9.3333, 9.6667],
                                   [10.0000, 10.3333, 10.6667, 11.0000]]]],
                                device=device,
                                dtype=dtype)

        # warp and assert
        patch_warped = kornia.warp_perspective(patch, dst_trans_src,
                                               (dst_h, dst_w))
        assert_allclose(patch_warped, expected)
Example No. 28
    def test_translation(self, device, dtype):
        offset = 1.
        h, w = 3, 4

        img_b = torch.arange(float(h * w), device=device, dtype=dtype).view(1, 1, h, w)
        homo_ab = kornia.eye_like(3, img_b)
        homo_ab[..., :2, -1] += offset

        expected = torch.zeros_like(img_b)
        expected[..., 1:, 1:] = img_b[..., :2, :3]

        # Same as opencv: cv2.warpPerspective(kornia.tensor_to_image(img_b), homo_ab[0].numpy(), (w, h))
        img_a = kornia.warp_perspective(img_b, homo_ab, (h, w))
        assert_allclose(img_a, expected, atol=1e-4, rtol=1e-4)
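Worth noting for readers coming from OpenCV: kornia's dsize is (height, width), the opposite of cv2.warpPerspective's (width, height). A minimal sketch of the same (+1, +1) translation, illustrative and not part of the test:

import torch
import kornia

img = torch.arange(12.).view(1, 1, 3, 4)
H = kornia.eye_like(3, img)
H[..., :2, -1] += 1.0  # translate by (+1, +1) in (x, y)
out = kornia.warp_perspective(img, H, dsize=(3, 4))  # dsize is (h, w), not (w, h)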
Example No. 29
def warp_transform(imgs, M_matrices):
    '''
    input: tuple of tensors, each element of shape [6, 3, 256, 306]
    for eval: imgs contains a single element only
    '''
    data = []
    for i in range(6):  #loop through each view
        img_batch = imgs[:, i, :, :, :].cuda()
        img_batch = kornia.warp_perspective(img_batch,
                                            M_matrices[i],
                                            dsize=(204, 306))
        img_batch = kornia.center_crop(img_batch, (192, 288))
        data.append(img_batch.unsqueeze(0))
    return torch.cat(data, dim=0)
Example No. 30
    def stn(self, x, ret_theta=False):
        points_src = self.locnet(x)
        points_src = torch.clamp(points_src, -1, 1)
        points_src = (points_src + 1.) * 63.5  # rescale to actual coordinate
        points_src = points_src.view(-1, 4, 2)

        B = points_src.shape[0]
        points_dst = self.points_dst.repeat(B, 1, 1)

        M = kornia.get_perspective_transform(points_src, points_dst)
        x = kornia.warp_perspective(x, M, dsize=(self.h, self.w))

        if ret_theta:
            return x, points_src
        return x
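A hedged sketch of the same spatial-transformer idea outside the class, with a hypothetical identity correspondence in place of the learned locnet output; the warp stays differentiable with respect to the points:

import torch
import kornia

x = torch.rand(8, 1, 128, 128)
points_dst = torch.tensor([[[0., 0.], [127., 0.],
                            [127., 127.], [0., 127.]]]).repeat(8, 1, 1)
points_src = points_dst.clone().requires_grad_()  # would come from a locnet
M = kornia.get_perspective_transform(points_src, points_dst)
out = kornia.warp_perspective(x, M, dsize=(128, 128))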