Code Example #1
import cv2
import torch
import torchvision
import torchvision.transforms.functional as F

# SuperPoint, SuperPointSettings, HomographyConfig, load_checkpoint_for_inference,
# homography_adaptation, get_points and draw_points come from the
# Kolkir/superpoint sources.


def test_homographic_adaptation(weights_path, image_path):
    homo_config = HomographyConfig()
    settings = SuperPointSettings()
    settings.cuda = True
    net = SuperPoint(settings)
    net.eval()  # switch the network to inference mode
    load_checkpoint_for_inference(weights_path, net)

    image = torchvision.io.read_image(image_path,
                                      torchvision.io.image.ImageReadMode.GRAY)
    image = image.to(dtype=torch.float32) / 255.

    # ratio-preserving resize: scale so both sides cover the 240x320 target,
    # then center-crop to exactly that size
    _, img_h, img_w = image.shape
    scale_h = 240 / img_h
    scale_w = 320 / img_w
    scale_max = max(scale_h, scale_w)
    new_size = [int(img_h * scale_max), int(img_w * scale_max)]
    image = F.resize(
        image,
        new_size,
        interpolation=torchvision.transforms.InterpolationMode.BILINEAR)
    image = F.center_crop(image, [240, 320])

    image.unsqueeze_(dim=0)  # add batch dimension

    if settings.cuda:
        net = net.cuda()
        image = image.cuda()

    img_h, img_w = image.shape[2], image.shape[3]

    probs_with_adaptation = homography_adaptation(image, net, homo_config)
    points_with_adaptation = get_points(probs_with_adaptation, img_h, img_w,
                                        settings).T

    probs, _, _ = net(image)
    points = get_points(probs, img_h, img_w, settings).T

    # Draw result
    original_img = image.squeeze(dim=0).permute(1, 2, 0).data.cpu()
    original_img = cv2.UMat(original_img.numpy())
    original_img_with_adaptation = image.squeeze(dim=0).permute(1, 2,
                                                                0).data.cpu()
    original_img_with_adaptation = cv2.UMat(
        original_img_with_adaptation.numpy())

    draw_points(original_img, points, color=(255, 255, 255))
    draw_points(original_img_with_adaptation,
                points_with_adaptation,
                color=(255, 255, 255))

    cv2.imshow("Image", original_img)
    cv2.imshow("Adaptation", original_img_with_adaptation)
    cv2.waitKey(delay=0)  # block until a key is pressed
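
Several of the examples on this page draw detections with a draw_points helper. A minimal sketch of what it is assumed to do, inferred only from the calls above (this is not the project's actual implementation): each row of points is [x, y, confidence], drawn as a filled circle.

import cv2


def draw_points(img, points, color):
    # points: iterable of rows [x, y, confidence]; confidence is ignored here
    for point in points:
        center = (int(round(float(point[0]))), int(round(float(point[1]))))
        cv2.circle(img, center, 3, color, -1, lineType=cv2.LINE_AA)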
Code Example #2
    def add_image_summary(self, name, image, prob_map, labels):
        img_h = image.shape[2]
        img_w = image.shape[3]
        # predicted points from the first batch item
        points = get_points(prob_map[0, :, :].unsqueeze(dim=0).cpu(), img_h,
                            img_w, self.settings)
        # ground-truth points reconstructed from the labels
        true_prob_map = make_prob_map_from_labels(labels[0, :, :].cpu().numpy(),
                                                  img_h, img_w,
                                                  self.settings.cell)
        true_points = get_points(true_prob_map[0, :, :].unsqueeze(dim=0),
                                 img_h, img_w, self.settings)
        frame = image[0, :, :, :].cpu().numpy()
        res_img = (frame * 255.).astype('uint8')
        res_img = np.transpose(res_img, [1, 2, 0])  # CHW -> HWC (OpenCV format)
        res_img = cv2.UMat(res_img)
        for point in points.T:  # draw predicted points
            point_int = (int(round(point[0])), int(round(point[1])))
            cv2.circle(res_img, point_int, 3, (255, 0, 0), -1,
                       lineType=cv2.LINE_AA)
        for point in true_points.T:  # draw ground-truth points
            point_int = (int(round(point[0])), int(round(point[1])))
            cv2.circle(res_img, point_int, 1, (0, 255, 0), -1,
                       lineType=cv2.LINE_AA)
        # TensorBoard expects CHW, so transpose back after drawing
        self.summary_writer.add_image(f'Detector {name} result/train',
                                      res_img.get().transpose([2, 0, 1]),
                                      self.global_train_index)
Code Example #3
File: synthetic-test.py Project: Kolkir/superpoint
def show_data(name, image, point_labels, color, settings):
    img_h, img_w = image.shape[1:]
    prob_map = make_prob_map_from_labels(point_labels.numpy(), img_h, img_w,
                                         settings.cell)
    points = get_points(prob_map, img_h, img_w, settings)
    points = points.T
    # Draw result
    original_img = image.permute(1, 2, 0).data.cpu()
    original_img = original_img.numpy()
    original_img = cv2.UMat(original_img)
    draw_points(original_img, points, color=color)
    cv2.imshow(name, original_img)
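
For reference, a hypothetical sketch of the label-to-probability-map conversion that make_prob_map_from_labels is assumed to perform, inferred from how the calls above use it (settings.cell == 8, class cell * cell reserved as the "no keypoint" dustbin; the project's real implementation may differ):

import torch


def make_prob_map_from_labels_sketch(labels, img_h, img_w, cell=8):
    # labels: (img_h // cell) x (img_w // cell) array of per-cell classes
    prob_map = torch.zeros(1, img_h, img_w)
    for cy in range(img_h // cell):
        for cx in range(img_w // cell):
            label = int(labels[cy, cx])
            if label != cell * cell:  # skip the "no keypoint" dustbin
                prob_map[0, cy * cell + label // cell,
                         cx * cell + label % cell] = 1.0
    return prob_map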
Code Example #4
    def add_mask_image_summary(self, name, mask, labels, prob_map):
        img_h = prob_map.shape[1]
        img_w = prob_map.shape[2]
        points = get_points(prob_map[0, :, :].unsqueeze(dim=0).cpu(), img_h,
                            img_w, self.settings)
        points = points.T
        points[:, [0, 1]] = points[:, [1, 0]]  # swap columns from (x, y) to (y, x)
        predictions = make_points_labels(points, img_h, img_w, self.settings.cell)

        # label 64 (cell * cell for cell == 8) is the "no keypoint" dustbin class
        frame_predictions = (predictions != 64)
        frame_labels = (labels[0, :, :] != 64).cpu().numpy()
        frame = mask[0, 0, :, :].cpu().numpy()
        # stack mask, labels and predictions as the three image channels
        res_img = (np.dstack((frame, frame_labels, frame_predictions)) * 255.).astype('uint8')
        self.summary_writer.add_image(f'Detector {name} result/train',
                                      res_img.transpose([2, 0, 1]),
                                      self.global_train_index)
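
The inverse helper, make_points_labels, is assumed to encode each point into one of cell * cell + 1 classes per cell, with index 64 as the empty dustbin that the comparisons above test against. A hypothetical sketch under those assumptions:

import numpy as np


def make_points_labels_sketch(points, img_h, img_w, cell=8):
    # points: rows of (y, x[, confidence]); one class label per cell
    labels = np.full((img_h // cell, img_w // cell), cell * cell, dtype=np.int64)
    for point in points:
        y, x = int(point[0]), int(point[1])
        labels[y // cell, x // cell] = (y % cell) * cell + (x % cell)
    return labels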
Code Example #5
File: inferencewrapper.py Project: Kolkir/superpoint
    def run(self, img):
        """ Process an image to extract points and descriptors.
        Input
          img - HxW float32 input image in range [0,1].
        Output
          points - 3xN numpy array with corners [x_i, y_i, confidence_i]^T.
          descriptors - 256xN numpy array of corresponding unit-normalized
                        descriptors.
        """
        with torch.no_grad():
            input_tensor = self.prepare_input(img)
            img_h, img_w = input_tensor.shape[2], input_tensor.shape[3]

            point_prob_map, descriptors_map, _ = self.net(input_tensor)

            points = get_points(point_prob_map, img_h, img_w, self.settings)
            descriptors = get_descriptors(points, descriptors_map, img_h,
                                          img_w, self.settings)

            return points, descriptors
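
Hypothetical usage of run() (only the input contract, an HxW float32 image in [0, 1], comes from the docstring; the wrapper construction is assumed):

import cv2

img = cv2.imread('test.png', cv2.IMREAD_GRAYSCALE)
img = img.astype('float32') / 255.  # HxW float32 in [0, 1]
points, descriptors = wrapper.run(img)  # wrapper: an initialized inference wrapper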
Code Example #6
File: inferencewrapper.py Project: Kolkir/superpoint
    def run_with_homography_adaptation(self, img, config):
        """ Process an image to extract points using homography adaptation.
        Input
          img - NxCxHxW float32 input image in range [0,1].
        Output
          points_list - list of N numpy arrays, one per batch image, each
                        holding corners [x_i, y_i, confidence_i]^T.
        """
        with torch.no_grad():
            input_tensor = self.prepare_input(img)
            img_h, img_w = input_tensor.shape[2], input_tensor.shape[3]

            # average detector responses over random homographic warps
            batch_prob_map = homography_adaptation(input_tensor, self.net,
                                                   config)

            prob_maps = torch.unbind(batch_prob_map)

            points_list = []
            for prob_map in prob_maps:
                points = get_points(prob_map.unsqueeze(0), img_h, img_w,
                                    self.settings)
                points_list.append(points)

            return points_list
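
Conceptually, homography_adaptation averages the detector's probability maps over random homographic warps of the input to make detections more repeatable. A simplified, hypothetical sketch (sample_homography is a stand-in name; the project's version is driven by HomographyConfig and also tracks validity masks):

import torch


def homography_adaptation_sketch(image, net, num_samples=100):
    probs, _, _ = net(image)  # NxHxW detector response on the original image
    counts = torch.ones_like(probs)
    for _ in range(num_samples):
        h = sample_homography()  # stand-in: a random 1x3x3 homography
        warped_probs, _, _ = net(homography_transform(image, h))
        # warp the response back and count only pixels that stayed in view
        h_inv = invert_homography(h)
        probs = probs + homography_transform(warped_probs.unsqueeze(1),
                                             h_inv).squeeze(1)
        counts = counts + homography_transform(torch.ones_like(image),
                                               h_inv)[:, 0]
    return probs / counts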
Code Example #7
def test_homography(image):
    # Generate random feature points
    num_points = 20
    img_h = image.shape[2]
    img_w = image.shape[3]
    ys = torch.randint(0, img_h, (num_points,))
    xs = torch.randint(0, img_w, (num_points,))
    points = torch.stack([ys, xs], dim=1)  # points in (y, x) order

    # Sample random homography transform and apply transformation
    homography_config = HomographyConfig()

    homographies = []
    batch_size = 8
    for _ in range(batch_size):
        warped_image, warped_points, valid_mask, homography = homographic_augmentation(
            image, points, homography_config)
        homographies.append(homography)
    homographies = torch.stack(homographies)
    homography = homographies[batch_size - 1]  # keep the last sampled homography

    warped_points = warp_points(points, homographies)
    if batch_size > 1:
        warped_points = warped_points[batch_size - 1]
    warped_points = filter_points(warped_points,
                                  [image.shape[2], image.shape[3]])

    assert torch.numel(warped_points) > 0
    points = points.numpy()
    warped_points = warped_points.numpy()

    # Test prob maps
    settings = SuperPointSettings()
    point_labels = make_points_labels(points, img_h, img_w, settings.cell)
    prob_map = make_prob_map_from_labels(point_labels, img_h, img_w,
                                         settings.cell)
    points = get_points(prob_map, img_h, img_w, settings).T
    # swap x and y columns
    points[:, [0, 1]] = points[:, [1, 0]]

    warped_point_labels = make_points_labels(warped_points, img_h, img_w,
                                             settings.cell)
    prob_map = make_prob_map_from_labels(warped_point_labels, img_h, img_w,
                                         settings.cell)
    prob_map = prob_map * valid_mask
    warped_points = get_points(prob_map, img_h, img_w, settings).T
    # swap x and y columns
    warped_points[:, [0, 1]] = warped_points[:, [1, 0]]

    # Test inverse transform
    homography.unsqueeze_(dim=0)  # add batch dimension
    h_inv = invert_homography(homography)
    restored_image = homography_transform(warped_image, h_inv)

    # Draw result
    original_img = cv2.UMat(image.squeeze(dim=0).permute(1, 2, 0).numpy())
    warped_img = cv2.UMat(warped_image.squeeze(dim=0).permute(1, 2, 0).numpy())
    restored_img = restored_image.squeeze(dim=0).permute(1, 2, 0).numpy()
    mask_img = valid_mask.permute(1, 2, 0).numpy().astype(np.uint8)
    mask_img = mask_img * 255

    draw_points(original_img, points, color=(0, 255, 0))
    draw_points(warped_img, warped_points, color=(0, 0, 255))

    cv2.imshow("Original image", original_img)
    cv2.imshow("Warped image", warped_img)
    cv2.imshow("Restored image", restored_img)
    cv2.imshow("Mask", mask_img)