Example #1
    def __getitem__(self, index):
        img_path = self.img_paths[index]
        img = io.imread(img_path)
        img = convert_to_rgb(img)

        h, w = (240, 240)
        img0 = cv2.resize(img.copy(), (w, h), interpolation=cv2.INTER_LINEAR)
        # img0 = random_crop(img, size=(h, w))
        img1, H, scale = homography_adaption(img0)

        if np.random.uniform(0, 1) > 0.5:
            # randomly swap the two images and invert H accordingly
            img0, img1 = img1, img0
            H = np.linalg.inv(H)
            # recompute the per-pixel scale of img1 relative to img0
            h, w = img1.shape[:2]
            scale = compute_scale(np.linalg.inv(H), h, w)
            scale = 1. / scale

        pix_pos0, pix_pos1 = sample_ground_truth(img0, H)
        _, msk = get_homography_correspondence(h, w, np.linalg.inv(H))
        msk = msk.astype(np.float32)

        # img = draw_corspd_region(img0, img1, H)
        # import matplotlib.pyplot as plt
        # print(scale)
        # plt.imshow(np.concatenate([img, draw_scale(scale)], axis=1))
        # plt.show()

        # img = draw_kps(img0, pix_pos0, img1, pix_pos1)
        # import matplotlib.pyplot as plt
        # _, (ax1, ax2) = plt.subplots(1, 2)
        # ax1.imshow(img)
        # ax2.imshow(msk)
        # plt.show()

        if self.transforms is not None:
            img0, pix_pos0 = self.transforms(img0, pix_pos0)
            img1, pix_pos1 = self.transforms(img1, pix_pos1)

        pix_pos2 = sample_negative(img1, pix_pos1)

        img0 = torch.tensor(img0).permute(2, 0, 1).float()
        img1 = torch.tensor(img1).permute(2, 0, 1).float()

        pix_pos0 = torch.tensor(pix_pos0).float()
        pix_pos1 = torch.tensor(pix_pos1).float()
        target = dict(kps0=pix_pos0,
                      kps1=pix_pos1,
                      kps2=pix_pos2,
                      H=H,
                      scale=scale,
                      msk=msk)

        return img0, img1, target, index
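
The helper homography_adaption is not shown in these examples. Below is a minimal sketch of what it might look like, assuming it samples a random perspective warp with OpenCV; the corner-jitter magnitude (0.15) is a made-up parameter, and the returned scale map is assumed to mirror how the callers above recompute it after swapping.

import cv2
import numpy as np

def homography_adaption(img):
    # hypothetical implementation: jitter the four corners and fit a
    # perspective transform between the original and jittered quads
    h, w = img.shape[:2]
    src = np.float32([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]])
    jitter = np.random.uniform(-0.15, 0.15, size=(4, 2)) * np.float32([w, h])
    dst = (src + jitter).astype(np.float32)
    H = cv2.getPerspectiveTransform(src, dst)
    img1 = cv2.warpPerspective(img, H, (w, h), flags=cv2.INTER_LINEAR)
    # per-pixel scale of img1 relative to img, matching the convention used
    # by the callers (1 / compute_scale(inv(H), h, w))
    scale = 1. / compute_scale(np.linalg.inv(H), h, w)
    return img1, H, scale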
Example #2
    def get_dataset_scale_range(self):
        """Plot a histogram of the per-pixel scale changes over the whole dataset."""
        scale_set = []
        for img0, img1, H in self.dataset:
            # compute the scale changes
            h1, w1 = img1.shape[:2]
            scale = compute_scale(np.linalg.inv(H), h1, w1)
            scale = 1. / scale
            # compute the valid pixels
            _, msk = get_homography_correspondence(h1, w1, np.linalg.inv(H))
            scale_set.append(scale[msk])
        scale_set = np.concatenate(scale_set)

        import matplotlib.pyplot as plt
        plt.hist(scale_set, bins=100)
        plt.show()
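
compute_scale is the core helper in this diagnostic: it estimates, for every pixel of an (h, w) grid, the local scale change induced by the homography H. A minimal sketch, under the assumption that scale is defined as the square root of the absolute Jacobian determinant of the projective map (x, y) -> (u/d, v/d):

import numpy as np

def compute_scale(H, h, w):
    ys, xs = np.mgrid[:h, :w]
    x, y = xs.astype(np.float64), ys.astype(np.float64)
    h11, h12, h13 = H[0]
    h21, h22, h23 = H[1]
    h31, h32, h33 = H[2]
    d = h31 * x + h32 * y + h33          # projective denominator
    u = h11 * x + h12 * y + h13          # numerator of x'
    v = h21 * x + h22 * y + h23          # numerator of y'
    # partial derivatives of (u/d, v/d) w.r.t. (x, y) via the quotient rule
    du_dx = (h11 * d - u * h31) / d ** 2
    du_dy = (h12 * d - u * h32) / d ** 2
    dv_dx = (h21 * d - v * h31) / d ** 2
    dv_dy = (h22 * d - v * h32) / d ** 2
    det = du_dx * dv_dy - du_dy * dv_dx  # Jacobian determinant (area change)
    return np.sqrt(np.abs(det)).astype(np.float32)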
Example #3
    def __getitem__(self, index):
        """
        Note that unfortunately we have to reisze all images to 640x480 to allow
        batch training
        """
        # index = 11
        img0_path, img1_path, H_path = self.img_paths[index]
        # img0 = convert_to_rgb(io.imread(img0_path))
        # img1 = convert_to_rgb(io.imread(img1_path))
        img0, scale_h0, scale_w0 = read_img(img0_path, (240, 240))
        img1, scale_h1, scale_w1 = read_img(img1_path, (240, 240))

        # this information is needed for rescaling H
        # H = read_H(H_path)
        scale_ratio = scale_h0, scale_w0, scale_h1, scale_w1
        H = read_H(H_path, scale_ratio)

        # compute the pixel scale ratio of img1 to img0 for each pixel in img1
        h, w = img1.shape[:2]
        scale = compute_scale(np.linalg.inv(H), h, w)
        scale = 1. / scale

        # compute the mask that indicates region having corresponding pixels
        _, msk = get_homography_correspondence(h, w, np.linalg.inv(H))
        msk = msk.astype(np.float32)

        # compute the pixel scale ratio of img0 to img1 for each pixel in img0
        h, w = img0.shape[:2]
        left_scale = compute_scale(H, h, w)
        left_scale = 1. / left_scale

        # generate training targets, positive and negative
        targets = sample_training_targets(img0, img1, H)
        targets['H'] = H
        targets['scale'] = torch.tensor(scale).unsqueeze(0)
        targets['left_scale'] = torch.tensor(left_scale).unsqueeze(0)
        targets['msk'] = torch.tensor(msk).unsqueeze(0)

        # img = draw_corspd_region(img0, img1, H)
        # import matplotlib.pyplot as plt
        # _, (ax1, ax2, ax3) = plt.subplots(1, 3)
        # ax1.imshow(img)
        # ax2.imshow(draw_scale(left_scale))
        # ax3.imshow(draw_scale(scale))
        # plt.show()

        # img = draw_kps(img0, targets['kps0'], img1, targets['kps1'])
        # import matplotlib.pyplot as plt
        # plt.imshow(np.concatenate([img0, img], axis=1))
        # plt.show()

        # convert target points to tensor
        # use as_tensor: 'scale', 'left_scale' and 'msk' are already tensors,
        # and torch.tensor() would warn and needlessly copy them
        targets = {k: torch.as_tensor(v).float() for k, v in targets.items()}

        if self.transforms is not None:
            img0 = self.transforms(img0)
            img1 = self.transforms(img1)

        img0 = torch.tensor(img0).permute(2, 0, 1).float()
        img1 = torch.tensor(img1).permute(2, 0, 1).float()

        return img0, img1, targets, index
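
read_H has to compensate for the resizing done by read_img: if S0 and S1 are the scaling matrices applied to img0 and img1, the homography between the resized images is S1 @ H @ inv(S0). A minimal sketch, assuming HPatches-style plain-text 3x3 files and that read_img returns new/original size ratios:

import numpy as np

def read_H(H_path, scale_ratio):
    scale_h0, scale_w0, scale_h1, scale_w1 = scale_ratio
    H = np.loadtxt(H_path)                  # assumption: plain-text 3x3 matrix
    S0 = np.diag([scale_w0, scale_h0, 1.])  # resized img0 <- original img0
    S1 = np.diag([scale_w1, scale_h1, 1.])  # resized img1 <- original img1
    # x1_resized = S1 @ H @ inv(S0) @ x0_resized in homogeneous coordinates
    return S1 @ H @ np.linalg.inv(S0)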
Example #4
def sample_ground_truth(img, H):
    """Sample corresponding pixel pairs between img and its warp under H."""
    h, w, _ = img.shape
    pix_pos, msk = get_homography_correspondence(h, w, H)
    pix_pos0, pix_pos1 = uniform_sample_correspondence(img, pix_pos, msk)
    return pix_pos0, pix_pos1
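
Neither helper used here is shown. Below is a minimal sketch, assuming get_homography_correspondence warps the full pixel grid by H and masks locations that fall outside a same-sized target image, and that uniform_sample_correspondence draws a fixed number of valid pairs (the count of 1000 is hypothetical):

import numpy as np

def get_homography_correspondence(h, w, H):
    ys, xs = np.mgrid[:h, :w]
    grid = np.stack([xs, ys, np.ones_like(xs)], axis=-1).reshape(-1, 3)
    warped = grid.astype(np.float64) @ H.T
    warped = warped[:, :2] / warped[:, 2:]   # dehomogenize
    pix_pos = warped.reshape(h, w, 2)
    # keep pixels whose correspondence lands inside the target image
    msk = (pix_pos[..., 0] >= 0) & (pix_pos[..., 0] <= w - 1) & \
          (pix_pos[..., 1] >= 0) & (pix_pos[..., 1] <= h - 1)
    return pix_pos, msk

def uniform_sample_correspondence(img, pix_pos, msk, num=1000):
    ys, xs = np.nonzero(msk)
    idx = np.random.randint(0, len(xs), num)  # sample with replacement
    pix_pos0 = np.stack([xs[idx], ys[idx]], axis=1).astype(np.float32)
    pix_pos1 = pix_pos[ys[idx], xs[idx]].astype(np.float32)
    return pix_pos0, pix_pos1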