Example #1
def get_non_matches_corr(img_b_shape, uv_a, uv_b_matches, num_masked_non_matches_per_match=10, device='cpu'):
    ## sample non-matches
    uv_b_matches = uv_b_matches.squeeze()
    uv_b_matches_tuple = uv_to_tuple(uv_b_matches)
    uv_b_non_matches_tuple = correspondence_finder.create_non_correspondences(
        uv_b_matches_tuple,
        img_b_shape,
        num_non_matches_per_match=num_masked_non_matches_per_match,
        img_b_mask=None)

    ## pair each uv_a coordinate with its sampled non-matches in image b
    uv_a_tuple, uv_b_non_matches_tuple = \
        create_non_matches(uv_to_tuple(uv_a), uv_b_non_matches_tuple, num_masked_non_matches_per_match)
    return uv_a_tuple, uv_b_non_matches_tuple
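A minimal invocation sketch (not from the source): it assumes `correspondence_finder` and the `uv_to_tuple` / `create_non_matches` helpers used above are importable, and that `uv_a` / `uv_b_matches` are (N, 2) pixel-coordinate tensors; the shapes and values are illustrative only.

import torch

H, W = 480, 640
uv_a = torch.randint(0, 480, (100, 2))          # match coordinates in image a
uv_b_matches = torch.randint(0, 480, (100, 2))  # matched coordinates in image b
uv_a_tuple, uv_b_non_tuple = get_non_matches_corr(
    (H, W), uv_a, uv_b_matches, num_masked_non_matches_per_match=10)
# every uv_a entry is repeated 10x and paired with 10 sampled non-matches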
Example #2

import torch

def get_first_term_corr(img_b_shape,
                        uv_a,
                        uv_b_matches,
                        num_masked_non_matches_per_match=10,
                        device='cpu'):
    ## sample non matches
    uv_b_matches = uv_b_matches.squeeze()
    uv_b_matches_tuple = uv_to_tuple(uv_b_matches)
    uv_b_non_matches_tuple = correspondence_finder.create_non_correspondences(
        uv_b_matches_tuple,
        img_b_shape,
        num_non_matches_per_match=num_masked_non_matches_per_match,
        img_b_mask=None)

    # repeat each matched coordinate num_masked_non_matches_per_match times
    # so it can be compared element-wise against the sampled non-matches
    uv_b_long = (torch.t(uv_b_matches_tuple[0].repeat(
        num_masked_non_matches_per_match, 1)).contiguous().view(-1, 1),
                 torch.t(uv_b_matches_tuple[1].repeat(
                     num_masked_non_matches_per_match,
                     1)).contiguous().view(-1, 1))

    uv_a_tuple, uv_b_non_matches_tuple = \
        create_non_matches(uv_to_tuple(uv_a), uv_b_non_matches_tuple, num_masked_non_matches_per_match)
    return uv_a_tuple, uv_b_long, uv_b_non_matches_tuple
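The `torch.t(x.repeat(k, 1)).contiguous().view(-1, 1)` idiom used for `uv_b_long` repeats every element k times while keeping the original order; a self-contained check:

import torch

x = torch.tensor([3, 7, 9])
k = 2
tiled = torch.t(x.repeat(k, 1)).contiguous().view(-1, 1)
print(tiled.squeeze().tolist())  # [3, 3, 7, 7, 9, 9]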
Example #3
import torch

def reliability_loss(descriptors_a,
                     descriptors_b,
                     image_b_pred,
                     reliability_a,
                     reliability_b,
                     uv_a,
                     uv_b,
                     img_b_shape,
                     aplosser,
                     method='1d',
                     device='cpu',
                     reli_base=0.5):
    uv_b = uv_b.squeeze()
    uv_b_matches_tuple = uv_to_tuple(uv_b)
    uv_b_non_matches_tuple = correspondence_finder.create_non_correspondences(
        uv_b_matches_tuple,
        img_b_shape,
        num_non_matches_per_match=1,
        img_b_mask=None)
    _, uv_b_non_matches_tuple = create_non_matches(uv_to_tuple(uv_a),
                                                   uv_b_non_matches_tuple,
                                                   1)
    uv_b_non = tuple_to_uv(uv_b_non_matches_tuple).squeeze().transpose(
        1, 0)  # negative-sample coordinates, one per match

    # generate negative descriptors
    if method == '2d':

        def sampleDescriptors(image_a_pred, matches_a, mode, norm=False):
            # bilinearly sample the dense descriptor map at the given
            # (normalized) coordinates
            image_a_pred = image_a_pred.unsqueeze(0)  # torch [1, D, H, W]
            matches_a.unsqueeze_(0).unsqueeze_(2)
            matches_a_descriptors = torch.nn.functional.grid_sample(
                image_a_pred, matches_a, mode=mode, align_corners=True)
            matches_a_descriptors = matches_a_descriptors.squeeze(
            ).transpose(0, 1)

            if norm:
                dn = torch.norm(matches_a_descriptors, p=2,
                                dim=1)  # compute the norm of each descriptor
                matches_a_descriptors = matches_a_descriptors.div(
                    torch.unsqueeze(dn, 1))  # divide by norm to normalize
            return matches_a_descriptors

        matches_b_non = normPts(
            uv_b_non,
            torch.tensor([img_b_shape[1], img_b_shape[0]]).float())
        descriptors_b_non = sampleDescriptors(image_b_pred,
                                              matches_b_non.to(device),
                                              mode='bilinear',
                                              norm=False)
    else:
        # index the flattened descriptor map at the negative coordinates
        matches_b_non = uv_to_1d(uv_b_non, img_b_shape[1])
        descriptors_b_non = torch.index_select(
            image_b_pred, 1,
            matches_b_non.long().to(device))

    # average the two reliability maps at the match locations
    qconf = reliability_a[:, uv_a[:, 1].long(), uv_a[:, 0].long()] + \
            reliability_b[:, uv_b[:, 1].long(), uv_b[:, 0].long()]
    qconf /= 2

    # similarity of positive pairs vs. negative pairs
    pscores = (descriptors_a * descriptors_b).sum(-1)[:, None]
    nscores = (descriptors_a * descriptors_b_non).sum(-1)[:, None]
    scores = torch.cat((pscores, nscores), dim=1)

    gt = torch.zeros_like(scores, dtype=torch.uint8)
    gt[:, :pscores.shape[1]] = 1

    # AP loss weighted by reliability: unreliable locations fall back
    # to the constant reli_base
    ap = aplosser(scores, gt)
    ap_loss = 1 - ap * qconf - (1 - qconf) * reli_base

    return ap_loss.mean()
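The tail of this function is a reliability-weighted AP loss: where the predicted reliability `qconf` is low, the loss falls back to the constant `reli_base`. A self-contained sketch of just that part; `dummy_aplosser` is a hypothetical stand-in, not the average-precision loss module used in the source:

import torch

def dummy_aplosser(scores, gt):
    # hypothetical stand-in: squashes the positive-pair score into [0, 1]
    return torch.sigmoid((scores * gt.float()).sum(dim=1))

N = 8
pscores, nscores = torch.randn(N, 1), torch.randn(N, 1)
scores = torch.cat((pscores, nscores), dim=1)
gt = torch.zeros_like(scores, dtype=torch.uint8)
gt[:, :pscores.shape[1]] = 1

qconf = torch.rand(N)  # averaged per-match reliability, as in the function above
reli_base = 0.5
ap = dummy_aplosser(scores, gt)
ap_loss = 1 - ap * qconf - (1 - qconf) * reli_base
print(ap_loss.mean())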
# img_b_index = dataset.get_img_idx_with_different_pose(scene, img_a_pose, num_attempts=50)
# img_b_rgb, img_b_depth, _, img_b_pose = dataset.get_rgbd_mask_pose(scene, img_b_index)

# img_a_depth_numpy = np.asarray(img_a_depth)
# img_b_depth_numpy = np.asarray(img_b_depth)

# start = time.time()
# uv_a, uv_b = correspondence_finder.batch_find_pixel_correspondences(img_a_depth_numpy, img_a_pose,
#                                                                     img_b_depth_numpy, img_b_pose,
#                                                                     num_attempts=num_attempts,
#                                                                     device='CPU')

import time
import torch

start = time.time()
# uv_b_non_matches = correspondence_finder.create_non_correspondences(uv_b, img_a_depth_numpy.shape, num_non_matches_per_match=10)
uv_b_non_matches = correspondence_finder.create_non_correspondences(
    uv_b_matches, img_b_shape, num_non_matches_per_match=10, img_b_mask=None)
print(time.time() - start, "seconds for non-matches")
if uv_b_non_matches is not None:
    print(uv_b_non_matches[0].shape)

    # This just checks to make sure nothing is out of bounds
    print(torch.min(uv_b_non_matches[0]))
    print(torch.min(uv_b_non_matches[1]))
    print(torch.max(uv_b_non_matches[0]))
    print(torch.max(uv_b_non_matches[1]))
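
The min/max prints above can be turned into hard assertions; a short sketch (the helper name is illustrative, not from the source), assuming the (u, v) convention where u indexes columns (width) and v indexes rows (height):

def assert_in_bounds(uv_tuple, img_shape):
    # uv_tuple = (u_coords, v_coords); img_shape = (H, W)
    u, v = uv_tuple
    assert int(torch.min(u)) >= 0 and int(torch.max(u)) < img_shape[1]
    assert int(torch.min(v)) >= 0 and int(torch.max(v)) < img_shape[0]

if uv_b_non_matches is not None:
    assert_in_bounds(uv_b_non_matches, img_b_shape)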

#     fig, axes = correspondence_plotter.plot_correspondences_direct(img_a_rgb, img_a_depth_numpy, img_b_rgb, img_b_depth_numpy, uv_a, uv_b, show=False)
#     uv_a_long = (torch.t(uv_a[0].repeat(3, 1)).contiguous().view(-1,1), torch.t(uv_a[1].repeat(3, 1)).contiguous().view(-1,1))
#     uv_b_non_matches_long = (uv_b_non_matches[0].view(-1,1), uv_b_non_matches[1].view(-1,1) )
#     correspondence_plotter.plot_correspondences_direct(img_a_rgb, img_a_depth_numpy, img_b_rgb, img_b_depth_numpy, uv_a_long, uv_b_non_matches_long, use_previous_plot=(fig,axes),