Example #1
def getAffmaps_from_Affnet(patches_np):
    sp1, sp2 = np.shape(patches_np[0])
    subpatches = torch.zeros([len(patches_np), 1, 32, 32], dtype=torch.float32)
    for k in range(len(patches_np)):
        # Crop the central 32x32 window of each input patch.
        subpatch = patches_np[k][int(sp1 / 2) - 16:int(sp1 / 2) + 16,
                                 int(sp2 / 2) - 16:int(sp2 / 2) + 16]
        subpatches[k, 0, :, :] = torch.from_numpy(subpatch.astype(np.float32))

    x, y = subpatches.shape[3] / 2.0 + 2, subpatches.shape[2] / 2.0 + 2
    LAFs = normalizeLAFs(
        torch.tensor([[AffNetPix.PS / 2, 0, x],
                      [0, AffNetPix.PS / 2, y]]).reshape(1, 2, 3),
        subpatches.shape[3], subpatches.shape[2])
    baseLAFs = torch.zeros([subpatches.shape[0], 2, 3], dtype=torch.float32)
    for m in range(subpatches.shape[0]):
        baseLAFs[m, :, :] = LAFs

    if USE_CUDA:
        # or ---> A = AffNetPix(subpatches.cuda()).cpu()
        with torch.no_grad():
            A = batched_forward(AffNetPix, subpatches.cuda(), 256).cpu()
    else:
        with torch.no_grad():
            A = AffNetPix(subpatches)
    LAFs = torch.cat([torch.bmm(A, baseLAFs[:, :, 0:2]), baseLAFs[:, :, 2:]],
                     dim=2)
    dLAFs = denormalizeLAFs(LAFs, subpatches.shape[3], subpatches.shape[2])
    Alist = convertLAFs_to_A23format(dLAFs.detach().cpu().numpy().astype(
        np.float32))
    return Alist
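
A minimal usage sketch for getAffmaps_from_Affnet, assuming the module-level AffNetPix network, the USE_CUDA flag, and the LAF helpers (normalizeLAFs, denormalizeLAFs, convertLAFs_to_A23format, batched_forward) have been initialized as in the surrounding repository; the patch size and count are illustrative only.

import numpy as np

# Hypothetical driver: a few 65x65 grayscale patches, one 2x3 affine map each.
patches = [np.random.rand(65, 65).astype(np.float32) for _ in range(8)]
affine_maps = getAffmaps_from_Affnet(patches)
print(len(affine_maps))  # 8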
Example #2
def AffNetHardNet_describeFromKeys(img_np, KPlist):
    # Variable(..., volatile=True) is removed in modern PyTorch; inference
    # below is wrapped in torch.no_grad() instead.
    img = torch.from_numpy(img_np.astype(np.float32))
    img = img.view(1, 1, img.size(0), img.size(1))
    HessianAffine = ScaleSpaceAffinePatchExtractor(mrSize=5.192,
                                                   num_features=0,
                                                   border=0,
                                                   num_Baum_iters=0)
    if USE_CUDA:
        HessianAffine = HessianAffine.cuda()
        img = img.cuda()
    with torch.no_grad():
        HessianAffine.createScaleSpace(
            img)  # to generate scale pyramids and stuff
    descriptors = []
    Alist = []
    n = 0
    # for patch_np in patches:
    for kp in KPlist:
        x, y = np.float32(kp.pt)
        LAFs = normalizeLAFs(
            torch.tensor([[AffNetPix.PS / 2, 0, x],
                          [0, AffNetPix.PS / 2, y]]).reshape(1, 2, 3),
            img.size(3), img.size(2))
        with torch.no_grad():
            patch = HessianAffine.extract_patches_from_pyr(
                denormalizeLAFs(LAFs, img.size(3), img.size(2)),
                PS=AffNetPix.PS)
        if WRITE_IMGS_DEBUG:
            SaveImageWithKeys(patch.detach().cpu().numpy().reshape([32, 32]),
                              [], 'p2/' + str(n) + '.png')
        if USE_CUDA:
            # or ---> A = AffNetPix(subpatches.cuda()).cpu()
            with torch.no_grad():
                A = batched_forward(AffNetPix, patch.cuda(), 256).cpu()
        else:
            with torch.no_grad():
                A = AffNetPix(patch)
        new_LAFs = torch.cat([torch.bmm(A, LAFs[:, :, 0:2]), LAFs[:, :, 2:]],
                             dim=2)
        dLAFs = denormalizeLAFs(new_LAFs, img.size(3), img.size(2))
        with torch.no_grad():
            patchaff = HessianAffine.extract_patches_from_pyr(dLAFs, PS=32)
            if WRITE_IMGS_DEBUG:
                SaveImageWithKeys(
                    patchaff.detach().cpu().numpy().reshape([32, 32]), [],
                    'p1/' + str(n) + '.png')
                SaveImageWithKeys(img_np, [kp], 'im1/' + str(n) + '.png')
            descriptors.append(
                HardNetDescriptor(patchaff).cpu().numpy().astype(np.float32))
            Alist.append(
                convertLAFs_to_A23format(LAFs.detach().cpu().numpy().astype(
                    np.float32)))
        n += 1  # increment inside the loop so debug filenames stay distinct
    return descriptors, Alist
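
A hedged usage sketch for AffNetHardNet_describeFromKeys: it takes a single-channel image as a NumPy array plus a list of OpenCV-style keypoints (anything exposing a .pt attribute). The image path and the choice of ORB as detector are assumptions for illustration; any cv2 detector works.

import cv2
import numpy as np

gray = cv2.imread('image.png', cv2.IMREAD_GRAYSCALE)  # hypothetical path
kps = cv2.ORB_create(500).detect(gray, None)  # detect on uint8, describe on float
descs, affines = AffNetHardNet_describeFromKeys(gray.astype(np.float32), kps)
print(len(descs), len(affines))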
Example #3
    def getAffineShape(self, final_resp, LAFs, final_pyr_idxs, final_level_idxs, num_features=0):
        pe_time = 0
        affnet_time = 0
        pyr_inv_idxs = get_inverted_pyr_index(self.scale_pyr, final_pyr_idxs, final_level_idxs)
        t = time.time()
        patches_small = extract_patches_from_pyramid_with_inv_index(
            self.scale_pyr, pyr_inv_idxs, LAFs, PS=self.AffNet.PS)
        pe_time += time.time() - t
        t = time.time()
        base_A = torch.eye(2).unsqueeze(0).expand(final_pyr_idxs.size(0), 2, 2)
        if final_resp.is_cuda:
            base_A = base_A.cuda()
        base_A = Variable(base_A)
        is_good = None
        n_patches = patches_small.size(0)
        for i in range(self.num_Baum_iters):
            t = time.time()
            A = batched_forward(self.AffNet, patches_small, 512)
            is_good_current = 1  # placeholder: no per-iteration rejection is implemented
            affnet_time += time.time() - t
            if is_good is None:
                is_good = is_good_current
            else:
                is_good = is_good * is_good_current
            base_A = torch.bmm(A, base_A)
            new_LAFs = torch.cat([torch.bmm(base_A, LAFs[:, :, 0:2]), LAFs[:, :, 2:]], dim=2)
            # print(torch.sqrt(new_LAFs[0,0,0]*new_LAFs[0,1,1] - new_LAFs[0,1,0]*new_LAFs[0,0,1]) * scale_pyr[0][0].size(2))
            if i != self.num_Baum_iters - 1:
                pe_time += time.time() - t
                t = time.time()
                patches_small = extract_patches_from_pyramid_with_inv_index(
                    self.scale_pyr, pyr_inv_idxs, new_LAFs, PS=self.AffNet.PS)
                pe_time += time.time() - t
                l1, l2 = batch_eig2x2(A)
                ratio1 = torch.abs(l1 / (l2 + 1e-8))
                converged_mask = (ratio1 <= 1.2) * (ratio1 >= 0.8)
        l1, l2 = batch_eig2x2(base_A)
        ratio = torch.abs(l1 / (l2 + 1e-8))
        idxs_mask = (ratio < 6.0) * (ratio > (1. / 6.))  # * converged_mask.float()) > 0
        num_survived = idxs_mask.float().sum()
        if (num_features > 0) and (num_survived.item() > num_features):
            final_resp = final_resp * idxs_mask.float()  # zero out the bad points
            final_resp, idxs = torch.topk(final_resp, k=num_features)
        else:
            idxs = torch.nonzero(idxs_mask.data).view(-1).long()
            final_resp = final_resp[idxs]
        final_pyr_idxs = final_pyr_idxs[idxs]
        final_level_idxs = final_level_idxs[idxs]
        base_A = torch.index_select(base_A, 0, idxs)
        LAFs = torch.index_select(LAFs, 0, idxs)
        new_LAFs = torch.cat([torch.bmm(rectifyAffineTransformationUpIsUp(base_A), LAFs[:, :, 0:2]),
                              LAFs[:, :, 2:]], dim=2)
        print('affnet_time', affnet_time)
        print('pe_time', pe_time)
        return final_resp, new_LAFs, final_pyr_idxs, final_level_idxs
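
The convergence test above relies on batch_eig2x2, which is not shown in this snippet. A minimal sketch of what such a helper can compute, assuming it returns the two eigenvalues of every 2x2 matrix in a batch via the closed-form trace/determinant formula (the repository's actual implementation may differ):

import torch

def batch_eig2x2_sketch(A, eps=1e-10):
    # A: [B, 2, 2]. The eigenvalues of a 2x2 matrix are
    # (tr +/- sqrt(tr^2 - 4*det)) / 2; clamp the discriminant so that
    # near-repeated eigenvalues do not produce NaNs.
    tr = A[:, 0, 0] + A[:, 1, 1]
    det = A[:, 0, 0] * A[:, 1, 1] - A[:, 0, 1] * A[:, 1, 0]
    disc = torch.sqrt(torch.clamp(tr * tr - 4.0 * det, min=eps))
    return (tr + disc) / 2.0, (tr - disc) / 2.0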
Example #4
def AffNetHardNet_describe(patches):
    descriptors = np.zeros(shape=[patches.shape[0], 128], dtype=np.float32)
    HessianAffine = []
    subpatches = torch.zeros([len(patches), 1, 32, 32], dtype=torch.float32)
    baseLAFs = torch.zeros([len(patches), 2, 3], dtype=torch.float32)
    for m in range(patches.shape[0]):
        patch_np = patches[m, :, :, 0].reshape(np.shape(patches)[1:3])
        HessianAffine.append(
            ScaleSpaceAffinePatchExtractor(mrSize=5.192,
                                           num_features=0,
                                           border=0,
                                           num_Baum_iters=0))
        var_image = torch.from_numpy(patch_np.astype(np.float32))
        patch = var_image.view(1, 1, var_image.size(0), var_image.size(1))
        with torch.no_grad():
            HessianAffine[m].createScaleSpace(
                patch)  # to generate scale pyramids and stuff
        x, y = patch.size(3) / 2.0 + 2, patch.size(2) / 2.0 + 2
        LAFs = normalizeLAFs(
            torch.tensor([[AffNetPix.PS / 2, 0, x],
                          [0, AffNetPix.PS / 2, y]]).reshape(1, 2, 3),
            patch.size(3), patch.size(2))
        baseLAFs[m, :, :] = LAFs
        with torch.no_grad():
            subpatch = HessianAffine[m].extract_patches_from_pyr(
                denormalizeLAFs(LAFs, patch.size(3), patch.size(2)),
                PS=AffNetPix.PS)
            if WRITE_IMGS_DEBUG:
                SaveImageWithKeys(
                    subpatch.detach().cpu().numpy().reshape([32, 32]), [],
                    'p1/' + str(m) + '.png')
            # The subpatch above has been blurred by extract_patches_from_pyr,
            # so crop the raw patch manually instead to keep the comparison
            # with other methods fair.
            subpatch = patch_np[16:48, 16:48]
            subpatches[m, 0, :, :] = torch.from_numpy(subpatch.astype(np.float32))
            if WRITE_IMGS_DEBUG:
                SaveImageWithKeys(subpatch, [], 'p2/' + str(m) + '.png')
    if USE_CUDA:
        # or ---> A = AffNetPix(subpatches.cuda()).cpu()
        with torch.no_grad():
            A = batched_forward(AffNetPix, subpatches.cuda(), 256).cpu()
    else:
        with torch.no_grad():
            A = AffNetPix(subpatches)
    LAFs = torch.cat([torch.bmm(A, baseLAFs[:, :, 0:2]), baseLAFs[:, :, 2:]],
                     dim=2)
    dLAFs = denormalizeLAFs(LAFs, patch.size(3), patch.size(2))
    Alist = convertLAFs_to_A23format(dLAFs.detach().cpu().numpy().astype(
        np.float32))
    for m in range(patches.shape[0]):
        with torch.no_grad():
            patchaff = HessianAffine[m].extract_patches_from_pyr(
                dLAFs[m, :, :].reshape(1, 2, 3), PS=32)
            if WRITE_IMGS_DEBUG:
                SaveImageWithKeys(
                    patchaff.detach().cpu().numpy().reshape([32, 32]), [],
                    'im1/' + str(m) + '.png')
                SaveImageWithKeys(patch_np, [], 'im2/' + str(m) + '.png')
            subpatches[m, 0, :, :] = patchaff.reshape(32, 32)
    if USE_CUDA:
        with torch.no_grad():
            # descriptors = HardNetDescriptor(subpatches.cuda()).detach().cpu().numpy().astype(np.float32)
            descriptors = batched_forward(HardNetDescriptor, subpatches.cuda(),
                                          256).cpu().numpy().astype(np.float32)
    else:
        with torch.no_grad():
            descriptors = HardNetDescriptor(
                subpatches).detach().cpu().numpy().astype(np.float32)
    return descriptors, Alist
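
A hedged usage sketch for AffNetHardNet_describe: the manual patch_np[16:48, 16:48] crop implies channel-last 64x64 input patches, so the shape below is an inference from the code rather than a documented contract.

import numpy as np

# Hypothetical batch of eight 64x64 single-channel patches, channel-last.
patches = (np.random.rand(8, 64, 64, 1) * 255).astype(np.float32)
descs, affines = AffNetHardNet_describe(patches)
print(descs.shape)  # expected (8, 128): one HardNet descriptor per patch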
Example #5
    def optimize(self, laf1, laf2, img1, img2, n_iters=10):
        if self.cuda:
            img1, img2 = img1.cuda(), img2.cuda()
        w1, h1 = img1.size(3), img1.size(2)
        w2, h2 = img2.size(3), img2.size(2)
        self.scale_pyr1, self.sigmas1, self.pix_dists1 = self.ScalePyrGen(img1)
        self.scale_pyr2, self.sigmas2, self.pix_dists2 = self.ScalePyrGen(img2)

        for d_idx in range(len(self.names_list)):
            D = self.desc_list[d_idx]
            try:
                if self.cuda:
                    D = D.cuda()
            except Exception:
                pass
            N = self.names_list[d_idx]
            print(N)
            l1 = deepcopy(Variable(deepcopy(laf1)))
            l2 = deepcopy(Variable(deepcopy(laf2)))
            if self.cuda:
                l1 = l1.cuda()
                l2 = l2.cuda()
            l1o, opt1 = get_input_param_optimizer(l1[:, :2, :2], self.lr)
            l2o, opt2 = get_input_param_optimizer(l2[:, :2, :2], self.lr)
            self.out_lafs1[N].append(
                deepcopy(
                    torch.cat([l1o.data, l1.data[:, :, 2:]], dim=2).cpu()))
            self.out_lafs2[N].append(
                deepcopy(
                    torch.cat([l2o.data, l2.data[:, :, 2:]], dim=2).cpu()))
            self.shape_diff[N].append(
                deepcopy(
                    FrobNorm(deepcopy(l1o.detach()), deepcopy(l2o.detach()))))
            for it in range(n_iters):
                # p1 = extract_patches(img1, normalizeLAFs(torch.cat([l1o, l1[:,:,2:]], dim=2), w1, h1))
                p1 = self.extract_patches_from_pyr(
                    torch.cat([l1o, l1[:, :, 2:]], dim=2),
                    self.scale_pyr1, self.sigmas1, self.pix_dists1, PS=32)
                desc1 = batched_forward(D, p1, 32)
                # p2 = extract_patches(img2, normalizeLAFs(torch.cat([l2o, l2[:,:,2:]], dim=2), w2, h2))
                p2 = self.extract_patches_from_pyr(
                    torch.cat([l2o, l2[:, :, 2:]], dim=2),
                    self.scale_pyr2, self.sigmas2, self.pix_dists2, PS=32)
                desc2 = batched_forward(D, p2, 32)
                loss = self.loss_func(desc1, desc2)
                if it % 10 == 0:
                    print(loss.data.cpu().numpy())
                opt1.zero_grad()
                opt2.zero_grad()
                loss.backward()
                opt1.step()
                opt2.step()
                self.out_lafs1[N].append(
                    deepcopy(
                        torch.cat([l1o.data, l1.data[:, :, 2:]], dim=2).cpu()))
                self.out_lafs2[N].append(
                    deepcopy(
                        torch.cat([l2o.data, l2.data[:, :, 2:]], dim=2).cpu()))
                self.out_loss_mean[N].append(deepcopy(loss.data.mean()))
                self.cent_diff[N].append(0)
                # deepcopy(torch.sqrt(((l1o.data[:,:,2] - l2o.data[:,:,2])**2).sum(dim=1) + 1e-12).mean())
                self.shape_diff[N].append(
                    deepcopy(
                        FrobNorm(deepcopy(l1o.detach()),
                                 deepcopy(l2o.detach()))))
                with torch.no_grad():
                    self.snn[N].append(
                        deepcopy(get_snn(desc1, desc2).cpu().view(1, -1)))
            del l1, l2, opt1, opt2
            gc.collect()
            torch.cuda.empty_cache()
        self.img1 = img1.data.cpu().numpy()
        self.img2 = img2.data.cpu().numpy()
        return
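
The loop above optimizes the 2x2 part of each LAF through get_input_param_optimizer, which is not shown here. A minimal sketch of the standard pattern it presumably follows, wrapping a tensor in a leaf nn.Parameter and returning an optimizer over it (the helper's real signature and optimizer choice are assumptions):

import torch
import torch.optim as optim

def get_input_param_optimizer_sketch(input_tensor, lr):
    # Clone the slice into a leaf Parameter so gradients accumulate on it,
    # then let the optimizer update the parameter in place.
    input_param = torch.nn.Parameter(input_tensor.data.clone())
    optimizer = optim.SGD([input_param], lr=lr)
    return input_param, optimizer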