Example #1
def getAffineShape(self, final_resp, LAFs, final_pyr_idxs, final_level_idxs, num_features = 0):
     pe_time = 0
     affnet_time = 0
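    # build an inverted pyramid index: for each octave/level, the ids of the keypoints assigned to it (used for fast patch sampling)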
     pyr_inv_idxs = get_inverted_pyr_index(self.scale_pyr, final_pyr_idxs, final_level_idxs)
     t = time.time()
     patches_small = extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, LAFs, PS = self.AffNet.PS)
     pe_time+=time.time() - t
     t = time.time()
     base_A = torch.eye(2).unsqueeze(0).expand(final_pyr_idxs.size(0),2,2)
     if final_resp.is_cuda:
         base_A = base_A.cuda()
     base_A = Variable(base_A)
     is_good = None
     n_patches = patches_small.size(0)
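    # Baumberg-style iteration: run AffNet over the patches and accumulate the estimated affine shape in base_A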
     for i in range(self.num_Baum_iters):
         t = time.time()
         A = batched_forward(self.AffNet, patches_small, 512)
         is_good_current = 1
         affnet_time += time.time() - t
         if is_good is None:
             is_good = is_good_current
         else:
             is_good = is_good * is_good_current
        base_A = torch.bmm(A, base_A)
         new_LAFs = torch.cat([torch.bmm(base_A,LAFs[:,:,0:2]), LAFs[:,:,2:] ], dim =2)
         #print torch.sqrt(new_LAFs[0,0,0]*new_LAFs[0,1,1] - new_LAFs[0,1,0] *new_LAFs[0,0,1]) * scale_pyr[0][0].size(2)
         if i != self.num_Baum_iters - 1:
             pe_time+=time.time() - t
             t = time.time()
             patches_small =  extract_patches_from_pyramid_with_inv_index(self.scale_pyr, pyr_inv_idxs, new_LAFs, PS = self.AffNet.PS)
             pe_time+= time.time() - t
             l1,l2 = batch_eig2x2(A)      
             ratio1 =  torch.abs(l1 / (l2 + 1e-8))
             converged_mask = (ratio1 <= 1.2) * (ratio1 >= (0.8)) 
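    # after the iterations, reject keypoints whose accumulated affine shape is too elongated (eigenvalue ratio outside [1/6, 6])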
     l1,l2 = batch_eig2x2(base_A)
     ratio = torch.abs(l1 / (l2 + 1e-8))
     idxs_mask = ((ratio < 6.0) * (ratio > (1./6.)))# * converged_mask.float()) > 0
     num_survived = idxs_mask.float().sum()
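    # if more than num_features keypoints survive, keep only the strongest responses; otherwise keep all survivors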
     if (num_features > 0) and (num_survived.data[0] > num_features):
         final_resp =  final_resp * idxs_mask.float() #zero bad points
        final_resp, idxs = torch.topk(final_resp, k = num_features)
     else:
         idxs = torch.nonzero(idxs_mask.data).view(-1).long()
         final_resp = final_resp[idxs]
     final_pyr_idxs = final_pyr_idxs[idxs]
     final_level_idxs = final_level_idxs[idxs]
     base_A = torch.index_select(base_A, 0, idxs)
     LAFs = torch.index_select(LAFs, 0, idxs)
     new_LAFs = torch.cat([torch.bmm(rectifyAffineTransformationUpIsUp(base_A), LAFs[:,:,0:2]),
                            LAFs[:,:,2:]], dim =2)
    print('affnet_time', affnet_time)
    print('pe_time', pe_time)
     return final_resp, new_LAFs, final_pyr_idxs, final_level_idxs  
Example #2
    def getAffineShape(self, scale_pyr, final_resp, LAFs, final_pyr_idxs, final_level_idxs, num_features = 0, n_iters = 1):
        pyr_inv_idxs = get_inverted_pyr_index(scale_pyr, final_pyr_idxs, final_level_idxs)
        patches_small = extract_patches_from_pyramid_with_inv_index(scale_pyr, pyr_inv_idxs, LAFs, PS = self.AffNet.PS)
        base_A = torch.eye(2).unsqueeze(0).expand(final_pyr_idxs.size(0),2,2)
        if final_resp.is_cuda:
            base_A = base_A.cuda()
        base_A = Variable(base_A)
        is_good = None
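        # iterative affine adaptation: self.AffNet returns the shape update A and a per-patch is_good signal, accumulated by multiplication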
        for i in range(n_iters):
            A, is_good_current = self.AffNet(patches_small)
            if is_good is None:
                is_good = is_good_current
            else:
                is_good = is_good * is_good_current
            base_A = torch.bmm(A, base_A)
            new_LAFs = torch.cat([torch.bmm(base_A,LAFs[:,:,0:2]), LAFs[:,:,2:] ], dim =2)
            if i != self.num_Baum_iters - 1:
                patches_small =  extract_patches_from_pyramid_with_inv_index(scale_pyr, pyr_inv_idxs, new_LAFs, PS = self.AffNet.PS)
                l1,l2 = batch_eig2x2(A)      
                ratio1 =  torch.abs(l1 / (l2 + 1e-8))
                converged_mask = (ratio1 <= 1.2) * (ratio1 >= (0.8)) 
        l1,l2 = batch_eig2x2(base_A)
        #print l1,l2
        ratio = torch.abs(l1 / (l2 + 1e-8))
        #print new_LAFs[0:2,:,:]
        #print '***'
        #print ((ratio < 6.0) * (ratio > (1./6.))).float().sum()
        #print converged_mask.float().sum()
        #print is_good.float().sum()

        ratio = 1.0 + 0 * torch.abs(l1 / (l2 + 1e-8))  # elongation filter disabled for now; CHANGE back to the real ratio after training
        #idxs_mask = (ratio < 6.0) * (ratio > (1./6.)) * (is_good > 0.5)#  * converged_mask
        idxs_mask = ((ratio < 6.0) * (ratio > (1./6.)))# * converged_mask.float()) > 0
        num_survived = idxs_mask.float().sum()
        #print num_survived
        if (num_features > 0) and (num_survived.data[0] > num_features):
            final_resp =  final_resp * idxs_mask.float() #zero bad points
            final_resp, idxs = torch.topk(final_resp, k = num_features)
        else:
            idxs = torch.nonzero(idxs_mask.data).view(-1).long()
            if (len(idxs.size()) == 0) or (idxs.size(0) == idxs_mask.size(0)):
                idxs = None
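        # idxs stays None when nothing was selected or when every keypoint survived; the re-indexing below is then skipped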
        if idxs is not None:
            final_resp = torch.index_select(final_resp, 0, idxs)
            final_pyr_idxs = final_pyr_idxs[idxs]
            final_level_idxs = final_level_idxs[idxs]
            base_A = torch.index_select(base_A, 0, idxs)
            LAFs = torch.index_select(LAFs, 0, idxs)
        #new_LAFs = torch.cat([torch.bmm(rectifyAffineTransformationUpIsUp(base_A), LAFs[:,:,0:2]),
        #                       LAFs[:,:,2:]], dim =2)
        new_LAFs = torch.cat([torch.bmm(base_A, LAFs[:,:,0:2]),
                               LAFs[:,:,2:]], dim =2)
        return final_resp, new_LAFs, final_pyr_idxs, final_level_idxs  
Example #3
    def forward(self, x):
        ### Generate scale space
        scale_pyr, sigmas, pix_dists = self.ScalePyrGen(x)
        ### Detect keypoints in scale space
        aff_matrices = []
        top_responces = []
        pyr_idxs = []
        level_idxs = []
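        # scan every octave and level: scale-normalized Hessian response followed by 3D non-maximum suppression across neighbouring levels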
        for oct_idx in range(len(sigmas)):
            print(oct_idx)
            octave = scale_pyr[oct_idx]
            sigmas_oct = sigmas[oct_idx]
            pix_dists_oct = pix_dists[oct_idx]
            for level_idx in range(1, len(octave) - 1):
                low = float(sigmas_oct[level_idx - 1]**4) * self.Hes(
                    octave[level_idx - 1])
                cur = float(sigmas_oct[level_idx]**4) * self.Hes(
                    octave[level_idx])
                high = float(sigmas_oct[level_idx + 1]**4) * self.Hes(
                    octave[level_idx + 1])
                nms_f = NMS3dAndComposeA(scales=sigmas_oct[level_idx -
                                                           1:level_idx + 2],
                                         border=self.b,
                                         mrSize=self.mrSize)
                top_resp, aff_matrix = nms_f(low, cur, high, self.num)
                if top_resp is None:
                    break
                aff_matrices.append(aff_matrix)
                top_responces.append(top_resp)
                pyr_id = Variable(oct_idx * torch.ones(aff_matrix.size(0)))
                lev_id = Variable(level_idx * torch.ones(aff_matrix.size(0)))
                if self.use_cuda:
                    pyr_id = pyr_id.cuda()
                    lev_id = lev_id.cuda()
                pyr_idxs.append(pyr_id)
                level_idxs.append(lev_id)
        top_resp_scales = torch.cat(top_responces, dim=0)
        aff_m_scales = torch.cat(aff_matrices, dim=0)
        pyr_idxs_scales = torch.cat(pyr_idxs, dim=0)
        level_idxs_scale = torch.cat(level_idxs, dim=0)
        #print top_resp_scales

        final_resp, idxs = torch.topk(top_resp_scales,
                                      k=max(
                                          1,
                                          min(self.num,
                                              top_resp_scales.size(0))))
        final_aff_m = torch.index_select(aff_m_scales, 0, idxs)
        final_pyr_idxs = torch.index_select(pyr_idxs_scales, 0, idxs)
        final_level_idxs = torch.index_select(level_idxs_scale, 0, idxs)

        pyr_inv_idxs = []
        ### Precompute octave inverted indexes
        for i in range(len(scale_pyr)):
            pyr_inv_idxs.append([])
            cur_idxs = final_pyr_idxs == i  #torch.nonzero((pyr_idxs == i).data)
            for j in range(0, len(final_level_idxs)):
                cur_lvl_idxs = torch.nonzero(
                    ((final_level_idxs == j) * cur_idxs).data)
                if len(cur_lvl_idxs.size()) == 0:
                    pyr_inv_idxs[-1].append(None)
                else:
                    pyr_inv_idxs[-1].append(cur_lvl_idxs.squeeze(1))
        ###
        #final_aff_m[:,0:2,0:2] =  final_aff_m[:,0:2,0:2] / self.init_sigma
        patches_small = self.extract_patches(scale_pyr,
                                             final_aff_m,
                                             final_pyr_idxs,
                                             final_level_idxs,
                                             PS=19,
                                             pyr_inv_idxs=pyr_inv_idxs)
        ###
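        # start from identity affine shapes; each AffShape estimate is composed into base_A below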
        base_A = Variable(
            torch.eye(2).unsqueeze(0).expand(final_pyr_idxs.size(0), 2, 2))
        if self.use_cuda:
            base_A = base_A.cuda()
        ### Estimate affine shape
        for i in range(self.num_Baum_iters):
            print(i)
            A = self.AffShape(patches_small)
            base_A = torch.bmm(A, base_A)
            temp_final = torch.cat([
                torch.bmm(base_A, final_aff_m[:, :, 0:2]), final_aff_m[:, :,
                                                                       2:]
            ],
                                   dim=2)
            if i != self.num_Baum_iters - 1:
                patches_small = self.extract_patches(scale_pyr,
                                                     temp_final,
                                                     final_pyr_idxs,
                                                     final_level_idxs,
                                                     PS=19,
                                                     pyr_inv_idxs=pyr_inv_idxs)
            else:
                l1, l2 = batch_eig2x2(base_A)
                ratio = torch.abs(l1 / (l2 + 1e-8))
                idxs_mask = (ratio <= 6.0) * (ratio >= 1. / 6.)
                idxs_mask = torch.nonzero(idxs_mask.data).view(-1)
                temp_final = temp_final[idxs_mask, :, :]
                final_pyr_idxs = final_pyr_idxs[idxs_mask]
                final_level_idxs = final_level_idxs[idxs_mask]
        pyr_inv_idxs = []
        ### Precompute octave inverted indexes
        for i in range(len(scale_pyr)):
            pyr_inv_idxs.append([])
            cur_idxs = final_pyr_idxs == i  #torch.nonzero((pyr_idxs == i).data)
            for j in range(0, len(final_level_idxs)):
                cur_lvl_idxs = torch.nonzero(
                    ((final_level_idxs == j) * cur_idxs).data)
                if len(cur_lvl_idxs.size()) == 0:
                    pyr_inv_idxs[-1].append(None)
                else:
                    pyr_inv_idxs[-1].append(cur_lvl_idxs.squeeze(1))
        ###
        if self.num_Baum_iters > 0:
            final_aff_m = temp_final
        #####
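        # scale the affine part of each frame by mrSize (measurement region) before extracting the final patches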
        final_aff_m[:, :, 0:2] = self.mrSize * final_aff_m[:, :, 0:2]
        patches_small = self.extract_patches(scale_pyr,
                                             final_aff_m,
                                             final_pyr_idxs,
                                             final_level_idxs,
                                             PS=19,
                                             pyr_inv_idxs=pyr_inv_idxs)
        ######
        ### Detect orientation
        for i in range(0):  # orientation estimation is currently disabled (range(0) never iterates)
            ori = self.OriDet(patches_small)
            #print np.degrees(ori.data.cpu().numpy().ravel()[1])
            #print final_aff_m[1,:,:]
            #print '*****'
            final_aff_m = self.rotateLAFs(final_aff_m, ori)
            #print final_aff_m[0,:,:]
            patches_small = self.extract_patches(scale_pyr,
                                                 final_aff_m,
                                                 final_pyr_idxs,
                                                 final_level_idxs,
                                                 PS=19,
                                                 pyr_inv_idxs=pyr_inv_idxs)
        ###
        patches = self.extract_patches(scale_pyr,
                                       final_aff_m,
                                       final_pyr_idxs,
                                       final_level_idxs,
                                       PS=self.PS,
                                       pyr_inv_idxs=pyr_inv_idxs)
        return final_aff_m, patches, final_resp, scale_pyr
Example #4
    def forward(self, x):
        ### Generate scale space
        scale_pyr, sigmas, pix_dists = self.ScalePyrGen(x)
        ### Detect keypoints in scale space
        aff_matrices = []
        top_responces = []
        pyr_idxs = []
        level_idxs = []
        for oct_idx in range(len(sigmas)):
            print(oct_idx)
            octave = scale_pyr[oct_idx]
            sigmas_oct = sigmas[oct_idx]
            pix_dists_oct = pix_dists[oct_idx]
            for level_idx in range(1, len(octave) - 1):
                low = float(sigmas_oct[level_idx - 1]**4) * self.Hes(
                    octave[level_idx - 1])
                cur = float(sigmas_oct[level_idx]**4) * self.Hes(
                    octave[level_idx])
                high = float(sigmas_oct[level_idx + 1]**4) * self.Hes(
                    octave[level_idx + 1])
                nms_f = NMS3dAndComposeA(scales=sigmas_oct[level_idx -
                                                           1:level_idx + 2],
                                         mrSize=1.0,
                                         border=self.b)
                top_resp, aff_matrix = nms_f(low, cur, high, self.num // 2)  # floor division keeps the per-level keypoint budget an integer

                aff_matrices.append(aff_matrix)
                top_responces.append(top_resp)
                pyr_id = Variable(oct_idx * torch.ones(aff_matrix.size(0)))
                lev_id = Variable(level_idx * torch.ones(aff_matrix.size(0)))
                if self.use_cuda:
                    pyr_id = pyr_id.cuda()
                    lev_id = lev_id.cuda()
                pyr_idxs.append(pyr_id)
                level_idxs.append(lev_id)
        top_resp_scales = torch.cat(top_responces, dim=0)
        aff_m_scales = torch.cat(aff_matrices, dim=0)
        pyr_idxs_scales = torch.cat(pyr_idxs, dim=0)
        level_idxs_scale = torch.cat(level_idxs, dim=0)
        #print top_resp_scales
        final_resp, idxs = torch.topk(top_resp_scales,
                                      k=max(
                                          1,
                                          min(self.num,
                                              top_resp_scales.size(0))))
        final_aff_m = torch.index_select(aff_m_scales, 0, idxs)
        final_pyr_idxs = torch.index_select(pyr_idxs_scales, 0, idxs)
        final_level_idxs = torch.index_select(level_idxs_scale, 0, idxs)
        ###
        #final_aff_m[:,:,0:2] =  final_aff_m[:,:,0:2] / self.init_sigma
        patches_small = self.extract_patches(scale_pyr,
                                             final_aff_m,
                                             final_pyr_idxs,
                                             final_level_idxs,
                                             PS=19,
                                             gauss_mask=False)
        ###

        base_A = Variable(
            torch.eye(2).unsqueeze(0).expand(final_pyr_idxs.size(0), 2, 2))
        if self.use_cuda:
            base_A = base_A.cuda()
        ### Estimate affine shape
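        # Baumberg iteration: accept a shape update only where it stays well-conditioned (eigenvalue ratio within [1/6, 6]); otherwise keep the previous base_A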
        for i in range(self.num_Baum_iters):
            print(i)
            a, b, c, ratio_in_patch = self.AffShape(patches_small)
            base_A_new = self.ApplyAffine(base_A, a, b, c)
            l1, l2 = batch_eig2x2(base_A_new)
            ratio = torch.abs(l1 / (l2 + 1e-8))
            mask = (ratio <= 6.0) * (ratio >= 1. / 6.)
            #print mask.sum()
            mask = mask.unsqueeze(1).unsqueeze(1).float().expand(
                mask.size(0), 2, 2)
            base_A = base_A_new * mask + base_A * (1.0 - mask)
            #idxs_mask = mask.data.nonzero().view(-1)
            #base_A = base_A_new[idxs_mask,:,:]
            #final_aff_m = final_aff_m[idxs_mask, :, :]
            #final_pyr_idxs = final_pyr_idxs[idxs_mask]

            temp_final = torch.cat([
                torch.bmm(base_A, final_aff_m[:, :, :2]), final_aff_m[:, :, 2:]
            ],
                                   dim=2)
            if i != self.num_Baum_iters - 1:
                patches_small = self.extract_patches(scale_pyr,
                                                     temp_final,
                                                     final_pyr_idxs,
                                                     final_level_idxs,
                                                     PS=19,
                                                     gauss_mask=False)
            else:
                idxs_mask = torch.nonzero(
                    ((ratio <= 6.0) * (ratio >= 1. / 6.)).data).view(-1)
                temp_final = temp_final[idxs_mask, :, :]
                final_pyr_idxs = final_pyr_idxs[idxs_mask]
                final_level_idxs = final_level_idxs[idxs_mask]

        #
        if self.num_Baum_iters > 0:
            final_aff_m = temp_final
        #####
        #final_aff_m[:,:,0:2] = self.init_sigma * self.mrSize * final_aff_m[:,:,0:2]
        final_aff_m[:, :, 0:2] = self.mrSize * final_aff_m[:, :, 0:2]
        patches_small = self.extract_patches(scale_pyr,
                                             final_aff_m,
                                             final_pyr_idxs,
                                             final_level_idxs,
                                             PS=19,
                                             gauss_mask=False)
        ######
        ### Detect orientation
        for i in range(0):  # orientation estimation is currently disabled (range(0) never iterates)
            ori = self.OriDet(patches_small)
            #print np.degrees(ori.data.cpu().numpy().ravel()[1])
            #print final_aff_m[1,:,:]
            #print '*****'
            final_aff_m = self.rotateLAFs(final_aff_m, ori)
            #print final_aff_m[0,:,:]
            patches_small = self.extract_patches(scale_pyr,
                                                 final_aff_m,
                                                 final_pyr_idxs,
                                                 final_level_idxs,
                                                 PS=19,
                                                 gauss_mask=False)
        ###
        patches = self.extract_patches(scale_pyr,
                                       final_aff_m,
                                       final_pyr_idxs,
                                       final_level_idxs,
                                       PS=self.PS)
        return final_aff_m, patches, final_resp, scale_pyr