Example #1
def HessAff_Detect(img, PatchSize=60, Nfeatures=500):
    # a plain tensor suffices here; inference below runs under torch.no_grad()
    var_image = torch.from_numpy(img.astype(np.float32))
    var_image_reshape = var_image.view(1, 1, var_image.size(0),
                                       var_image.size(1))
    HessianAffine = ScaleSpaceAffinePatchExtractor(mrSize=5.192,
                                                   num_features=Nfeatures,
                                                   border=PatchSize // 2,
                                                   num_Baum_iters=1)
    # if USE_CUDA:
    #     HessianAffine = HessianAffine.cuda()
    #     var_image_reshape = var_image_reshape.cuda()

    with torch.no_grad():
        LAFs, responses = HessianAffine(var_image_reshape, do_ori=True)
        patches = HessianAffine.extract_patches_from_pyr(LAFs,
                                                         PS=PatchSize).cpu()

    # these are my affine maps to work with
    Alist = convertLAFs_to_A23format(LAFs).cpu().numpy().astype(np.float32)
    # note: the underscore-prefixed kwargs require OpenCV < 4.5.3; newer
    # bindings renamed them to size, angle, response, octave, class_id
    KPlist = [
        cv2.KeyPoint(x=A[0, 2],
                     y=A[1, 2],
                     _size=10,
                     _angle=0.0,
                     _response=1,
                     _octave=packSIFTOctave(0, 0),
                     _class_id=1) for A in Alist
    ]
    return KPlist, np.array(patches), Alist, responses.cpu()
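A minimal usage sketch for HessAff_Detect (assuming cv2 is available and the helpers above are in scope; the image path is illustrative):

import cv2

img = cv2.imread('imgs/cat.png', cv2.IMREAD_GRAYSCALE)  # H x W, uint8
kps, patches, Alist, responses = HessAff_Detect(img, PatchSize=60, Nfeatures=500)
print(len(kps), 'keypoints;', patches.shape[0], 'patches')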
Example #2
def test(model, epoch):
    torch.cuda.empty_cache()
    # switch to evaluate mode
    model.eval()
    from architectures import AffNetFast
    affnet = AffNetFast()
    model_weights = 'pretrained/AffNet.pth'
    hncheckpoint = torch.load(model_weights)
    affnet.load_state_dict(hncheckpoint['state_dict'])
    affnet.eval()
    detector = ScaleSpaceAffinePatchExtractor(mrSize=5.192,
                                              num_features=3000,
                                              border=5,
                                              num_Baum_iters=1,
                                              AffNet=affnet,
                                              OriNet=model)
    descriptor = HardNet()
    model_weights = 'HardNet++.pth'
    hncheckpoint = torch.load(model_weights)
    descriptor.load_state_dict(hncheckpoint['state_dict'])
    descriptor.eval()
    if args.cuda:
        detector = detector.cuda()
        descriptor = descriptor.cuda()
    input_img_fname1 = 'test-graf/img1.png'  # sys.argv[1]
    input_img_fname2 = 'test-graf/img6.png'  # sys.argv[1]
    H_fname = 'test-graf/H1to6p'  # sys.argv[1]
    output_img_fname = 'graf_match.png'  # sys.argv[3]
    img1 = load_grayscale_var(input_img_fname1)
    img2 = load_grayscale_var(input_img_fname2)
    H = np.loadtxt(H_fname)
    H1to2 = torch.from_numpy(H).float()
    SNN_threshold = 0.8
    with torch.no_grad():
        LAFs1, descriptors1 = get_geometry_and_descriptors(img1, detector, descriptor)
        torch.cuda.empty_cache()
        LAFs2, descriptors2 = get_geometry_and_descriptors(img2, detector, descriptor)
        visualize_LAFs(img1.detach().cpu().numpy().squeeze(),
                       LAFs1.detach().cpu().numpy().squeeze(), 'b',
                       show=False,
                       save_to=LOG_DIR + "/detections1_" + str(epoch) + '.png')
        visualize_LAFs(img2.detach().cpu().numpy().squeeze(),
                       LAFs2.detach().cpu().numpy().squeeze(), 'g',
                       show=False,
                       save_to=LOG_DIR + "/detection2_" + str(epoch) + '.png')
        dist_matrix = distance_matrix_vector(descriptors1, descriptors2)
        min_dist, idxs_in_2 = torch.min(dist_matrix, 1)
        # mask out the nearest neighbour so the next min is the second nearest
        dist_matrix[:, idxs_in_2] = 100000
        min_2nd_dist, idxs_2nd_in_2 = torch.min(dist_matrix, 1)
        mask = (min_dist / (min_2nd_dist + 1e-8)) <= SNN_threshold
        tent_matches_in_1 = torch.arange(0, idxs_in_2.size(0)).cuda()[mask]
        tent_matches_in_2 = idxs_in_2[mask]
        tent_matches_in_1 = tent_matches_in_1.long()
        tent_matches_in_2 = tent_matches_in_2.long()
        LAF1s_tent = LAFs1[tent_matches_in_1,:,:]
        LAF2s_tent = LAFs2[tent_matches_in_2,:,:]
        min_dist, plain_indxs_in1, idxs_in_2 = get_GT_correspondence_indexes(
            LAF1s_tent, LAF2s_tent, H1to2.cuda(), dist_threshold=6)
        plain_indxs_in1 = plain_indxs_in1.long()
        inl_ratio = float(plain_indxs_in1.size(0)) / float(tent_matches_in_1.size(0))
        print('Test epoch', str(epoch))
        print('Test on graf1-6,', tent_matches_in_1.size(0), 'tentatives',
              plain_indxs_in1.size(0), 'true matches', str(inl_ratio)[:5],
              'inl.ratio')
        visualize_LAFs(img1.detach().cpu().numpy().squeeze(),
                       LAF1s_tent[plain_indxs_in1.long(), :, :].detach().cpu().numpy().squeeze(),
                       'g', show=False,
                       save_to=LOG_DIR + "/inliers1_" + str(epoch) + '.png')
        visualize_LAFs(img2.detach().cpu().numpy().squeeze(),
                       LAF2s_tent[idxs_in_2.long(), :, :].detach().cpu().numpy().squeeze(),
                       'g', show=False,
                       save_to=LOG_DIR + "/inliers2_" + str(epoch) + '.png')
    return
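The masking trick above is the standard second-nearest-neighbour (SNN) ratio test; an equivalent, more compact sketch (assuming row-wise L2 descriptors as above) uses torch.cdist and torch.topk:

import torch

def snn_match(desc1, desc2, snn_threshold=0.8):
    dists = torch.cdist(desc1, desc2)                 # all pairwise L2 distances
    vals, idxs = dists.topk(2, dim=1, largest=False)  # two nearest per row
    mask = vals[:, 0] / (vals[:, 1] + 1e-8) <= snn_threshold
    return mask.nonzero().squeeze(1), idxs[mask, 0]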
Example #3
def AffNetHardNet_describeFromKeys(img_np, KPlist):
    # a plain tensor suffices here; inference runs under torch.no_grad() below
    img = torch.from_numpy(img_np.astype(np.float32))
    img = img.view(1, 1, img.size(0), img.size(1))
    HessianAffine = ScaleSpaceAffinePatchExtractor(mrSize=5.192,
                                                   num_features=0,
                                                   border=0,
                                                   num_Baum_iters=0)
    if USE_CUDA:
        HessianAffine = HessianAffine.cuda()
        img = img.cuda()
    with torch.no_grad():
        HessianAffine.createScaleSpace(
            img)  # to generate scale pyramids and stuff
    descriptors = []
    Alist = []
    n = 0
    # for patch_np in patches:
    for kp in KPlist:
        x, y = np.float32(kp.pt)
        LAFs = normalizeLAFs(
            torch.tensor([[AffNetPix.PS / 2, 0, x], [0, AffNetPix.PS / 2,
                                                     y]]).reshape(1, 2, 3),
            img.size(3), img.size(2))
        with torch.no_grad():
            patch = HessianAffine.extract_patches_from_pyr(denormalizeLAFs(
                LAFs, img.size(3), img.size(2)),
                                                           PS=AffNetPix.PS)
        if WRITE_IMGS_DEBUG:
            SaveImageWithKeys(patch.detach().cpu().numpy().reshape([32, 32]),
                              [], 'p2/' + str(n) + '.png')
        if USE_CUDA:
            # or ---> A = AffNetPix(subpatches.cuda()).cpu()
            with torch.no_grad():
                A = batched_forward(AffNetPix, patch.cuda(), 256).cpu()
        else:
            with torch.no_grad():
                A = AffNetPix(patch)
        new_LAFs = torch.cat([torch.bmm(A, LAFs[:, :, 0:2]), LAFs[:, :, 2:]],
                             dim=2)
        dLAFs = denormalizeLAFs(new_LAFs, img.size(3), img.size(2))
        with torch.no_grad():
            patchaff = HessianAffine.extract_patches_from_pyr(dLAFs, PS=32)
            if WRITE_IMGS_DEBUG:
                SaveImageWithKeys(
                    patchaff.detach().cpu().numpy().reshape([32, 32]), [],
                    'p1/' + str(n) + '.png')
                SaveImageWithKeys(img_np, [kp], 'im1/' + str(n) + '.png')
            descriptors.append(
                HardNetDescriptor(patchaff).cpu().numpy().astype(np.float32))
            Alist.append(
                convertLAFs_to_A23format(LAFs.detach().cpu().numpy().astype(
                    np.float32)))
        n += 1  # debug-image counter: must advance inside the keypoint loop
    return descriptors, Alist
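A minimal usage sketch for AffNetHardNet_describeFromKeys (assuming cv2 supplies the keypoints; the SIFT detector and image path are only illustrative):

import cv2

img_np = cv2.imread('imgs/cat.png', cv2.IMREAD_GRAYSCALE)
KPlist = cv2.SIFT_create().detect(img_np, None)
descriptors, Alist = AffNetHardNet_describeFromKeys(img_np, KPlist)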
Example #4
            print('skip')
            continue
        loss = fro_dists.mean()
        total_feats += fro_dists.size(0)
        total_loss += loss.item()
        print('test img', batch_idx, loss.item(), fro_dists.size(0))
    print('Total loss:', total_loss / float(batch_idx + 1), 'features',
          float(total_feats) / float(batch_idx + 1))


train_loader, test_loader = create_loaders()

HA = ScaleSpaceAffinePatchExtractor(mrSize=3.0,
                                    num_features=350,
                                    border=5,
                                    num_Baum_iters=1,
                                    AffNet=BaumResNet())

model = HA
if USE_CUDA:
    model = model.cuda()

optimizer1 = create_optimizer(model.AffNet.features, BASE_LR, 5e-5)

test(test_loader, model, cuda=USE_CUDA)
for epoch in range(n_epochs):
    print('epoch', epoch)
    if USE_CUDA:
        model = model.cuda()
    train(train_loader, model, optimizer1, epoch, cuda=USE_CUDA)
Example #5
            continue
        loss = fro_dists.mean()
        total_feats += fro_dists.size(0)
        total_loss += loss.item()
        print('test img', batch_idx, loss.item(), fro_dists.size(0))
    print('Total loss:', total_loss / float(batch_idx + 1), 'features',
          float(total_feats) / float(batch_idx + 1))
    model.num = model_num_feats


train_loader, test_loader = create_loaders()

HA = ScaleSpaceAffinePatchExtractor(mrSize=5.192,
                                    num_features=350,
                                    border=5,
                                    num_Baum_iters=2,
                                    AffNet=BaumNet())

model = HA
if USE_CUDA:
    model = model.cuda()

optimizer1 = create_optimizer(model.AffNet.features, BASE_LR, 5e-5)

#test(test_loader, model, cuda = USE_CUDA)
for epoch in range(n_epochs):
    print('epoch', epoch)
    if USE_CUDA:
        model = model.cuda()
    train(train_loader, model, optimizer1, epoch, cuda=USE_CUDA)
Example #6
        LAFs1, aff_norm_patches1, resp1, pyr1 = HA(img1 / 255.)
        LAFs2, aff_norm_patches2, resp2, pyr2 = HA(img2 / 255.)
        if (len(LAFs1) == 0) or (len(LAFs2) == 0):
            continue
        fro_dists, idxs_in1, idxs_in2 = get_GT_correspondence_indexes_Fro(
            LAFs1, LAFs2, H, dist_threshold=10, use_cuda=cuda)
        if len(fro_dists.size()) == 0:
            print('skip')
            continue
        loss = fro_dists.mean()
        total_loss += loss.item()
        print('test img', batch_idx, loss.item())
    print('Total loss:', total_loss / float(batch_idx + 1))

train_loader, test_loader = create_loaders()

HA = ScaleSpaceAffinePatchExtractor(mrSize=5.0,
                                    num_features=3000,
                                    border=1,
                                    num_Baum_iters=5,
                                    AffNet=BaumNet())


model = HA
if USE_CUDA:
    model = model.cuda()

optimizer1 = create_optimizer(model.AffShape, BASE_LR, 5e-5)


start = 0
end = 100
for epoch in range(start, end):
    print('epoch', epoch)
    if USE_CUDA:
        model = model.cuda()
Example #7
img = np.mean(np.array(img), axis=2)

var_image = torch.from_numpy(img.astype(np.float32))  # plain tensor; no_grad is used below
var_image_reshape = var_image.view(1, 1, var_image.size(0), var_image.size(1))

AffNetPix = AffNetFast(PS=32)
weights_fname = '../../pretrained/AffNet.pth'

checkpoint = torch.load(weights_fname)
AffNetPix.load_state_dict(checkpoint['state_dict'])

AffNetPix.eval()

HA = ScaleSpaceAffinePatchExtractor(mrSize=5.192,
                                    num_features=nfeats,
                                    border=5,
                                    num_Baum_iters=1,
                                    th=th,
                                    AffNet=AffNetPix)
if USE_CUDA:
    HA = HA.cuda()
    var_image_reshape = var_image_reshape.cuda()
with torch.no_grad():
    LAFs, resp = HA(var_image_reshape)
ells = LAFs2ell(LAFs.data.cpu().numpy())

np.savetxt(output_fname, ells, delimiter=' ', fmt='%10.10f')
line_prepender(output_fname, str(len(ells)))
line_prepender(output_fname, '1.0')
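The helper line_prepender is assumed by this script; a plausible sketch (not the original implementation) that inserts a line at the top of the output file:

def line_prepender(filename, line):
    # rewrite the file with `line` as the new first line
    with open(filename, 'r') as f:
        content = f.read()
    with open(filename, 'w') as f:
        f.write(line.rstrip('\n') + '\n' + content)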
Example #8
def AffNetHardNet_describe(patches):
    descriptors = np.zeros(shape=[patches.shape[0], 128], dtype=np.float32)
    HessianAffine = []
    # a plain zeros tensor suffices; gradients are never needed here
    subpatches = torch.zeros([len(patches), 1, 32, 32], dtype=torch.float32)
    baseLAFs = torch.zeros([len(patches), 2, 3], dtype=torch.float32)
    for m in range(patches.shape[0]):
        patch_np = patches[m, :, :, 0].reshape(np.shape(patches)[1:3])
        HessianAffine.append(
            ScaleSpaceAffinePatchExtractor(mrSize=5.192,
                                           num_features=0,
                                           border=0,
                                           num_Baum_iters=0))
        with torch.no_grad():
            var_image = torch.from_numpy(patch_np.astype(np.float32))
            patch = var_image.view(1, 1, var_image.size(0), var_image.size(1))
        with torch.no_grad():
            HessianAffine[m].createScaleSpace(
                patch)  # to generate scale pyramids and stuff
        x, y = patch.size(3) / 2.0 + 2, patch.size(2) / 2.0 + 2
        LAFs = normalizeLAFs(
            torch.tensor([[AffNetPix.PS / 2, 0, x], [0, AffNetPix.PS / 2,
                                                     y]]).reshape(1, 2, 3),
            patch.size(3), patch.size(2))
        baseLAFs[m, :, :] = LAFs
        with torch.no_grad():
            subpatch = HessianAffine[m].extract_patches_from_pyr(
                denormalizeLAFs(LAFs, patch.size(3), patch.size(2)),
                PS=AffNetPix.PS)
            if WRITE_IMGS_DEBUG:
                SaveImageWithKeys(
                    subpatch.detach().cpu().numpy().reshape([32, 32]), [],
                    'p1/' + str(m) + '.png')
            # This subpatch has been blurred by extract_patches_from_pyr...
            # let us crop it manually to obtain fair results against other methods
            subpatch = patch_np[16:48, 16:48].reshape(1, 1, 32, 32)
            #var_image = torch.autograd.Variable(torch.from_numpy(subpatch.astype(np.float32)), volatile = True)
            #subpatch = var_image.view(1, 1, 32,32)
            subpatches[m, :, :, :] = torch.from_numpy(
                subpatch.astype(np.float32))  #=subpatch
            if WRITE_IMGS_DEBUG:
                # subpatch is a numpy array at this point, so no .detach()/.cpu() is needed
                SaveImageWithKeys(subpatch.reshape([32, 32]), [],
                                  'p2/' + str(m) + '.png')
    if USE_CUDA:
        # or ---> A = AffNetPix(subpatches.cuda()).cpu()
        with torch.no_grad():
            A = batched_forward(AffNetPix, subpatches.cuda(), 256).cpu()
    else:
        with torch.no_grad():
            A = AffNetPix(subpatches)
    LAFs = torch.cat([torch.bmm(A, baseLAFs[:, :, 0:2]), baseLAFs[:, :, 2:]],
                     dim=2)
    dLAFs = denormalizeLAFs(LAFs, patch.size(3), patch.size(2))
    Alist = convertLAFs_to_A23format(dLAFs.detach().cpu().numpy().astype(
        np.float32))
    for m in range(patches.shape[0]):
        with torch.no_grad():
            patchaff = HessianAffine[m].extract_patches_from_pyr(
                dLAFs[m, :, :].reshape(1, 2, 3), PS=32)
            if WRITE_IMGS_DEBUG:
                SaveImageWithKeys(
                    patchaff.detach().cpu().numpy().reshape([32, 32]), [],
                    'im1/' + str(m) + '.png')
                SaveImageWithKeys(patch_np, [], 'im2/' + str(m) + '.png')
            subpatches[m, :, :, :] = patchaff
    if USE_CUDA:
        with torch.no_grad():
            # descriptors = HardNetDescriptor(subpatches.cuda()).detach().cpu().numpy().astype(np.float32)
            descriptors = batched_forward(HardNetDescriptor, subpatches.cuda(),
                                          256).cpu().numpy().astype(np.float32)
    else:
        with torch.no_grad():
            descriptors = HardNetDescriptor(
                subpatches).detach().cpu().numpy().astype(np.float32)
    return descriptors, Alist
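A minimal usage sketch for AffNetHardNet_describe (the crop patch_np[16:48, 16:48] above implies 64x64 single-channel input patches, i.e. an (N, 64, 64, 1) array; the random data is a stand-in):

import numpy as np

patches = (np.random.rand(8, 64, 64, 1) * 255).astype(np.float32)  # stand-in patch stack
descriptors, Alist = AffNetHardNet_describe(patches)
print(descriptors.shape)  # expected (8, 128)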
Example #9
hncheckpoint = torch.load(model_weights)
d2.load_state_dict(hncheckpoint['state_dict'])
d2.eval()
d3 = SIFTNet(patch_size=32)

model_weights = 'HardTFeat.pth'
d4 = HardTFeatNet(sm=SIFTNet(patch_size=32))
checkpoint = torch.load(model_weights)
d4.load_state_dict(checkpoint['state_dict'])
d4 = nn.Sequential(d4, L2Norm())

desc_list = [d1, d2, d3, d4]
desc_names = ['Pixels', 'HardNet', 'SIFT', 'TFeat']
USE_CUDA = False
detector = ScaleSpaceAffinePatchExtractor(mrSize=5.12,
                                          num_features=200,
                                          border=32,
                                          num_Baum_iters=0)
descriptor = HardNet()
model_weights = '../../HardNet++.pth'
hncheckpoint = torch.load(model_weights)
descriptor.load_state_dict(hncheckpoint['state_dict'])
descriptor.eval()
if USE_CUDA:
    detector = detector.cuda()
    descriptor = descriptor.cuda()


def get_geometry(img, det):
    with torch.no_grad():
        LAFs, resp = det(img)
    return LAFs  #, descriptors
Example #10
        fro_dists, idxs_in1, idxs_in2 = get_GT_correspondence_indexes_Fro(
            LAFs1, LAFs2, H1to2, dist_threshold=10, use_cuda=cuda)
        if len(fro_dists.size()) == 0:
            print('skip')
            continue
        loss = fro_dists.mean()
        total_loss += loss.item()
        print('test img', batch_idx, loss.item())
    print('Total loss:', total_loss / float(batch_idx + 1))


train_loader, test_loader = create_loaders()

HA = ScaleSpaceAffinePatchExtractor(mrSize=3.0,
                                    num_features=500,
                                    border=5,
                                    num_Baum_iters=1,
                                    AffNet=BaumNet(),
                                    use_cuda=USE_CUDA)

model = HA
if USE_CUDA:
    model = model.cuda()

optimizer1 = create_optimizer(model.AffNet, BASE_LR, 5e-5)

start = 0
end = 100
for epoch in range(start, end):
    print('epoch', epoch)
    if USE_CUDA:
        model = model.cuda()
Example #11
            H1to2 = H1to2.cpu()
            model = model.cuda()
        if (len(LAFs1) == 0) or (len(LAFs2) == 0):
            continue
        fro_dists, idxs_in1, idxs_in2 = get_GT_correspondence_indexes_Fro_and_center(
            LAFs1, LAFs2, H1to2,
            dist_threshold=3.,
            scale_diff_coef=0.4,
            center_dist_th=7.0,
            skip_center_in_Fro=True,
            do_up_is_up=True)
        if len(fro_dists.size()) == 0:
            print('skip')
            continue
        loss = fro_dists.mean()
        total_loss += loss.item()
        total_feats += fro_dists.size(0)
        print('test img', batch_idx, loss.item(), fro_dists.size(0))
    print('Total loss:', total_loss / float(batch_idx + 1), 'features',
          float(total_feats) / float(batch_idx + 1))

train_loader, test_loader = create_loaders()

HA = ScaleSpaceAffinePatchExtractor(mrSize=5.192,
                                    num_features=1500,
                                    border=5,
                                    num_Baum_iters=0)


model = HA
if USE_CUDA:
    model = model.cuda()

test(test_loader, model, cuda=USE_CUDA)
sys.exit(0)
Example #12
try:
    input_img_fname = sys.argv[1]
    output_fname = sys.argv[2]
    nfeats = int(sys.argv[3])
except:
    print "Wrong input format. Try python hesaffBaum.py imgs/cat.png cat.txt 2000"
    sys.exit(1)

img = Image.open(input_img_fname).convert('RGB')
img = np.mean(np.array(img), axis=2)

var_image = torch.from_numpy(img.astype(np.float32))  # plain tensor; gradients are not needed
var_image_reshape = var_image.view(1, 1, var_image.size(0), var_image.size(1))

HA = ScaleSpaceAffinePatchExtractor(mrSize=5.192,
                                    num_features=nfeats,
                                    border=5,
                                    num_Baum_iters=16,
                                    threshold=th,
                                    AffNet=AffineShapeEstimator(patch_size=19))
if USE_CUDA:
    HA = HA.cuda()
    var_image_reshape = var_image_reshape.cuda()

with torch.no_grad():
    LAFs, resp = HA(var_image_reshape)
ells = LAFs2ell(LAFs.data.cpu().numpy())

np.savetxt(output_fname, ells, delimiter=' ', fmt='%10.10f')
line_prepender(output_fname, str(len(ells)))
line_prepender(output_fname, '1.0')
Example #13
                                                                            skip_center_in_Fro=True,
                                                                            do_up_is_up=False,
                                                                            inv_to_eye=False)
        if len(fro_dists.size()) == 0:
            print('skip')
            continue
        loss = fro_dists.mean()
        total_feats += fro_dists.size(0)
        total_loss += loss.item()
        print('test img', batch_idx, loss.item(), fro_dists.size(0))
    print('Total loss:', total_loss / float(batch_idx + 1), 'features',
          float(total_feats) / float(batch_idx + 1))
    model.num = model_num_feats

train_loader, test_loader = create_loaders()

HA = ScaleSpaceAffinePatchExtractor(mrSize=5.192,
                                    num_features=350,
                                    border=5,
                                    num_Baum_iters=1,
                                    AffNet=LAFNet())
from pytorch_sift import SIFTNet
SIFT = SIFTNet(patch_size=32)


if USE_CUDA:
    HA = HA.cuda()

optimizer1 = create_optimizer(HA.AffNet.features, BASE_LR, 1e-4)

#test(test_loader, model, cuda = USE_CUDA)
for epoch in range(n_epochs):
    print('epoch', epoch)
    train(train_loader, HA, optimizer1, epoch, cuda=USE_CUDA)
    test(test_loader, HA, cuda=USE_CUDA)