コード例 #1
0
def main():
    """Train HardNet on the PhotoTour 'liberty' split and evaluate on the rest."""
    if not torch.cuda.is_available():
        print("CUDA is not available. Using CPU.")

    # Train on one PhotoTour split; every other split becomes a test set.
    training_set_name = 'liberty'
    testing_set_names = [
        name for name in PairPhotoTour.NAMES if name != training_set_name
    ]

    # Filesystem layout.
    data_path = 'data/sets/'
    model_path = 'data/models/'

    # Optimisation hyper-parameters.
    batch_size = 1024
    learning_rate = 10.0
    weight_decay = 1e-4
    momentum = 0.9
    dampening = 0.9
    margin = 1
    epochs = 10

    console_logger = LoggerConsole()
    training_loader_factory = __init_training_loader_factory(
        training_set_name, batch_size, data_path, console_logger, 10000)
    testing_loader_factories = __init_testing_loader_factories(
        testing_set_names, batch_size, data_path)
    optimizer_factory = SGDOptimizerFactory(learning_rate, weight_decay,
                                            momentum, dampening)
    triplet_loss = LossHardNetTripletMargin(margin)
    network = HardNet(HardNetModule(), model_path)

    # Tag runs with the host name so checkpoints from different machines differ.
    experiment_tag = "{}_run_1".format(platform.uname()[1])

    network.train(training_loader_factory, testing_loader_factories,
                  optimizer_factory, triplet_loss, epochs,
                  experiment_tag, console_logger, 10)
コード例 #2
0
def test(model,epoch):
    """Qualitative evaluation of *model* (an orientation net) on graf img1/img6.

    Detects affine-covariant patches (AffNet for shape, *model* for
    orientation), describes them with HardNet++, matches descriptors with a
    second-nearest-neighbour ratio test, verifies tentative matches against
    the ground-truth homography H1to6p, and prints/visualises the inlier
    ratio.  Fix: the original Python 2 ``print`` statements were converted to
    the ``print()`` function so the code runs under Python 3.
    """
    torch.cuda.empty_cache()
    # Switch to evaluate mode.
    model.eval()
    # Affine-shape network with pretrained weights, also in eval mode.
    from architectures import AffNetFast
    affnet = AffNetFast()
    model_weights = 'pretrained/AffNet.pth'
    hncheckpoint = torch.load(model_weights)
    affnet.load_state_dict(hncheckpoint['state_dict'])
    affnet.eval()
    detector = ScaleSpaceAffinePatchExtractor( mrSize = 5.192, num_features = 3000,
                                          border = 5, num_Baum_iters = 1, 
                                          AffNet = affnet, OriNet = model)
    # Pretrained HardNet++ patch descriptor, eval mode.
    descriptor = HardNet()
    model_weights = 'HardNet++.pth'
    hncheckpoint = torch.load(model_weights)
    descriptor.load_state_dict(hncheckpoint['state_dict'])
    descriptor.eval()
    if args.cuda:
        detector = detector.cuda()
        descriptor = descriptor.cuda()
    input_img_fname1 = 'test-graf/img1.png'#sys.argv[1]
    input_img_fname2 = 'test-graf/img6.png'#sys.argv[1]
    H_fname = 'test-graf/H1to6p'#sys.argv[1]
    output_img_fname = 'graf_match.png'#sys.argv[3]
    img1 = load_grayscale_var(input_img_fname1)
    img2 = load_grayscale_var(input_img_fname2)
    H = np.loadtxt(H_fname)
    H1to2 = Variable(torch.from_numpy(H).float())
    SNN_threshold = 0.8
    with torch.no_grad():
        LAFs1, descriptors1 = get_geometry_and_descriptors(img1, detector, descriptor)
        torch.cuda.empty_cache()
        LAFs2, descriptors2 = get_geometry_and_descriptors(img2, detector, descriptor)
        visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAFs1.detach().cpu().numpy().squeeze(), 'b', show = False, save_to = LOG_DIR + "/detections1_" + str(epoch) + '.png')
        visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAFs2.detach().cpu().numpy().squeeze(), 'g', show = False, save_to = LOG_DIR + "/detection2_" + str(epoch) + '.png')
        dist_matrix = distance_matrix_vector(descriptors1, descriptors2)
        min_dist, idxs_in_2 = torch.min(dist_matrix,1)
        # Mask out the nearest neighbour to find the second nearest.
        # NOTE(review): this overwrites entire columns, so it can also mask
        # other rows' true nearest neighbours — confirm this is intended.
        dist_matrix[:,idxs_in_2] = 100000
        min_2nd_dist, idxs_2nd_in_2 = torch.min(dist_matrix,1)
        # Lowe-style SNN ratio test; epsilon guards against division by zero.
        mask = (min_dist / (min_2nd_dist + 1e-8)) <= SNN_threshold
        tent_matches_in_1 = indxs_in1 = torch.autograd.Variable(torch.arange(0, idxs_in_2.size(0)), requires_grad = False).cuda()[mask]
        tent_matches_in_2 = idxs_in_2[mask]
        tent_matches_in_1 = tent_matches_in_1.long()
        tent_matches_in_2 = tent_matches_in_2.long()
        LAF1s_tent = LAFs1[tent_matches_in_1,:,:]
        LAF2s_tent = LAFs2[tent_matches_in_2,:,:]
        # Keep only tentative matches consistent with the GT homography.
        min_dist, plain_indxs_in1, idxs_in_2 = get_GT_correspondence_indexes(LAF1s_tent, LAF2s_tent,H1to2.cuda(), dist_threshold = 6) 
        plain_indxs_in1 = plain_indxs_in1.long()
        inl_ratio = float(plain_indxs_in1.size(0)) / float(tent_matches_in_1.size(0))
        print('Test epoch', str(epoch))
        print('Test on graf1-6,', tent_matches_in_1.size(0), 'tentatives', plain_indxs_in1.size(0), 'true matches', str(inl_ratio)[:5], ' inl.ratio')
        visualize_LAFs(img1.detach().cpu().numpy().squeeze(), LAF1s_tent[plain_indxs_in1.long(),:,:].detach().cpu().numpy().squeeze(), 'g', show = False, save_to = LOG_DIR + "/inliers1_" + str(epoch) + '.png')
        visualize_LAFs(img2.detach().cpu().numpy().squeeze(), LAF2s_tent[idxs_in_2.long(),:,:].detach().cpu().numpy().squeeze(), 'g', show = False, save_to = LOG_DIR + "/inliers2_" + str(epoch) + '.png')
    return
コード例 #3
0
File: train_ONet.py — Project: toanhvu/affnet
    torch.cuda.manual_seed_all(args.seed)

# Create the logging directory if it does not exist yet.
if not os.path.exists(args.log_dir):
    os.makedirs(args.log_dir)

# Seed CPU and NumPy RNGs for reproducibility (CUDA seeding happens above).
torch.manual_seed(args.seed)
np.random.seed(args.seed)

# Select the patch descriptor used during training.
if args.descriptor == 'SIFT':
    descriptor = SIFTNet(patch_size=32)
    if not args.no_cuda:
        descriptor = descriptor.cuda()
elif args.descriptor == 'HardNet':
    descriptor = HardNet()
    #descriptor = HardNetNarELU(SIFTNet(patch_size=32))
    if not args.no_cuda:
        descriptor = descriptor.cuda()
    # Warm-start HardNet from pretrained weights and keep it trainable.
    model_weights = 'HardNet++.pth'
    #model_weights = 'HardNetELU_Narr.pth'
    hncheckpoint = torch.load(model_weights)
    descriptor.load_state_dict(hncheckpoint['state_dict'])
    descriptor.train()
else:
    # Fallback "descriptor": identity mapping that just flattens each patch.
    descriptor = lambda x: x.view(x.size(0),-1)

# Run tag used to name logs/snapshots for this configuration.
suffix='ONet_' + args.merge + '_' + args.descriptor + '_' + str(args.lr) + '_' + str(args.n_pairs) 
# ---------------------------------------------------------------------------
def create_loaders():
コード例 #4
0
import torch.backends.cudnn as cudnn
import torch.optim as optim
from tqdm import tqdm
import math
import torch.nn.functional as F

USE_CUDA = False
from copy import deepcopy

LOG_DIR = 'log_snaps'
BASE_LR = 0.00000001
from SpatialTransformer2D import SpatialTransformer2d
from HardNet import HardNet
from Utils import CircularGaussKernel

# Instantiate HardNet and restore pretrained weights from the checkpoint file.
hardnet = HardNet()
checkpoint = torch.load('HardNetLib.pth')
hardnet.load_state_dict(checkpoint['state_dict'])


def line_prepender(filename, line):
    """Insert *line* (newline-terminated) above the current contents of *filename*."""
    with open(filename, 'r+') as handle:
        existing = handle.read()
        handle.seek(0, 0)
        prefix = line.rstrip('\r\n') + '\n'
        handle.write(prefix + existing)


try:
    input_img_fname = sys.argv[1]
    output_fname = sys.argv[2]
except:
コード例 #5
0
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.optim as optim
from tqdm import tqdm
import math
import torch.nn.functional as F
USE_CUDA = False
from copy import deepcopy

LOG_DIR = 'log_snaps'
BASE_LR = 0.00000001
from SpatialTransformer2D import SpatialTransformer2d
from HardNet import HardNet
from Utils import CircularGaussKernel

# HardNet descriptor instance; loading the pretrained checkpoint is disabled here.
hardnet = HardNet()
#checkpoint = torch.load('HardNetLib.pth')
#hardnet.load_state_dict(checkpoint['state_dict'])

def line_prepender(filename, line):
    """Rewrite *filename* so *line* appears first, followed by the old contents."""
    with open(filename, 'r+') as fh:
        body = fh.read()
        fh.seek(0, 0)
        fh.write('%s\n%s' % (line.rstrip('\r\n'), body))
# Read input image and output file paths from the command line.
# Fixes: Python 2 `print` statement converted to print(); bare `except`
# narrowed to IndexError so real errors are not silently swallowed.
try:
    input_img_fname = sys.argv[1]
    output_fname = sys.argv[2]
except IndexError:
    print("Wrong input format. Try ./extract_hardnet_desc_from_hpatches_file.py imgs/ref.png out.txt")
    sys.exit(1)
コード例 #6
0
from HardNet import HardNet

# USE_CUDA = False
USE_CUDA = torch.cuda.is_available()  # run on GPU whenever one is present
WRITE_IMGS_DEBUG = False  # presumably toggles debug image dumps elsewhere — confirm

# Affine-shape estimator operating on 32x32 patches, loaded in eval mode.
AffNetPix = AffNetFast(PS=32)
weightd_fname = 'hesaffnet/pretrained/AffNet.pth'
# map_location lets the checkpoint load on either device.
if USE_CUDA:
    checkpoint = torch.load(weightd_fname, map_location='cuda:0')
else:
    checkpoint = torch.load(weightd_fname, map_location='cpu')
AffNetPix.load_state_dict(checkpoint['state_dict'])
AffNetPix.eval()

# Pretrained HardNet++ patch descriptor, also in eval mode.
HardNetDescriptor = HardNet()
model_weights = 'hesaffnet/pretrained/HardNet++.pth'
if USE_CUDA:
    hncheckpoint = torch.load(model_weights, map_location='cuda:0')
else:
    hncheckpoint = torch.load(model_weights, map_location='cpu')
HardNetDescriptor.load_state_dict(hncheckpoint['state_dict'])
HardNetDescriptor.eval()

# Move both networks to the GPU when available.
if USE_CUDA:
    AffNetPix = AffNetPix.cuda()
    HardNetDescriptor = HardNetDescriptor.cuda()

from library import *
import cv2
コード例 #7
0
                                        volatile=True)
    var_image_reshape = var_image.view(1, 1, var_image.size(0),
                                       var_image.size(1))
    if False:
        var_image_reshape = var_image_reshape.cuda()
    return var_image_reshape


from HardNet import HardNet, HardTFeatNet, L2Norm
from pytorch_sift import SIFTNet
import math

# d1: raw-pixel baseline — flatten each patch, subtract its mean, L2-normalise.
d1 = lambda x: L2Norm()(x.view(x.size(0), -1) - x.view(x.size(0), -1).mean(
    dim=1, keepdim=True).expand(x.size(0),
                                x.size(1) * x.size(2) * x.size(3)).detach())
# d2: pretrained HardNet++ descriptor in eval mode.
d2 = HardNet()
model_weights = '../../HardNet++.pth'
hncheckpoint = torch.load(model_weights)
d2.load_state_dict(hncheckpoint['state_dict'])
d2.eval()
# d3: SIFT on 32x32 patches.
d3 = SIFTNet(patch_size=32)

# d4: pretrained HardTFeat, wrapped so its outputs are L2-normalised.
model_weights = 'HardTFeat.pth'
d4 = HardTFeatNet(sm=SIFTNet(patch_size=32))
checkpoint = torch.load(model_weights)
d4.load_state_dict(checkpoint['state_dict'])
d4 = nn.Sequential(d4, L2Norm())

# Descriptors compared side by side; names align with desc_list order.
desc_list = [d1, d2, d3, d4]
desc_names = ['Pixels', 'HardNet', 'SIFT', 'TFeat']
USE_CUDA = False
コード例 #8
0
model_path = 'data/models/'

# Evaluate on every PhotoTour split except the one trained on.
training_set_name = 'liberty'
testing_set_names = [name for name in PairPhotoTour.NAMES if name != training_set_name]

# NOTE(review): batch_size and data_path appear to be defined earlier in this
# file — confirm.
testing_loader_factories = __init_testing_loader_factories(testing_set_names, batch_size, data_path)

# Collect the epoch-9 checkpoint from each run directory.
checkpoints = glob.glob("./data/models/*/*9.pth", recursive=True)

# Track the checkpoint whose FPR vector has the smallest Euclidean norm.
best_fpr_norm = 100000
best_fprs = []
best_checkpoint = ""

for checkpoint_path in checkpoints:
    print("Running {}".format(checkpoint_path))
    hnet = HardNet(HardNetModule(), "")
    hnet.load_checkpoint(checkpoint_path)
    fprs = np.array(hnet.get_fprs(testing_loader_factories))
    # Collapse the per-dataset FPRs into one scalar score (L2 norm).
    this_fpr_norm = np.sum(fprs ** 2) ** .5
    print("FPRs for {}: {}, {}".format(checkpoint_path, fprs[0], fprs[1]))
    if this_fpr_norm < best_fpr_norm:
        best_fpr_norm = this_fpr_norm
        best_fprs = fprs
        best_checkpoint = checkpoint_path

print("Best checkpoint: {}".format(best_checkpoint))

# Report the winning checkpoint's FPR on each testing dataset.
# Fix: index-based `range(len(...))` loop replaced with idiomatic zip().
for name, fpr in zip(testing_set_names, best_fprs):
    print("Best FPR for {}: {}".format(name, fpr))
コード例 #9
0
import math
from HardNet import HardNet
from PIL import Image
import pandas as pd
from wiswUtils import (read_circle_patches,
                       crop_round_patches,
                       rotate_circle_patches,
                       resize_patches,
                       describe_with_default_ori)

if __name__ == '__main__':
    # Extraction settings; DO_CUDA toggles GPU inference.
    DO_CUDA=True
    model_weights = 'pretrained/HardNet++.pth'
    INPUT_DATA_DIR = 'input_data/'
    OUT_DIR = 'aux_data/plain_hardnet'
    # Pretrained HardNet++ descriptor in eval mode.
    model = HardNet()
    checkpoint = torch.load(model_weights)
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    if DO_CUDA:
        model = model.cuda()
        print('Extracting on GPU')
    else:
        print('Extracting on CPU')
        model = model.cpu()
    
    # Split the input CSVs into *patches* files and *ori* (orientation) files.
    fnames = sorted([f for f in os.listdir(os.path.join(INPUT_DATA_DIR))  if f.endswith('csv') ])
    patches_fnames = [os.path.join(INPUT_DATA_DIR, f) for f in fnames if 'patches' in f]
    ori_fnames = [os.path.join(INPUT_DATA_DIR, f) for f  in fnames if 'ori' in f]
    if not os.path.isdir(OUT_DIR):
        os.makedirs(OUT_DIR)
コード例 #10
0
def _extract_patches(gray_img, key_points):
    """Crop a 32x32 patch centred on each keypoint location in *gray_img*.

    NOTE(review): keypoints closer than 16 px to the image border yield
    smaller patches, which would make np.stack fail downstream — confirm the
    detector keeps clear of the border.
    """
    patches = []
    for x, y in (kp.pt for kp in key_points):
        x_min, x_max = int(x - 16), int(x + 16)
        y_min, y_max = int(y - 16), int(y + 16)
        patches.append(gray_img[y_min:y_max, x_min:x_max])
    return patches


def main():
    """Compare ORB and HardNet descriptor matching on the 'bikes' image pair."""
    img1_path = "/home/logan/Documents/classes/computer-vision/lembke_project_2/images/bikes/img1.ppm"
    img2_path = "/home/logan/Documents/classes/computer-vision/lembke_project_2/images/bikes/img4.ppm"

    img1 = cv2.cvtColor(cv2.imread(img1_path), cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(cv2.imread(img2_path), cv2.COLOR_BGR2GRAY)

    # ORB provides both the keypoints and a baseline binary descriptor.
    orb = cv2.ORB_create()
    key_points1, orb_desc1 = orb.detectAndCompute(img1, None)
    key_points2, orb_desc2 = orb.detectAndCompute(img2, None)

    # 32x32 patches around each keypoint, shaped (N, 1, 32, 32) for HardNet.
    img1_patch_tensor = torch.FloatTensor(
        np.expand_dims(np.stack(_extract_patches(img1, key_points1)), axis=1))
    # BUG FIX: the original stacked img1's patches here too, so HardNet
    # described image 1 twice and img2's descriptors were wrong.
    img2_patch_tensor = torch.FloatTensor(
        np.expand_dims(np.stack(_extract_patches(img2, key_points2)), axis=1))

    hardNet = HardNet(HardNetModule(), "")
    hardNet.load_checkpoint("./data/models/linux13_run_1/checkpoint_9.pth")
    hn_desc1 = hardNet.create_descriptors(img1_patch_tensor).numpy()
    hn_desc2 = hardNet.create_descriptors(img2_patch_tensor).numpy()

    # Cross-checked brute-force matching: Hamming for ORB bits, L2 for HardNet.
    orb_matcher = cv2.BFMatcher(normType=cv2.NORM_HAMMING, crossCheck=True)
    hn_matcher = cv2.BFMatcher(normType=cv2.NORM_L2, crossCheck=True)

    orb_matches = orb_matcher.match(orb_desc1, orb_desc2)
    hn_matches = hn_matcher.match(hn_desc1, hn_desc2)

    # Keep the 100 closest matches for display.
    orb_matches = sorted(orb_matches, key=lambda x: x.distance)[:100]
    hn_matches = sorted(hn_matches, key=lambda x: x.distance)[:100]

    orb_img = cv2.drawMatches(img1,
                              key_points1,
                              img2,
                              key_points2,
                              orb_matches,
                              None,
                              flags=2)

    hn_img = cv2.drawMatches(img1,
                             key_points1,
                             img2,
                             key_points2,
                             hn_matches,
                             None,
                             flags=2)

    cv2.imshow("ORB Matches", orb_img)
    cv2.imshow("HardNet Matches", hn_img)
    cv2.waitKey()