def pre_init_with_sift(trainPhotoTourDataset):
    # Describe every patch of the PhotoTour dataset with a SIFTNet descriptor.
    patch_size = 65
    ON_GPU = True
    SIFT = SIFTNet(patch_size=patch_size, do_cuda=ON_GPU)
    SIFT.eval()

    if ON_GPU:
        SIFT.cuda()

    # Resize every patch to the SIFT input size and add a channel dimension.
    transformed = []
    for img in trainPhotoTourDataset.data:
        transformed.append(
            np.expand_dims(cv2.resize(img.cpu().numpy(),
                                      (patch_size, patch_size)), axis=0))

    phototour_loader = data_utils.DataLoader(
        PhototourTrainingData(transformed), batch_size=256, shuffle=False)
    descriptors = []

    pbar = tqdm(enumerate(phototour_loader))

    for batch_idx, data_a in pbar:
        # Inference only, no gradients needed (replaces the long-deprecated
        # Variable(..., volatile=True) idiom).
        with torch.no_grad():
            torch_patches = data_a.float()
            if ON_GPU:
                torch_patches = torch_patches.cuda()
            res = SIFT(torch_patches)

        # Bring descriptors to the conventional integer SIFT scale (x512).
        sift = np.round(512. * res.cpu().numpy()).astype(np.float32)
        descriptors.extend(sift)

    return np.array(descriptors)
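
# Usage sketch (not part of the original snippet): calling the helper above on
# torchvision's PhotoTour patches. The dataset root/name below and the presence
# of the imports used above (SIFTNet, PhototourTrainingData, cv2, numpy, torch,
# tqdm, data_utils) are assumptions.
if __name__ == '__main__':
    from torchvision.datasets import PhotoTour
    liberty = PhotoTour(root='data/phototour', name='liberty',
                        download=True, train=True)
    descs = pre_init_with_sift(liberty)
    print(descs.shape)  # (n_patches, 128) float32 SIFT descriptors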
Example #2
if args.cuda:
    cudnn.benchmark = True
    torch.cuda.manual_seed_all(args.seed)

# create logging directory
if not os.path.exists(args.log_dir):
    os.makedirs(args.log_dir)

# set random seeds
torch.manual_seed(args.seed)
np.random.seed(args.seed)

if args.descriptor == 'SIFT':
    descriptor = SIFTNet(patch_size=32)
    if not args.no_cuda:
        descriptor = descriptor.cuda()
elif args.descriptor == 'HardNet':
    descriptor = HardNet()
    #descriptor = HardNetNarELU(SIFTNet(patch_size=32))
    if not args.no_cuda:
        descriptor = descriptor.cuda()
    model_weights = 'HardNet++.pth'
    #model_weights = 'HardNetELU_Narr.pth'
    hncheckpoint = torch.load(model_weights)
    descriptor.load_state_dict(hncheckpoint['state_dict'])
    descriptor.train()
else:
    descriptor = lambda x: x.view(x.size(0), -1)

suffix = 'ONet_' + args.merge + '_' + args.descriptor + '_' + str(args.lr) + '_' + str(args.n_pairs)
##########################################
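
# Sketch of the argparse setup this snippet assumes. Flag names mirror the
# attributes read above (args.no_cuda, args.log_dir, args.seed, args.descriptor,
# args.merge, args.lr, args.n_pairs); the defaults and choices below are
# illustrative, not taken from the original script.
import argparse
import torch

parser = argparse.ArgumentParser(description='ONet training')
parser.add_argument('--no-cuda', action='store_true', default=False)
parser.add_argument('--log-dir', default='logs/')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--descriptor', default='SIFT',
                    choices=['SIFT', 'HardNet', 'raw'])
parser.add_argument('--merge', default='sum')
parser.add_argument('--lr', type=float, default=0.005)
parser.add_argument('--n-pairs', type=int, default=1000)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()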
Example #3
            im_path = os.path.join(base, t + '.png')
            im = cv2.imread(im_path, 0)
            self.N = im.shape[0] // 65  # number of stacked 65x65 patches
            setattr(self, t, np.split(im, self.N))
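# (Context: the indented lines above are the tail of hpatches_sequence.__init__
# from the HPatches benchmark utilities; each image type t is stored as a tall
# PNG of vertically stacked 65x65 patches, loaded in grayscale and split into a
# list of N patches kept as an attribute of the sequence object.)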


seqs = glob.glob(sys.argv[1] + '/*')
seqs = [os.path.abspath(p) for p in seqs]

descr_name_vl = 'pytorch-sift-vlfeat-' + str(OUT_W)
descr_name_mp = 'pytorch-sift-mp-' + str(OUT_W)

descr_names = [descr_name_vl, descr_name_mp]

# VLFeat-style weighting: Gaussian mask with VLFeat sigma.
model_vl = SIFTNet(OUT_W, mask_type='Gauss', sigma_type='vlfeat')
model_vl.cuda()
print(model_vl)

# 'mp' variant: circular Gaussian mask with Hessian-Affine-style sigma.
model_mp = SIFTNet(OUT_W, mask_type='CircularGauss', sigma_type='hesamp')
model_mp.cuda()
print(model_mp)

models = [model_vl, model_mp]

for seq_path in seqs:
    seq = hpatches_sequence(seq_path)
    for descr_id, descr_name in enumerate(descr_names):
        model = models[descr_id]
        path = os.path.join(descr_name, seq.name)
        if not os.path.exists(path):
            os.makedirs(path)


# NOTE: the enclosing function header is missing from this excerpt; the
# signature and the image loading below are reconstructed from the call
# read_patch_file(input_img_fname, patch_w=73, patch_h=41) near the end.
def read_patch_file(fname, patch_w, patch_h, start_patch_idx=0):
    # Cut the input image into a regular grid of patch_w x patch_h patches.
    from PIL import Image
    img = Image.open(fname)
    width, height = img.size
    patch_idxs = []
    patches = []
    current_patch_idx = start_patch_idx
    for y in range(0, height, patch_h):
        patch_idxs.append([])
        for x in range(0, width, patch_w):
            patch = np.array(img.crop(
                (x, y, x + patch_w, y + patch_h))).astype(np.uint8)
            patches.append(patch)
            patch_idxs[-1].append(current_patch_idx)
            current_patch_idx += 1
    return np2torch(np.array(patches)), patch_idxs, patch_idxs[-1][-1]
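
# np2torch is used above but not defined in this excerpt. A minimal version
# consistent with that use (a stack of H x W (x C) uint8 patches turned into an
# N x C x H x W tensor, so that .mean(dim=1) further down averages the color
# channels) is an assumption and could look like this:
def np2torch(patches_np):
    t = torch.from_numpy(patches_np)
    if t.dim() == 3:
        # grayscale patches: N x H x W -> N x 1 x H x W
        t = t.unsqueeze(1)
    else:
        # color patches: N x H x W x C -> N x C x H x W
        t = t.permute(0, 3, 1, 2)
    return t.contiguous()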


model = SIFTNet(patch_size=41)
model.cuda()
model.eval()

try:
    input_img_fname = sys.argv[1]
    output_fname = sys.argv[2]
except IndexError:
    print("Wrong input format. Try ./rank_scale_patches.py imgs/ref.png out.txt")
    sys.exit(1)

patches, idxs, max_idx = read_patch_file(input_img_fname,
                                         patch_w=73,
                                         patch_h=41)
# Average the color channels to get single-channel (grayscale) patches.
bw_patches = patches.float().mean(dim=1, keepdim=True)
print(bw_patches.shape)
n_patches = bw_patches.size(0)
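
# Continuation sketch (not from the original script): SIFTNet(patch_size=41)
# expects square 41x41 single-channel input, so the 41x73 strips read above
# would first need to be cropped or resized; the center crop here is purely an
# illustrative assumption.
with torch.no_grad():
    center = bw_patches[:, :, :, 16:16 + 41].cuda()  # central 41x41 window
    descrs = model(center)                           # n_patches x 128 descriptors
print(descrs.shape)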