ref_im = scene_list[j].strip()
query_im = scene_list[0].strip()
scene_dir = '/'.join(ref_im.split('/')[:-1])

# fully resolve the image, scene and homography paths
query_im = os.path.join(args.root, query_im)
query_im_ = query_im  # keep the resolved query path around for later use
ref_im = os.path.join(args.root, ref_im)
scene_dir = os.path.join(args.root, scene_dir)
H_file = os.path.join(scene_dir, 'H_1_%s' % str(j + 1))  # ground-truth homography between image 1 and image j+1

# load the query image, recording its size before and after resizing
query_im = imreadth(query_im)
hA, wA = query_im.shape[-2:]
query_im = resize(normalize(query_im), args.image_size, scale_factor)
hA_, wA_ = query_im.shape[-2:]

# load the reference image, recording its size before and after resizing
ref_im = imreadth(ref_im)
hB, wB = ref_im.shape[-2:]
ref_im = resize(normalize(ref_im), args.image_size, scale_factor)
hB_, wB_ = ref_im.shape[-2:]

# create the batch and run the matcher
batch = {}
batch['source_image'] = query_im.cuda()
batch['target_image'] = ref_im.cuda()

start = time.time()
matches, score, _ = matcher(batch,
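# A minimal sketch, assuming the H_file above is a plain-text 3x3 HPatches
# homography mapping query (image 1) coordinates to reference (image j+1)
# coordinates: warp the query keypoints and measure the reprojection error.
# The (N, 4) match layout (x_query, y_query, x_ref, y_ref) in original image
# coordinates and the helper name `reprojection_errors` are assumptions, not
# the repository's confirmed interface.
import numpy as np

def reprojection_errors(matches, H_file):
    H = np.loadtxt(H_file)                                   # 3x3 ground-truth homography
    pts = np.concatenate([matches[:, :2],
                          np.ones((matches.shape[0], 1))], axis=1)
    proj = (H @ pts.T).T                                     # warp query keypoints
    proj = proj[:, :2] / proj[:, 2:3]                        # dehomogenize
    return np.linalg.norm(proj - matches[:, 2:4], axis=1)    # per-match pixel error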
pair_names_chunk = list(pair_names_split[args.chunk_idx])
if args.skip_up_to != '':
    # resume right after the last pair that was already processed
    pair_names_chunk = pair_names_chunk[pair_names_chunk.index(args.skip_up_to) + 1:]

for pair in tqdm(pair_names_chunk):
    # load the source image, recording its size before and after resizing
    src_fn = os.path.join(args.aachen_path, 'database_and_query',
                          'images_upright', pair.split(' ')[0])
    src_image = plt.imread(src_fn)
    src = imreadth(src_fn)
    hA, wA = src.shape[-2:]
    src = resize(normalize(src), args.image_size, scale_factor)
    hA_, wA_ = src.shape[-2:]

    # load the target image, recording its size before and after resizing
    tgt_fn = os.path.join(args.aachen_path, 'database_and_query',
                          'images_upright', pair.split(' ')[1])
    tgt_image = plt.imread(tgt_fn)
    tgt = imreadth(tgt_fn)
    hB, wB = tgt.shape[-2:]
    tgt = resize(normalize(tgt), args.image_size, scale_factor)
    hB_, wB_ = tgt.shape[-2:]

    # run the matcher without tracking gradients
    with torch.no_grad():
        result, scores, features = matcher(
            {
                'source_image': src,
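# A minimal sketch, assuming `result` holds matches as an (N, 4) torch tensor
# of (x_src, y_src, x_tgt, y_tgt) coordinates defined on the resized images;
# the ratios computed above (wA / wA_, hA / hA_, ...) map them back to
# original-resolution pixels. The helper name `rescale_matches` and the match
# layout are assumptions, not the repository's confirmed output format.
def rescale_matches(matches, wA, hA, wA_, hA_, wB, hB, wB_, hB_):
    matches = matches.clone().float()
    matches[:, 0] *= wA / wA_   # source x back to original width
    matches[:, 1] *= hA / hA_   # source y back to original height
    matches[:, 2] *= wB / wB_   # target x back to original width
    matches[:, 3] *= hB / hB_   # target y back to original height
    return matches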