def learning_lw(net):
    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta["mean"], std=net.meta["std"])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    test_whiten = "retrieval-SfM-30k"
    print(">> {}: Learning whitening...".format(test_whiten))

    # loading db
    db_root = os.path.join(get_data_root(), "train", test_whiten)
    ims_root = os.path.join(db_root, "ims")
    db_fn = os.path.join(db_root, "{}-whiten.pkl".format(test_whiten))
    with open(db_fn, "rb") as f:
        db = pickle.load(f)
    images = [cid2filename(db["cids"][i], ims_root) for i in range(len(db["cids"]))]

    # extract whitening vectors (fixed test image size of 1024)
    print(">> {}: Extracting...".format(test_whiten))
    wvecs = extract_vectors(net, images, 1024, transform)

    # learning whitening
    print(">> {}: Learning...".format(test_whiten))
    wvecs = wvecs.numpy()
    m, P = whitenlearn(wvecs, db["qidxs"], db["pidxs"])
    Lw = {"m": m, "P": P}

    return Lw
def _compute_whitening(whitening, net, image_size, transform, ms, msp):
    # compute whitening
    start = time.time()
    print('>> {}: Learning whitening...'.format(whitening))

    # loading db
    db_root = os.path.join(get_data_root(), 'train', whitening)
    ims_root = os.path.join(db_root, 'ims')
    db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(whitening))
    with open(db_fn, 'rb') as f:
        db = pickle.load(f)
    images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

    # extract whitening vectors
    print('>> {}: Extracting...'.format(whitening))
    wvecs = extract_vectors(net, images, image_size, transform, ms=ms, msp=msp)

    # learning whitening
    print('>> {}: Learning...'.format(whitening))
    wvecs = wvecs.numpy()
    m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
    Lw = {'m': m, 'P': P}

    elapsed = time.time() - start
    print('>> {}: elapsed time: {}'.format(whitening, htime(elapsed)))

    return Lw, elapsed
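# For context, a minimal sketch (not part of the source) of how an Lw dict
# returned above is typically consumed. It assumes the whitenapply helper from
# cirtorch.utils.whiten (the upstream module that also provides whitenlearn),
# and hypothetical D x N arrays vecs/qvecs holding one descriptor per column.
import numpy as np
from cirtorch.utils.whiten import whitenapply

def rank_with_whitening(vecs, qvecs, Lw):
    # project database and query descriptors with the learned whitening
    vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
    qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])
    # cosine-similarity search on the whitened, re-normalized descriptors
    scores = np.dot(vecs_lw.T, qvecs_lw)
    return np.argsort(-scores, axis=0)  # per-query ranking of database images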
def get_scores_whitening(whiten_key, net, transform, ms, msp, image_size,
                         setup_network=True, gpu=True):
    """Learn scores whitening for the given network."""
    if whiten_key in LEARNED_WHITENING:
        return LEARNED_WHITENING[whiten_key]
    else:
        print("Learning scores whitening...")

        # extract whitening vectors
        # (train_images and db are module-level names, e.g. imported
        # from score_retrieval.exports)
        wvecs = extract_vectors(net, train_images, image_size, transform,
                                ms=ms, msp=msp, setup_network=setup_network, gpu=gpu)

        # learning whitening
        wvecs = wvecs.numpy()
        m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
        Lw = {'m': m, 'P': P}

        # cache learned whitening
        LEARNED_WHITENING[whiten_key] = Lw
        print("Whitening learned and cached.")
        return Lw
def learn_lw_whitening(params, data):
    """Learn Lw whitening."""
    assert not params
    names, values, queries, positives = data
    assert len(names) == len(values)
    assert len(queries) == len(positives)

    # Handle data: whitenlearn expects one descriptor per column (D x N)
    values = values.astype(np.float64).T
    name_index = {x: i for i, x in enumerate(names)}
    qidxs = np.array([name_index[x] for x in queries])
    pidxs = np.array([name_index[x] for x in positives])

    resources = stats.ResourceUsage()

    # Learn the whitening; if the matrix is not positive definite, retry on
    # progressively smaller shuffled subsets of the query/positive pairs.
    time0 = time.time()
    max_trials = 100
    max_excluded = 0.95
    trial = 0
    while True:
        try:
            if trial == 0:
                qwhit, pwhit = qidxs, pidxs
            else:
                idxs = np.random.permutation(len(qidxs))[:int(len(qidxs) * (1 - trial / max_trials * max_excluded))]
                print("Using subset of queries (%s/%s) trial %s" % (len(idxs), len(qidxs), trial), file=sys.stderr)
                qwhit, pwhit = qidxs[idxs], pidxs[idxs]
            whit_m, whit_p = whitenlearn(values, qwhit, pwhit)
            break
        except np.linalg.LinAlgError as e:
            if str(e) != "Matrix is not positive definite" or trial >= max_trials - 1:
                raise
            trial += 1
    timing = time.time() - time0

    metadata = {
        "stats": {
            "failed_times": trial,
            "vectors_used": round(len(qwhit) / float(len(qidxs)), 2),
            "vectors_total": len(qidxs),
        },
        "timings": {
            "whitening_learn": round(timing, 2),
        },
        "resource_usage": resources.take_current_stats().get_resources(),
    }
    return metadata, {'m': whit_m, 'P': whit_p}
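# A hypothetical invocation of learn_lw_whitening (the toy sizes and the
# query/positive pairing are illustrative assumptions, not from the source;
# it also relies on the surrounding module's whitenlearn and stats helpers).
# values holds one descriptor per row; queries[i] is paired with positives[i].
# The descriptor dimension is kept well below the number of pairs so the
# difference covariance inside whitenlearn stays positive definite.
import numpy as np

rng = np.random.default_rng(0)
names = ['im{}'.format(i) for i in range(200)]
values = rng.standard_normal((200, 16))  # 200 images, 16-D descriptors

# pair each even-indexed image with its successor
queries = [names[i] for i in range(0, 200, 2)]
positives = [names[i + 1] for i in range(0, 200, 2)]

metadata, Lw = learn_lw_whitening([], (names, values, queries, positives))
print(metadata['stats'], Lw['m'].shape)  # e.g. retry stats and a (16, 1) mean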
def main():
    args = parser.parse_args()

    # loading network from path
    if args.network_path is not None:
        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(), 'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest has default values, in case they do not exist
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get('local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        if "epoch" in state:
            print("Model after {} epochs".format(state["epoch"]))
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        # parse off-the-shelf parameters
        offtheshelf = args.network_offtheshelf.split('-')
        net_params = {}
        net_params['architecture'] = offtheshelf[0]
        net_params['pooling'] = offtheshelf[1]
        net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
        net_params['regional'] = 'reg' in offtheshelf[2:]
        net_params['whitening'] = 'whiten' in offtheshelf[2:]
        net_params['pretrained'] = True

        # load off-the-shelf network
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(args.network_offtheshelf))
        net = init_network(net_params)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters: test both single-scale and multi-scale
    ms_singlescale = [1]
    msp_singlescale = 1

    ms_multiscale = list(eval(args.multiscale))
    msp_multiscale = 1
    if len(ms_multiscale) > 1 and net.meta['pooling'] == 'gem' \
            and not net.meta['regional'] and not net.meta['whitening']:
        msp_multiscale = net.pool.p.item()
    print(">> Set-up multiscale:")
    print(">>>> ms: {}".format(ms_multiscale))
    print(">>>> msp: {}".format(msp_multiscale))

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:
            print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
            Lw = net.meta['Lw'][args.whitening]
        else:
            # if we evaluate networks from path we should save/load whitening
            # not to compute it every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(args.whitening)
                whiten_fn += '.pth'
            else:
                whiten_fn = None

            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
                Lw = torch.load(whiten_fn)
            else:
                Lw = {}
                for whiten_type, ms, msp in zip(["ss", "ms"],
                                                [ms_singlescale, ms_multiscale],
                                                [msp_singlescale, msp_multiscale]):
                    print('>> {0}: Learning whitening {1}...'.format(args.whitening, whiten_type))

                    # loading db
                    db_root = os.path.join(get_data_root(), 'train', args.whitening)
                    ims_root = os.path.join(db_root, 'ims')
                    db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
                    with open(db_fn, 'rb') as f:
                        db = pickle.load(f)
                    images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

                    # extract whitening vectors
                    print('>> {}: Extracting...'.format(args.whitening))
                    wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

                    # learning whitening
                    print('>> {}: Learning...'.format(args.whitening))
                    wvecs = wvecs.numpy()
                    m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                    Lw[whiten_type] = {'m': m, 'P': P}

                print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time() - start)))

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()
        for whiten_type, ms, msp in zip(["ss", "ms"],
                                        [ms_singlescale, ms_multiscale],
                                        [msp_singlescale, msp_multiscale]):
            print('>> Extracting features on {0}, whitening {1}'.format(dataset, whiten_type))

            # prepare config structure for the test dataset
            cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
            images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
            qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
            bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]

            # extract database and query vectors
            print('>> {}: database images...'.format(dataset))
            vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
            print('>> {}: query images...'.format(dataset))
            qvecs = extract_vectors(net, qimages, args.image_size, transform, bbxs=bbxs, ms=ms, msp=msp)

            print('>> {}: Evaluating...'.format(dataset))

            # convert to numpy
            vecs = vecs.numpy()
            qvecs = qvecs.numpy()

            # search, rank, and print
            scores = np.dot(vecs.T, qvecs)
            ranks = np.argsort(-scores, axis=0)
            compute_map_and_print(dataset, ranks, cfg['gnd'])

            if Lw is not None:
                # whiten the vectors
                vecs_lw = whitenapply(vecs, Lw[whiten_type]['m'], Lw[whiten_type]['P'])
                qvecs_lw = whitenapply(qvecs, Lw[whiten_type]['m'], Lw[whiten_type]['P'])

                # search, rank, and print
                scores = np.dot(vecs_lw.T, qvecs_lw)
                ranks = np.argsort(-scores, axis=0)
                compute_map_and_print(dataset + ' + whiten {}'.format(whiten_type), ranks, cfg['gnd'])

        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time() - start)))
def test(datasets, net, wandb_enabled=False, epoch=-1):
    global global_step

    print('>> Evaluating network on test datasets...')

    # for testing we use image size of max 1024
    image_size = 1024

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.test_whiten:
        start = time.time()

        print('>> {}: Learning whitening...'.format(args.test_whiten))

        # loading db
        db_root = os.path.join(get_data_root(), 'train', args.test_whiten)
        ims_root = os.path.join(db_root, 'ims')
        db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.test_whiten))
        with open(db_fn, 'rb') as f:
            db = pickle.load(f)
        images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

        # extract whitening vectors
        print('>> {}: Extracting...'.format(args.test_whiten))
        wvecs = extract_vectors(net, images, image_size, transform)  # implemented with torch.no_grad

        # learning whitening
        print('>> {}: Learning...'.format(args.test_whiten))
        wvecs = wvecs.numpy()
        m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
        Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.test_whiten, htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.test_datasets.split(',')
    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net, images, image_size, transform)  # implemented with torch.no_grad
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net, qimages, image_size, transform, bbxs)  # implemented with torch.no_grad

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        compute_map_and_print(dataset, ranks, cfg['gnd'],
                              wandb_enabled=wandb_enabled, epoch=epoch, global_step=global_step)

        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranks = np.argsort(-scores, axis=0)
            compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'],
                                  wandb_enabled=wandb_enabled, epoch=epoch, global_step=global_step)

        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time() - start)))
def main():
    args = parser.parse_args()

    # check if there are unknown datasets
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError('Unsupported or unknown dataset: {}!'.format(dataset))

    # check if test datasets are downloaded
    # and download them if they are not
    #download_train(get_data_root())
    download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:
        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(), 'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest has default values, in case they do not exist
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get('local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        # parse off-the-shelf parameters
        offtheshelf = args.network_offtheshelf.split('-')
        net_params = {}
        net_params['architecture'] = offtheshelf[0]
        net_params['pooling'] = offtheshelf[1]
        net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
        net_params['regional'] = 'reg' in offtheshelf[2:]
        net_params['whitening'] = 'whiten' in offtheshelf[2:]
        net_params['pretrained'] = True

        # load off-the-shelf network
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(args.network_offtheshelf))
        net = init_network(net_params)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = list(eval(args.multiscale))
    if len(ms) > 1 and net.meta['pooling'] == 'gem' \
            and not net.meta['regional'] and not net.meta['whitening']:
        msp = net.pool.p.item()
        print(">> Set-up multiscale:")
        print(">>>> ms: {}".format(ms))
        print(">>>> msp: {}".format(msp))
    else:
        msp = 1

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:
            print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
            if len(ms) > 1:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']
        else:
            # if we evaluate networks from path we should save/load whitening
            # not to compute it every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(args.whitening)
                if len(ms) > 1:
                    whiten_fn += '_ms'
                whiten_fn += '.pth'
            else:
                whiten_fn = None

            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
                Lw = torch.load(whiten_fn)
            else:
                print('>> {}: Learning whitening...'.format(args.whitening))

                # loading db
                db_root = os.path.join(get_data_root(), 'train', args.whitening)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

                # extract whitening vectors
                print('>> {}: Extracting...'.format(args.whitening))
                wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

                # learning whitening
                print('>> {}: Learning...'.format(args.whitening))
                wvecs = wvecs.numpy()
                m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                Lw = {'m': m, 'P': P}

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)

        print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()

        # original extract-and-rank path, disabled in favor of re-ranking
        # precomputed score matrices below
        """
        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg,i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg,i) for i in range(cfg['nq'])]
        try:
            bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
        except:
            bbxs = None  # for holidaysmanrot and copydays

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net, qimages, args.image_size, transform, bbxs=bbxs, ms=ms, msp=msp)

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()
        print(vecs.shape)
        print(qvecs.shape)

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        print(scores.shape)

        # to save scores (single query)
        # oxford
        #f = 'oxf_single.npy'
        # paris
        #f = 'par_single.npy'
        # roxford
        #f = 'roxf_single.npy'
        # rparis
        f = 'rpar_single.npy'

        ranks = np.argsort(-scores, axis=0)
        compute_map_and_print(dataset, ranks, cfg['gnd'])

        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            # save
            np.save(f, scores)
            ranks = np.argsort(-scores, axis=0)
            compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'])
        """

        ############################################################
        # Test

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        print(qimages)

        # to load scores
        # oxford
        #f = 'oxf_single.npy'
        #f = 'oxf_mq_avg.npy'
        #f = 'oxf_mq_max.npy'
        #f = 'oxf_sc_imf.npy'
        # paris
        #f = 'par_single.npy'
        #f = 'par_mq_avg.npy'
        #f = 'par_mq_max.npy'
        f = 'par_sc_imf.npy'
        # roxford
        #f = 'roxf_single.npy'
        #f = 'roxf_mq_avg.npy'
        #f = 'roxf_mq_max.npy'
        #f = 'roxf_sc_imf.npy'
        # rparis
        #f = 'rpar_single.npy'
        #f = 'rpar_mq_avg.npy'
        #f = 'rpar_mq_max.npy'
        #f = 'rpar_sc_imf.npy'

        # load
        scores = np.load(f)
        ranks = np.argsort(-scores, axis=0)
        compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'])

        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time() - start)))
def main():
    args = parser.parse_args()

    # check if there are unknown datasets
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError('Unsupported or unknown dataset: {}!'.format(dataset))

    # check if test datasets are downloaded
    # and download them if they are not
    download_train(get_data_root())
    download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:
        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(), 'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest has default values, in case they do not exist
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get('local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        # parse off-the-shelf parameters
        offtheshelf = args.network_offtheshelf.split('-')
        net_params = {}
        net_params['architecture'] = offtheshelf[0]
        net_params['pooling'] = offtheshelf[1]
        net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
        net_params['regional'] = 'reg' in offtheshelf[2:]
        net_params['whitening'] = 'whiten' in offtheshelf[2:]
        net_params['pretrained'] = True

        # load off-the-shelf network
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(args.network_offtheshelf))
        net = init_network(net_params)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = list(eval(args.multiscale))
    if len(ms) > 1 and net.meta['pooling'] == 'gem' \
            and not net.meta['regional'] and not net.meta['whitening']:
        msp = net.pool.p.item()
        print(">> Set-up multiscale:")
        print(">>>> ms: {}".format(ms))
        print(">>>> msp: {}".format(msp))
    else:
        msp = 1

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:
            print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
            if len(ms) > 1:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']
        else:
            # if we evaluate networks from path we should save/load whitening
            # not to compute it every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(args.whitening)
                if len(ms) > 1:
                    whiten_fn += '_ms'
                whiten_fn += '.pth'
            else:
                whiten_fn = None

            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
                Lw = torch.load(whiten_fn)
            else:
                print('>> {}: Learning whitening...'.format(args.whitening))

                # loading db
                db_root = os.path.join(get_data_root(), 'train', args.whitening)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

                # extract whitening vectors
                print('>> {}: Extracting...'.format(args.whitening))
                wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

                # learning whitening
                print('>> {}: Learning...'.format(args.whitening))
                wvecs = wvecs.numpy()
                m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                Lw = {'m': m, 'P': P}

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)

        print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()
        print('>> {}: Extracting...'.format(dataset))

        # extract database vectors from a local image folder
        print('>> {}: database images...'.format(dataset))
        images = get_imlist("E:\\PycharmProjects\\image-retrieval\\holiday2\\")
        names = []
        for i, img_path in enumerate(images):
            img_name = os.path.split(img_path)[1]
            print(img_name)
            names.append(img_name)

        # prepare config structure for the test dataset
        # cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        # images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        # try:
        #     bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
        # except:
        #     bbxs = None  # for holidaysmanrot and copydays
        # names = []
        # for i, img_path in enumerate(images):
        #     img_name = os.path.split(img_path)[1]
        #     print(img_name)
        #     names.append(img_name)

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

        # convert to numpy (one descriptor per row for the h5 file)
        vecs = vecs.numpy()
        vecs = vecs.T

        print("--------------------------------------------------")
        print("      writing feature extraction results ...")
        print("--------------------------------------------------")
        output = "gem_res_holiday_3.h5"
        h5f = h5py.File(output, 'w')
        h5f.create_dataset('dataset_1', data=vecs)
        h5f.create_dataset('dataset_2', data=np.string_(names))
        h5f.close()

        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time() - start)))
for i in range(split_num):
    with open(os.path.join(get_data_root(),
                           'whitening_vecs{}_of_{}.pkl'.format(i + 1, split_num)), 'rb') as f:
        wvecs_temp = pickle.load(f)
    if i == 0:
        wvecs = wvecs_temp
    else:
        wvecs = np.hstack((wvecs, wvecs_temp[:, :]))
    del wvecs_temp
    gc.collect()
    print('\r>>>> whitening_vecs{}_of_{}.pkl load done...'.format(i + 1, split_num), end='')
print('')

#%%
# learning whitening
print('>> {}: Learning...'.format(whitening))
m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
Lw = {'m': m, 'P': P}

# saving whitening if whiten_fn exists
whiten_fn = os.path.join(get_data_root(), 'whiten', 'R101_M2_120k_IS1024_MS1_WL.pth')
if whiten_fn is not None:
    print('>> {}: Saving to {}...'.format(whitening, whiten_fn))
    torch.save(Lw, whiten_fn)
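# For completeness, a plausible producer (assumed, not from the source) for the
# per-split pickle files consumed above: descriptors are extracted in split_num
# chunks so the full D x N whitening matrix never has to be assembled during
# extraction. It relies on the surrounding script's images, net, transform,
# image_size, split_num, extract_vectors, and get_data_root.
splits = np.array_split(np.arange(len(images)), split_num)
for i, idxs in enumerate(splits):
    chunk = [images[j] for j in idxs]
    # each chunk becomes its own D x (N / split_num) block
    wvecs_i = extract_vectors(net, chunk, image_size, transform).numpy()
    out_fn = os.path.join(get_data_root(),
                          'whitening_vecs{}_of_{}.pkl'.format(i + 1, split_num))
    with open(out_fn, 'wb') as f:
        pickle.dump(wvecs_i, f)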
def main():
    args = parser.parse_args()

    # check if test datasets are downloaded
    # and download them if they are not
    #download_train(get_data_root())
    #download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:
        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(), 'networks'))
        else:
            state = torch.load(args.network_path)
        net = init_network(model=state['meta']['architecture'],
                           pooling=state['meta']['pooling'],
                           whitening=state['meta']['whitening'],
                           mean=state['meta']['mean'],
                           std=state['meta']['std'],
                           pretrained=False)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        offtheshelf = args.network_offtheshelf.split('-')
        if len(offtheshelf) == 3:
            if offtheshelf[2] == 'whiten':
                offtheshelf_whiten = True
            else:
                raise RuntimeError("Incorrect format of the off-the-shelf network. "
                                   "Examples: resnet101-gem | resnet101-gem-whiten")
        else:
            offtheshelf_whiten = False
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(args.network_offtheshelf))
        net = init_network(model=offtheshelf[0],
                           pooling=offtheshelf[1],
                           whitening=offtheshelf_whiten)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = [1]
    msp = 1
    if args.multiscale:
        ms = [1, 1. / math.sqrt(2), 1. / 2]
        if net.meta['pooling'] == 'gem' and net.whiten is None:
            msp = net.pool.p.data.tolist()[0]

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:
            print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
            if args.multiscale:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']
        else:
            print('>> {}: Learning whitening...'.format(args.whitening))

            # loading db
            db_root = os.path.join(get_data_root(), 'train', args.whitening)
            ims_root = os.path.join(db_root, 'ims')
            db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
            with open(db_fn, 'rb') as f:
                db = pickle.load(f)
            images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

            # extract whitening vectors
            print('>> {}: Extracting...'.format(args.whitening))
            wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

            # learning whitening
            print('>> {}: Learning...'.format(args.whitening))
            wvecs = wvecs.numpy()
            m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
            Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net, qimages, args.image_size, transform, bbxs=bbxs, ms=ms, msp=msp)

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        compute_map_and_print(dataset, ranks, cfg['gnd'])

        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranks = np.argsort(-scores, axis=0)
            compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'])

        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time() - start)))
def main():
    args = parser.parse_args()

    # check if test datasets are downloaded
    # and download them if they are not
    download_train(get_data_root())
    download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:
        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        state = torch.load(args.network_path)
        net = init_network(model=state['meta']['architecture'],
                           pooling=state['meta']['pooling'],
                           whitening=state['meta']['whitening'],
                           mean=state['meta']['mean'],
                           std=state['meta']['std'],
                           pretrained=False)
        net.load_state_dict(state['state_dict'])
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        offtheshelf = args.network_offtheshelf.split('-')
        if len(offtheshelf) == 3:
            if offtheshelf[2] == 'whiten':
                offtheshelf_whiten = True
            else:
                raise RuntimeError("Incorrect format of the off-the-shelf network. "
                                   "Examples: resnet101-gem | resnet101-gem-whiten")
        else:
            offtheshelf_whiten = False
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(args.network_offtheshelf))
        net = init_network(model=offtheshelf[0],
                           pooling=offtheshelf[1],
                           whitening=offtheshelf_whiten)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = [1]
    msp = 1
    if args.multiscale:
        ms = [1, 1. / math.sqrt(2), 1. / 2]
        if net.meta['pooling'] == 'gem' and net.whiten is None:
            msp = net.pool.p.data.tolist()[0]

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()
        print('>> {}: Learning whitening...'.format(args.whitening))

        # loading db
        db_root = os.path.join(get_data_root(), 'train', args.whitening)
        ims_root = os.path.join(db_root, 'ims')
        db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
        with open(db_fn, 'rb') as f:
            db = pickle.load(f)
        images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

        # extract whitening vectors
        print('>> {}: Extracting...'.format(args.whitening))
        wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

        # learning whitening
        print('>> {}: Learning...'.format(args.whitening))
        wvecs = wvecs.numpy()
        m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
        Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time() - start)))
    else:
        Lw = None

    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()
        print('>> {}: Extracting...'.format(dataset))

        if dataset == 'reco':
            images, qimages = landmark_recognition_dataset()
            bbxs = [None for x in qimages]
        elif dataset == 'retr':
            images, _ = landmark_retrieval_dataset()
            qimages = []
            bbxs = [None for x in qimages]
        else:
            # prepare config structure for the test dataset
            cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
            images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
            qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
            bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]

        with open('%s_fnames.pkl' % dataset, 'wb') as f:
            pickle.dump([images, qimages], f)

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
        vecs = vecs.numpy()
        print('>> saving')
        np.save('{}_vecs.npy'.format(dataset), vecs)

        if len(qimages) > 0:
            print('>> {}: query images...'.format(dataset))
            qvecs = extract_vectors(net, qimages, args.image_size, transform, bbxs=bbxs, ms=ms, msp=msp)
            qvecs = qvecs.numpy()
            np.save('{}_qvecs.npy'.format(dataset), qvecs)

        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            if len(qimages) > 0:
                # qvecs only exists when query images were extracted
                qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])
            # TODO

        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time() - start)))
def main():
    args = parser.parse_args()

    # check if test datasets are downloaded
    # and download them if they are not
    download_train(get_data_root())
    download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:
        net = load_network(args.network_path)
    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        net = load_offtheshelf(args.network_offtheshelf)

    # setting up the multi-scale parameters
    ms = [1]
    msp = 1
    if args.multiscale:
        ms = [1, 1. / math.sqrt(2), 1. / 2]
        if net.meta['pooling'] == 'gem' and net.whiten is None:
            msp = net.pool.p.data.tolist()[0]

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:
            print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
            if args.multiscale:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']
        else:
            print('>> {}: Learning whitening...'.format(args.whitening))

            if args.whitening == "scores":
                # special logic for scores database
                from score_retrieval.exports import (
                    db,
                    train_images as images,
                )
            else:
                # loading db
                db_root = os.path.join(get_data_root(), 'train', args.whitening)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

            # extract whitening vectors
            print('>> {}: Extracting...'.format(args.whitening))
            wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

            # learning whitening
            print('>> {}: Learning...'.format(args.whitening))
            wvecs = wvecs.numpy()
            m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
            Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()
        print('>> {}: Extracting...'.format(dataset))

        if dataset == "scores":
            # special added logic to handle loading our score dataset
            from score_retrieval.exports import (
                images,
                qimages,
                gnd,
            )
            print('>> {}: database images...'.format(dataset))
            vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
            print('>> {}: query images...'.format(dataset))
            qvecs = extract_vectors(net, qimages, args.image_size, transform, ms=ms, msp=msp)
        else:
            # extract ground truth
            cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
            gnd = cfg['gnd']

            # prepare config structure for the test dataset
            images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
            qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
            bbxs = [tuple(gnd[i]['bbx']) for i in range(cfg['nq'])]

            # extract database and query vectors
            print('>> {}: database images...'.format(dataset))
            vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
            print('>> {}: query images...'.format(dataset))
            qvecs = extract_vectors(net, qimages, args.image_size, transform, bbxs=bbxs, ms=ms, msp=msp)

        # validation: show stats for the first few ground-truth entries
        print(">> {}: gnd stats: {}, {}, {}".format(
            dataset,
            len(gnd),
            [len(x["ok"]) for x in gnd[:10]],
            [len(x["junk"]) for x in gnd[:10]],
        ))
        print(">> {}: image stats: {}, {}".format(dataset, len(images), len(qimages)))
        assert len(gnd) == len(qimages), (len(gnd), len(qimages))

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()
        print(">> {}: qvecs.shape: {}".format(dataset, qvecs.shape))

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        print(">> {}: ranks (shape {}) head: {}".format(dataset, ranks.shape, ranks[:10, :10]))
        print(">> {}: gnd head: {}".format(dataset, gnd[:5]))

        # compute and print metrics
        compute_acc(ranks, gnd, dataset)
        compute_mrr(ranks, gnd, dataset)
        compute_map_and_print(dataset, ranks, gnd)

        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranks = np.argsort(-scores, axis=0)
            compute_acc(ranks, gnd, dataset + " + whiten")
            compute_mrr(ranks, gnd, dataset + " + whiten")
            compute_map_and_print(dataset + " + whiten", ranks, gnd)

        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time() - start)))
def main():
    args = parser.parse_args()

    # check if there are unknown datasets
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError('Unsupported or unknown dataset: {}!'.format(dataset))

    # check if test datasets are downloaded
    # and download them if they are not
    # download_train(get_data_root())
    # download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:
        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(), 'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest has default values, in case they do not exist
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get('local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False
        net_params['multi_layer_cat'] = state['meta']['multi_layer_cat']

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        # parse off-the-shelf parameters
        offtheshelf = args.network_offtheshelf.split('-')
        net_params = {}
        net_params['architecture'] = offtheshelf[0]
        net_params['pooling'] = offtheshelf[1]
        net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
        net_params['regional'] = 'reg' in offtheshelf[2:]
        net_params['whitening'] = 'whiten' in offtheshelf[2:]
        net_params['pretrained'] = True

        # load off-the-shelf network
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(args.network_offtheshelf))
        net = init_network(net_params)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    print(">> image size: {}".format(args.image_size))
    ms = list(eval(args.multiscale))
    if len(ms) > 1 and net.meta['pooling'] == 'gem' \
            and not net.meta['regional'] and not net.meta['whitening']:
        msp = net.pool.p.item()
    else:
        msp = 1
    print(">> Set-up multiscale:")
    print(">>>> ms: {}".format(ms))
    print(">>>> msp: {}".format(msp))

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:
            print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
            if len(ms) > 1:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']
        else:
            # if we evaluate networks from path we should save/load whitening
            # not to compute it every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(args.whitening)
                if len(ms) > 1:
                    whiten_fn += '_ms'
                whiten_fn += '.pth'
            else:
                whiten_fn = None

            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
                Lw = torch.load(whiten_fn)
            else:
                print('>> {}: Learning whitening...'.format(args.whitening))

                # loading db
                db_root = os.path.join(get_data_root(), 'train', args.whitening)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

                # extract whitening vectors
                print('>> {}: Extracting...'.format(args.whitening))
                wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

                # learning whitening
                print('>> {}: Learning...'.format(args.whitening))
                wvecs = wvecs.numpy()
                m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                Lw = {'m': m, 'P': P}

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)

        print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()

        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        # bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
        print('>> not using bbxs...')
        bbxs = None

        # key_url_list = ParseData(os.path.join(get_data_root(), 'index.csv'))
        # index_image_path = os.path.join(get_data_root(), 'resize_index_image')
        # images = [os.path.join(index_image_path, key_url_list[i][0]) for i in range(len(key_url_list))]
        # key_url_list = ParseData(os.path.join(get_data_root(), 'test.csv'))
        # test_image_path = os.path.join(get_data_root(), 'resize_test_image')
        # qimages = [os.path.join(test_image_path, key_url_list[i][0]) for i in range(len(key_url_list))]
        # # bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]

        # csvfile = open(os.path.join(get_data_root(), 'index_clear.csv'), 'r')
        # csvreader = csv.reader(csvfile)
        # images = [line[:1][0] for line in csvreader]
        #
        # csvfile = open(os.path.join(get_data_root(), 'test_clear.csv'), 'r')
        # csvreader = csv.reader(csvfile)
        # qimages = [line[:1][0] for line in csvreader]
        # bbxs = None

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
        # vecs = torch.randn(2048, 5063)
        # vecs = torch.randn(2048, 4993)

        # hxq modified
        # bbxs = None
        # print('>> set no bbxs...')
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net, qimages, args.image_size, transform, bbxs=bbxs, ms=ms, msp=msp)

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)

        # hxq modified, test add features map for retrieval
        # vecs = [vecs[i].numpy() for i in range(len(vecs))]
        # qvecs_temp = np.zeros((qvecs[0].shape[0], len(qvecs)))
        # for i in range(len(qvecs)):
        #     qvecs_temp[:, i] = qvecs[i][:, 0].numpy()
        # qvecs = qvecs_temp
        #
        # scores = np.zeros((len(vecs), qvecs.shape[-1]))
        # for i in range(len(vecs)):
        #     scores[i, :] = np.amax(np.dot(vecs[i].T, qvecs), 0)

        ranks = np.argsort(-scores, axis=0)
        mismatched_info = compute_map_and_print(dataset, ranks, cfg['gnd'], kappas=[1, 5, 10, 100])

        # hxq added
        show_false_img = False
        if show_false_img:
            print('>> Save mismatched image tuple...')
            for info in mismatched_info:
                mismatched_img_show_save(info, qimages, images, args, bbxs=bbxs)

        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranks = np.argsort(-scores, axis=0)
            mismatched_info = compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'])

            # hxq added
            # show_false_img = False
            if show_false_img:
                print('>> Save mismatched image tuple...')
                for info in mismatched_info:
                    mismatched_img_show_save(info, qimages, images, args, bbxs=bbxs)

        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time() - start)))
def main():
    # def process(network_path, datasets='oxford5k,paris6k', whitening=None,
    #             image_size=1024, multiscale='[1]', query=None):
    args = parser.parse_args()
    # args.query = None

    # check if there are unknown datasets
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError('Unsupported or unknown dataset: {}!'.format(dataset))

    # check if test datasets are downloaded
    # and download them if they are not
    #download_train(get_data_root())
    #download_test(get_data_root())

    # setting up the visible GPU
    #os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:
        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(), 'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest has default values, in case they do not exist
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get('local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = list(eval(args.multiscale))
    if len(ms) > 1 and net.meta['pooling'] == 'gem' \
            and not net.meta['regional'] and not net.meta['whitening']:
        msp = net.pool.p.item()
        print(">> Set-up multiscale:")
        print(">>>> ms: {}".format(ms))
        print(">>>> msp: {}".format(msp))
    else:
        msp = 1

    # moving network to gpu and eval mode
    #net.cuda()
    #net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:
            print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
            if len(ms) > 1:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']
        else:
            # if we evaluate networks from path we should save/load whitening
            # not to compute it every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(args.whitening)
                if len(ms) > 1:
                    whiten_fn += '_ms'
                whiten_fn += '.pth'
            else:
                whiten_fn = None

            # debug: inspect the whitening filename and exit early
            print(whiten_fn)
            return

            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
                Lw = torch.load(whiten_fn)
            else:
                print('>> {}: Learning whitening...'.format(args.whitening))

                # loading db
                db_root = os.path.join(get_data_root(), 'train', args.whitening)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

                # extract whitening vectors
                print('>> {}: Extracting...'.format(args.whitening))
                wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

                # learning whitening
                print('>> {}: Learning...'.format(args.whitening))
                wvecs = wvecs.numpy()
                m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                Lw = {'m': m, 'P': P}

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)

        print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    # query type
    for dataset in datasets:
        start = time.time()
        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        #for i in cfg: print(i)
        #print(cfg['gnd'][0]['bbx'])
        #return

        # extract database vectors, caching them on disk
        print('>> {}: database images...'.format(dataset))
        feas_dir = os.path.join(cfg['dir_data'], 'features')
        if not os.path.isdir(feas_dir):
            os.mkdir(feas_dir)
        feas_sv = os.path.join(feas_dir, dataset + '_' + args.network_path + '_features.pkl')
        if not os.path.isfile(feas_sv):
            images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
            vecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
            with open(feas_sv, 'wb') as f:
                pickle.dump(vecs, f)
        else:
            with open(feas_sv, 'rb') as f:
                vecs = pickle.load(f)

        # extract query vectors: either a single query image or the dataset queries
        print('>> {}: query images...'.format(dataset))
        if args.query is not None:
            qimages = [args.query]
            qvecs = extract_vectors(net, qimages, args.image_size, transform, ms=ms, msp=msp)
        else:
            qfeas_dir = feas_dir
            qfeas_sv = os.path.join(qfeas_dir, dataset + '_' + args.network_path + '_qfeatures.pkl')
            if not os.path.isfile(qfeas_sv):
                qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
                try:
                    bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
                except:
                    bbxs = None
                qvecs = extract_vectors(net, qimages, args.image_size, transform, bbxs=bbxs, ms=ms, msp=msp)
                with open(qfeas_sv, 'wb') as f:
                    pickle.dump(qvecs, f)
            else:
                with open(qfeas_sv, 'rb') as f:
                    qvecs = pickle.load(f)

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()
        #qvecs = qvecs[:, 0].reshape(-1, 1)
        #args.query = True

        # search, rank, and print
        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranksw = np.argsort(-scores, axis=0)

            if args.query is None:
                #compute_map_and_print(dataset + ' + whiten', ranksw, cfg['gnd'])
                compute_map_and_print1(dataset + ' + whiten', ranksw, cfg['gnd'])
                scores = np.dot(vecs.T, qvecs)
                ranks = np.argsort(-scores, axis=0)
                # compute_map_and_print(dataset, ranks, cfg['gnd'])
                compute_map_and_print1(dataset, ranks, cfg['gnd'])
            else:
                # single-query mode: report the top-10 retrieved image paths
                a = []
                for i in ranksw:
                    a.append(os.path.join(cfg['dir_images'], cfg['imlist'][i[0]]) + cfg['ext'])
                print(a[:10])
                result = cfg['dir_data'] + '_result'
                with open(result + '.pkl', 'wb') as f:
                    pickle.dump(a[:10], f)

        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time() - start)))
def main():
    args = parser.parse_args()

    # check if there are unknown datasets
    for dataset in args.datasets.split(','):
        if dataset not in datasets_names:
            raise ValueError('Unsupported or unknown dataset: {}!'.format(dataset))

    # check if test datasets are downloaded, and download them if they are not
    #download_train(get_data_root())
    #download_test(get_data_root())

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    if args.network_path is not None:
        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(), 'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest has default values, in case it doesn't exist
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get('local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        # parse off-the-shelf parameters
        offtheshelf = args.network_offtheshelf.split('-')
        net_params = {}
        net_params['architecture'] = offtheshelf[0]
        net_params['pooling'] = offtheshelf[1]
        net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
        net_params['regional'] = 'reg' in offtheshelf[2:]
        net_params['whitening'] = 'whiten' in offtheshelf[2:]
        net_params['pretrained'] = True

        # load off-the-shelf network
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(args.network_offtheshelf))
        net = init_network(net_params)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = list(eval(args.multiscale))
    if len(ms) > 1 and net.meta['pooling'] == 'gem' and not net.meta['regional'] and not net.meta['whitening']:
        msp = net.pool.p.item()
        print(">> Set-up multiscale:")
        print(">>>> ms: {}".format(ms))
        print(">>>> msp: {}".format(msp))
    else:
        msp = 1

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:
            print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
            if len(ms) > 1:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']
        else:
            # if we evaluate networks from path we should save/load whitening
            # so it is not recomputed every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(args.whitening)
                if len(ms) > 1:
                    whiten_fn += '_ms'
                whiten_fn += '.pth'
            else:
                whiten_fn = None

            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
                Lw = torch.load(whiten_fn)
            else:
                print('>> {}: Learning whitening...'.format(args.whitening))

                # loading db
                db_root = os.path.join(get_data_root(), 'train', args.whitening)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

                # extract whitening vectors
                print('>> {}: Extracting...'.format(args.whitening))
                wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

                # learning whitening
                print('>> {}: Learning...'.format(args.whitening))
                wvecs = wvecs.numpy()
                m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                Lw = {'m': m, 'P': P}

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)

        print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets (University-1652 gallery/query lists)
    datasets = args.datasets.split(',')
    for dataset in datasets:
        start = time.time()
        print('>> {}: Extracting...'.format(dataset))

        # build the gallery and query image lists from the fixed name files
        images = []
        with open('/home/zzd/University1652-Baseline/gallery_name.txt') as gallery_file:
            for line in gallery_file:
                images.append('/home/zzd/University1652-Baseline/' + line.replace('\n', '')[2:])

        qimages = []
        with open('/home/zzd/University1652-Baseline/query_name.txt') as query_file:
            for line in query_file:
                qimages.append('/home/zzd/University1652-Baseline/' + line.replace('\n', '')[2:])

        gallery_label = get_id(images)
        query_label = get_id(qimages)

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        gallery_feature = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)
        gallery_feature = torch.transpose(gallery_feature, 0, 1)
        print('>> {}: query images...'.format(dataset))
        query_feature = extract_vectors(net, qimages, args.image_size, transform, ms=ms, msp=msp)
        query_feature = torch.transpose(query_feature, 0, 1)

        # save features and labels, then run the external evaluation script
        result = {
            'gallery_f': gallery_feature.numpy(),
            'gallery_label': gallery_label,
            'query_f': query_feature.numpy(),
            'query_label': query_label
        }
        scipy.io.savemat('pytorch_result.mat', result)
        print('>> {}: Evaluating...'.format(dataset))
        os.system('python evaluate_gpu.py')
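# 'get_id' is called above but not defined in this file. For a University-1652
# style layout (".../<split>/<class_id>/<image>.jpg") it would plausibly read
# the class id from the parent directory name; the parsing below is a
# hypothetical sketch introduced here for illustration, not the original helper.
import os

def get_id_sketch(img_paths):
    """Return one integer label per image path, parsed from its folder name."""
    labels = []
    for path in img_paths:
        folder = os.path.basename(os.path.dirname(path))  # e.g. ".../0839/img.jpg" -> "0839"
        labels.append(int(folder) if folder.isdigit() else -1)
    return labels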
def test(datasets, net):
    print('>> Evaluating network on test datasets...')

    # for testing we use an image size of max 1024
    image_size = 1024

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.test_whiten:
        start = time.time()
        print('>> {}: Learning whitening...'.format(args.test_whiten))

        # loading db
        db_root = os.path.join(get_data_root(), 'train', args.test_whiten)
        ims_root = os.path.join(db_root, 'ims')
        db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.test_whiten))
        with open(db_fn, 'rb') as f:
            db = pickle.load(f)
        images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

        # extract whitening vectors
        print('>> {}: Extracting...'.format(args.test_whiten))
        wvecs = extract_vectors(net, images, image_size, transform,
                                print_freq=10, batchsize=20)  # implemented with torch.no_grad

        # learning whitening
        print('>> {}: Learning...'.format(args.test_whiten))
        wvecs = wvecs.numpy()
        m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
        Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.test_whiten, htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets (overrides the 'datasets' argument)
    datasets = args.test_datasets.split(',')
    for dataset in datasets:
        start = time.time()
        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]

        # no bounding boxes are used, for cdvs_test_retrieval or otherwise
        bbxs = None

        print('>> {}: database images...'.format(dataset))
        if args.pool == 'gem':
            ms = [1, 1 / 2**(1 / 2), 1 / 2]
        else:
            ms = [1]
        if len(ms) > 1 and net.meta['pooling'] == 'gem' and not net.meta['regional'] and not net.meta['whitening']:
            msp = net.pool.p.item()
            print(">> Set-up multiscale:")
            print(">>>> ms: {}".format(ms))
            print(">>>> msp: {}".format(msp))
        else:
            msp = 1

        vecs = extract_vectors(net, images, image_size, transform, bbxs,
                               ms=ms, msp=msp, print_freq=1000, batchsize=1)  # implemented with torch.no_grad
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net, qimages, image_size, transform, bbxs,
                                ms=ms, msp=msp, print_freq=1000, batchsize=1)  # implemented with torch.no_grad

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # search, rank, and print
        scores = np.dot(vecs.T, qvecs)
        ranks = np.argsort(-scores, axis=0)
        if dataset == 'cdvs_test_retrieval':
            compute_map_and_print(dataset, ranks, cfg['gnd_id'])
        else:
            compute_map_and_print(dataset, ranks, cfg['gnd'])

        if Lw is not None:
            # whiten the vectors
            vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
            qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

            # search, rank, and print
            scores = np.dot(vecs_lw.T, qvecs_lw)
            ranks = np.argsort(-scores, axis=0)
            compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd'])

        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time() - start)))
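# The "search, rank" step above is the same in every variant: with
# L2-normalized descriptor columns, the dot product is cosine similarity, and
# argsort over the negated scores ranks database images per query, best first.
# A self-contained toy illustration (random data, names invented here):
import numpy as np

rng = np.random.default_rng(0)
toy_vecs = rng.standard_normal((128, 5))    # 5 database descriptors, dim 128
toy_qvecs = rng.standard_normal((128, 2))   # 2 query descriptors
toy_vecs /= np.linalg.norm(toy_vecs, axis=0, keepdims=True)
toy_qvecs /= np.linalg.norm(toy_qvecs, axis=0, keepdims=True)

toy_scores = np.dot(toy_vecs.T, toy_qvecs)      # (n_db, n_queries) similarities
toy_ranks = np.argsort(-toy_scores, axis=0)     # toy_ranks[0, q] = best match for query q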
def main():
    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    using_cdvs = float(args.using_cdvs)

    # loading network from path
    if args.network_path is not None:
        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(), 'networks'))
        else:
            # fine-tuned network from path
            state = torch.load(args.network_path)

        # parsing net params from meta
        # architecture, pooling, mean, std required
        # the rest has default values, in case it doesn't exist
        net_params = {}
        net_params['architecture'] = state['meta']['architecture']
        net_params['pooling'] = state['meta']['pooling']
        net_params['local_whitening'] = state['meta'].get('local_whitening', False)
        net_params['regional'] = state['meta'].get('regional', False)
        net_params['whitening'] = state['meta'].get('whitening', False)
        net_params['mean'] = state['meta']['mean']
        net_params['std'] = state['meta']['std']
        net_params['pretrained'] = False

        # load network
        net = init_network(net_params)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        # parse off-the-shelf parameters
        offtheshelf = args.network_offtheshelf.split('-')
        net_params = {}
        net_params['architecture'] = offtheshelf[0]
        net_params['pooling'] = offtheshelf[1]
        net_params['local_whitening'] = 'lwhiten' in offtheshelf[2:]
        net_params['regional'] = 'reg' in offtheshelf[2:]
        net_params['whitening'] = 'whiten' in offtheshelf[2:]
        net_params['pretrained'] = True

        # load off-the-shelf network
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(args.network_offtheshelf))
        net = init_network(net_params)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = list(eval(args.multiscale))
    if len(ms) > 1 and net.meta['pooling'] == 'gem' and not net.meta['regional'] and not net.meta['whitening']:
        msp = net.pool.p.item()
        print(">> Set-up multiscale:")
        print(">>>> ms: {}".format(ms))
        print(">>>> msp: {}".format(msp))
    else:
        msp = 1

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:
            print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
            if len(ms) > 1:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']
        else:
            # if we evaluate networks from path we should save/load whitening
            # so it is not recomputed every time
            if args.network_path is not None:
                whiten_fn = args.network_path + '_{}_whiten'.format(args.whitening)
                if len(ms) > 1:
                    whiten_fn += '_ms'
                whiten_fn += '.pth'
            else:
                whiten_fn = None

            if whiten_fn is not None and os.path.isfile(whiten_fn):
                print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
                Lw = torch.load(whiten_fn)
            else:
                print('>> {}: Learning whitening...'.format(args.whitening))

                # loading db
                db_root = os.path.join(get_data_root(), 'train', args.whitening)
                ims_root = os.path.join(db_root, 'ims')
                db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
                with open(db_fn, 'rb') as f:
                    db = pickle.load(f)
                images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

                # extract whitening vectors
                print('>> {}: Extracting...'.format(args.whitening))
                wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

                # learning whitening
                print('>> {}: Learning...'.format(args.whitening))
                wvecs = wvecs.numpy()
                m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
                Lw = {'m': m, 'P': P}

                # saving whitening if whiten_fn exists
                if whiten_fn is not None:
                    print('>> {}: Saving to {}...'.format(args.whitening, whiten_fn))
                    torch.save(Lw, whiten_fn)

        print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    datasets = args.datasets.split(',')
    result_dir = args.network_path[0:-8]
    epoch_lun = args.network_path[0:-8].split('/')[-1].replace('model_epoch', '')
    print(">> Creating directory if it does not exist:\n>> '{}'".format(result_dir))
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    for dataset in datasets:
        start = time.time()

        # search, rank, and print
        print('>> {}: Extracting...'.format(dataset))

        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
        tuple_bbxs_qimlist = None
        tuple_bbxs_imlist = None
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]

        # extract database and query vectors
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net, qimages, args.image_size, transform,
                                bbxs=tuple_bbxs_qimlist, ms=ms, msp=msp, batchsize=1)
        qvecs = qvecs.numpy()
        qvecs = qvecs.astype(np.float32)
        np.save(os.path.join(result_dir, "{}_qvecs_ep{}_resize.npy".format(dataset, epoch_lun)), qvecs)

        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net, images, args.image_size, transform,
                               bbxs=tuple_bbxs_imlist, ms=ms, msp=msp, batchsize=1)
        vecs = vecs.numpy()
        vecs = vecs.astype(np.float32)
        np.save(os.path.join(result_dir, "{}_vecs_ep{}_resize.npy".format(dataset, epoch_lun)), vecs)

        scores = np.dot(vecs.T, qvecs)

        # optional late fusion with CDVS global-descriptor scores
        if using_cdvs != 0:
            print('>> {}: cdvs global descriptor loading...'.format(dataset))
            qvecs_global = cfg['qimlist_global']
            vecs_global = cfg['imlist_global']
            scores_global = np.dot(vecs_global, qvecs_global.T)
            scores += scores_global * using_cdvs

        ranks = np.argsort(-scores, axis=0)

        if args.ir_remove != '0':
            rank_len = 10
            rank_re = np.loadtxt(os.path.join(result_dir, '{}_ranks_new_relevent.txt'.format(dataset)))
            # the max value of rank_len
            MAX_RANK_LEN = int((rank_re.shape[0]) ** 0.5)
            rank_re = rank_re.reshape(MAX_RANK_LEN, MAX_RANK_LEN, rank_re.shape[1])
            for m in range(rank_re.shape[2]):
                for i in range(rank_re.shape[0]):
                    rank_re[i][i][m] = 1.0

            # per-position weights for the pairwise relevance scores
            quanzhong = [1, 0.7, 0.4] + [0.1] * (MAX_RANK_LEN - 3)

            for m in range(rank_re.shape[2]):
                # if adaptive, derive rank_len per query from the query scores
                # rank_re_q, the threshold q_aer, and the patience cons_n
                if args.ir_adaption:
                    using_local_query = True
                    cons_n = 5
                    q_aer = float(args.ir_adaption)
                    if using_local_query:
                        # using local-feature scores; otherwise query_q comes
                        # from the deep-feature scores below
                        rank_re_q = np.loadtxt(os.path.join(result_dir, '{}_ranks_new_query.txt'.format(dataset)))
                        query_q = rank_re_q[:, m]
                    else:
                        # using deep-feature scores
                        query_q = scores[ranks[:, m], m]

                    # grow rank_len until cons_n scores fall below the threshold
                    rank_len = 0
                    jishu = 0  # counter of below-threshold scores
                    for idx in range(min(len(query_q), MAX_RANK_LEN) - cons_n):
                        if jishu < cons_n:
                            if query_q[idx] > q_aer:
                                rank_len = idx + 1
                            else:
                                jishu += 1
                        else:
                            break

                max_dim = min(rank_len, MAX_RANK_LEN)
                print(max_dim)
                if max_dim > 2:
                    # re-rank within the top max_dim: images whose weighted
                    # pairwise relevance passes the threshold keep their order,
                    # the rest are moved to the back of the top-max_dim block
                    list2 = []
                    list_hou = []  # demoted images
                    MAX_RANK_LEN2 = max_dim
                    for i in range(MAX_RANK_LEN2):
                        if i < max_dim:
                            fenshu = 0  # weighted relevance score
                            for j in range(max_dim):
                                fenshu += rank_re[min(i, j)][max(i, j)][m] * quanzhong[j]
                            fenshu = fenshu / (max_dim - 1)
                            if fenshu > float(args.ir_remove):
                                list2.append(ranks[i][m])
                            else:
                                list_hou.append(ranks[i][m])
                        else:
                            list2.append(ranks[i][m])
                    ranks[0:MAX_RANK_LEN2, m] = list2 + list_hou

        np.savetxt(os.path.join(result_dir, "{}_ranks.txt".format(dataset)), ranks.astype(int))

        if dataset == 'cdvs_test_retrieval':
            compute_map_and_print(dataset, ranks, cfg['gnd_id'])
        else:
            compute_map_and_print(dataset, ranks, cfg['gnd'])
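# The CDVS branch above is a plain late fusion: deep-feature similarities and
# CDVS global-descriptor similarities are summed with a scalar weight
# (scores += scores_global * using_cdvs). An equivalent standalone sketch,
# with fuse_scores a name introduced here for illustration only:
import numpy as np

def fuse_scores(scores_deep, scores_global, weight):
    """Combine two (n_db, n_queries) similarity matrices; weight=0 is a no-op."""
    return scores_deep + weight * scores_global

# e.g. ranks = np.argsort(-fuse_scores(scores, scores_global, using_cdvs), axis=0)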
def testOxfordParisHolidays(net, eConfig):
    print('>> Evaluating network on test datasets...')

    # for testing we use an image size of max 1024
    image_size = 1024

    # setting up the multi-scale parameters
    ms = [1]
    msp = 1
    if eConfig['multiscale']:
        ms = [1, 1. / math.sqrt(2), 1. / 2]
        if net.meta['pooling'] == 'gem' and net.whiten is None:
            msp = net.pool.p.data.tolist()[0]

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if eConfig['whitening']:
        start = time.time()
        print('>> {}: Learning whitening...'.format(eConfig['test-whiten']))

        # loading db
        db_root = os.path.join(get_data_root(), 'train', eConfig['test-whiten'])
        ims_root = os.path.join(db_root, 'ims')
        db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(eConfig['test-whiten']))
        with open(db_fn, 'rb') as f:
            db = pickle.load(f)
        images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

        # extract whitening vectors
        print('>> {}: Extracting...'.format(eConfig['test-whiten']))
        wvecs = extract_vectors(net, images, image_size, transform, ms=ms, msp=msp)

        # learning whitening
        print('>> {}: Learning...'.format(eConfig['test-whiten']))
        wvecs = wvecs.numpy()
        m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
        Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(eConfig['test-whiten'], htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    datasets = eConfig['test-datasets'].split(',')
    results = []

    for dataset in datasets:
        start = time.time()

        if dataset not in ('holidays', 'rholidays'):
            print('>> {}: Extracting...'.format(dataset))

            # prepare config structure for the test dataset
            cfg = configdataset(dataset, os.path.join(get_data_root(), 'test'))
            images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
            qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
            bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
            if dataset in ('oxford105k', 'paris106k'):
                images.extend(cfg['distractors'])

            # extract database and query vectors
            print('>> {}: database images...'.format(dataset))
            vecs = extract_vectors(net, images, image_size, transform, ms=ms, msp=msp)
            print('>> {}: query images...'.format(dataset))
            qvecs = extract_vectors(net, qimages, image_size, transform, bbxs, ms=ms, msp=msp)

            print('>> {}: Evaluating...'.format(dataset))

            # convert to numpy
            vecs = vecs.numpy()
            qvecs = qvecs.numpy()

            # search, rank, and print
            scores = np.dot(vecs.T, qvecs)
            ranks = np.argsort(-scores, axis=0)
            results.append(compute_map_and_print(
                dataset + ('+ multiscale' if eConfig['multiscale'] else ''), ranks, cfg['gnd']))

            if Lw is not None:
                # whiten the vectors
                vecs_lw = whitenapply(vecs, Lw['m'], Lw['P'])
                qvecs_lw = whitenapply(qvecs, Lw['m'], Lw['P'])

                # search, rank, and print
                scores = np.dot(vecs_lw.T, qvecs_lw)
                ranks = np.argsort(-scores, axis=0)
                results.append(compute_map_and_print(dataset + ' + whiten', ranks, cfg['gnd']))
        else:
            results.append(testHolidays(net, eConfig, dataset, Lw))

        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time() - start)))

    return results
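# whitenlearn(wvecs, qidxs, pidxs) is used by every variant above but defined
# elsewhere. For reference, this is a sketch of the standard discriminative
# whitening (Lw) estimated from query/positive pairs in the style of Radenovic
# et al.; regularization and numerical details may differ from the real helper.
import numpy as np

def whitenlearn_sketch(X, qidxs, pidxs):
    """X: D x N descriptor matrix; qidxs/pidxs index matching query/positive pairs."""
    m = X[:, qidxs].mean(axis=1, keepdims=True)   # center on the query descriptors
    df = X[:, qidxs] - X[:, pidxs]                # intra-pair difference vectors
    S = np.dot(df, df.T) / df.shape[1]            # intra-class covariance
    P = np.linalg.inv(np.linalg.cholesky(S))      # whitens the intra-class space
    Xw = np.dot(P, X - m)
    eigval, eigvec = np.linalg.eigh(np.dot(Xw, Xw.T))  # total covariance, whitened space
    P = np.dot(eigvec[:, ::-1].T, P)              # rotate by decreasing eigenvalue
    return m, P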
def main():
    args = parser.parse_args()

    # setting up the visible GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id

    # loading network from path
    result_dir = 'retreival_results'
    if args.network_path is not None:
        result_dir = os.path.join(result_dir, args.network_path)
        print(">> Loading network:\n>>>> '{}'".format(args.network_path))
        if args.network_path in PRETRAINED:
            # pretrained networks (downloaded automatically)
            state = load_url(PRETRAINED[args.network_path],
                             model_dir=os.path.join(get_data_root(), 'networks'))
        else:
            state = torch.load(args.network_path)
        net = init_network(model=state['meta']['architecture'],
                           pooling=state['meta']['pooling'],
                           whitening=state['meta']['whitening'],
                           mean=state['meta']['mean'],
                           std=state['meta']['std'],
                           pretrained=False)
        net.load_state_dict(state['state_dict'])

        # if whitening is precomputed
        if 'Lw' in state['meta']:
            net.meta['Lw'] = state['meta']['Lw']

        print(">>>> loaded network: ")
        print(net.meta_repr())

    # loading offtheshelf network
    elif args.network_offtheshelf is not None:
        result_dir = os.path.join(result_dir, args.network_offtheshelf)
        offtheshelf = args.network_offtheshelf.split('-')
        if len(offtheshelf) == 3:
            if offtheshelf[2] == 'whiten':
                offtheshelf_whiten = True
            else:
                raise RuntimeError(
                    "Incorrect format of the off-the-shelf network. "
                    "Examples: resnet101-gem | resnet101-gem-whiten")
        else:
            offtheshelf_whiten = False
        print(">> Loading off-the-shelf network:\n>>>> '{}'".format(args.network_offtheshelf))
        net = init_network(model=offtheshelf[0],
                           pooling=offtheshelf[1],
                           whitening=offtheshelf_whiten)
        print(">>>> loaded network: ")
        print(net.meta_repr())

    # setting up the multi-scale parameters
    ms = [1]
    msp = 1
    if args.multiscale:
        ms = [1, 1. / math.sqrt(2), 1. / 2]
        if net.meta['pooling'] == 'gem' and net.whiten is None:
            msp = net.pool.p.data.tolist()[0]

    # moving network to gpu and eval mode
    net.cuda()
    net.eval()

    # set up the transform
    normalize = transforms.Normalize(mean=net.meta['mean'], std=net.meta['std'])
    transform = transforms.Compose([transforms.ToTensor(), normalize])

    # compute whitening
    if args.whitening is not None:
        start = time.time()

        if 'Lw' in net.meta and args.whitening in net.meta['Lw']:
            print('>> {}: Whitening is precomputed, loading it...'.format(args.whitening))
            if args.multiscale:
                Lw = net.meta['Lw'][args.whitening]['ms']
            else:
                Lw = net.meta['Lw'][args.whitening]['ss']
        else:
            # TODO: save the learned whitening instead of recomputing it
            print('>> {}: Learning whitening...'.format(args.whitening))

            # loading db
            db_root = os.path.join(get_data_root(), 'train', args.whitening)
            ims_root = os.path.join(db_root, 'ims')
            db_fn = os.path.join(db_root, '{}-whiten.pkl'.format(args.whitening))
            with open(db_fn, 'rb') as f:
                db = pickle.load(f)
            images = [cid2filename(db['cids'][i], ims_root) for i in range(len(db['cids']))]

            # extract whitening vectors
            print('>> {}: Extracting...'.format(args.whitening))
            wvecs = extract_vectors(net, images, args.image_size, transform, ms=ms, msp=msp)

            # learning whitening
            print('>> {}: Learning...'.format(args.whitening))
            wvecs = wvecs.numpy()
            m, P = whitenlearn(wvecs, db['qidxs'], db['pidxs'])
            Lw = {'m': m, 'P': P}

        print('>> {}: elapsed time: {}'.format(args.whitening, htime(time.time() - start)))
    else:
        Lw = None

    # evaluate on test datasets
    data_root = args.data_root
    datasets = datasets_names[args.dataset]
    result_dict = {}
    for dataset in datasets:
        start = time.time()
        result_dict[dataset] = {}
        print('>> {}: Extracting...'.format(dataset))

        # prepare image lists for the test dataset
        images = get_imlist(data_root, dataset, args.train_txt)
        qimages = get_imlist(data_root, dataset, args.query_txt)

        # extract database and query vectors
        print('>> {}: database images...'.format(dataset))
        vecs = extract_vectors(net, images, args.image_size, transform,
                               root=os.path.join(data_root, dataset), ms=ms, msp=msp)
        print('>> {}: query images...'.format(dataset))
        qvecs = extract_vectors(net, qimages, args.image_size, transform,
                                root=os.path.join(data_root, dataset), ms=ms, msp=msp)

        print('>> {}: Evaluating...'.format(dataset))

        # convert to numpy
        vecs = vecs.numpy()
        qvecs = qvecs.numpy()

        # rank the database against itself (train) and against the queries (test)
        scores, ranks = cal_ranks(vecs, vecs, Lw)
        result_dict[dataset]['train'] = {'scores': scores, 'ranks': ranks}
        scores, ranks = cal_ranks(vecs, qvecs, Lw)
        result_dict[dataset]['test'] = {'scores': scores, 'ranks': ranks}
        print('>> {}: elapsed time: {}'.format(dataset, htime(time.time() - start)))

    # save retrieval results
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    result_file = os.path.join(result_dir, args.outfile)
    np.save(result_file, result_dict)
    print('Saved retrieval results to {}'.format(result_file))
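# 'cal_ranks' is called above but not defined in this snippet. A hypothetical
# minimal version consistent with the call sites: it returns a similarity
# matrix and the per-query ranking, optionally applying the learned whitening
# Lw first (the whitening step mirrors the assumed whitenapply behavior).
import numpy as np

def cal_ranks_sketch(vecs, qvecs, Lw=None):
    if Lw is not None:
        # assumed whitening application: center, project, L2-normalize columns
        vecs = np.dot(Lw['P'], vecs - Lw['m'])
        vecs /= np.linalg.norm(vecs, axis=0, keepdims=True)
        qvecs = np.dot(Lw['P'], qvecs - Lw['m'])
        qvecs /= np.linalg.norm(qvecs, axis=0, keepdims=True)
    scores = np.dot(vecs.T, qvecs)        # (n_db, n_queries) cosine similarities
    ranks = np.argsort(-scores, axis=0)   # best database match first
    return scores, ranks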