# experiment 1
# Use the Gardens Point dataset to show MCN's instability when the number of
# input connections is small, and that more input connections cost more time.
# Params marked with (*) are the ones you should specify.

# day_right vs. night_right
D1 = np.load('../../desc/netvlad/gp/day_right_desc.npy')    # (*)
D2 = np.load('../../desc/netvlad/gp/night_right_desc.npy')  # (*)
D1 = D1 / np.linalg.norm(D1, axis=0)
D2 = D2 / np.linalg.norm(D2, axis=0)

err = 3  # (*)
GT = utils.getGroundTruthMatrix(D1.shape[0], err)
nConPerCol = [100, 300, 500, 700, 1000, 1500, 2000]  # (*)

# Traverse all nConPerCol values, compute AP and time consumption,
# and save a dict with keys 'ap' and 'time'.
# If the result is already saved, you can comment out the following code.
# params = MCN.MCNParams(probAddCon=0.05,
#                        nCellPerCol=32,
#                        nConPerCol=200,
#                        minColActivity=0.7,
#                        nColPerPattern=50,
#                        kActiveCol=100)
#
# MCN_time_cost = []
# MCN_ap = []
#
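# A minimal sketch of the sweep described above. `run_mcn` is a hypothetical
# helper standing in for however this repo builds and queries the MCN with a
# given parameter set; AP is computed with utils.drawPR / utils.calAvgPred as
# in the other experiments.
import time

sweep = {'ap': [], 'time': []}
for n in nConPerCol:
    params = MCN.MCNParams(probAddCon=0.05, nCellPerCol=32, nConPerCol=n,
                           minColActivity=0.7, nColPerPattern=50, kActiveCol=100)
    t0 = time.time()
    S = run_mcn(D1, D2, params)  # hypothetical: returns a similarity matrix
    sweep['time'].append(time.time() - t0)
    P, R = utils.drawPR(S, GT)
    sweep['ap'].append(utils.calAvgPred(P, R))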
# Demonstrate the advantage of the omnidirectional dataset: compress the
# omnidirectional descriptors and compare AP against the original-dimension
# single-view descriptors.
# Params marked with (*) must be configured for your setup.
# Requires the .npy descriptor files of the omni-SCUT dataset.
# ---------------------------- Notes ----------------------------------
D1 = np.load('./netvlad/day_desc.npy')   # (*)
D2 = np.load('./netvlad/dawn_desc.npy')  # (*)
D1 = D1 / np.linalg.norm(D1, axis=0)
D2 = D2 / np.linalg.norm(D2, axis=0)
num = D1.shape[0]
omni_D1 = D1.reshape((num // 4, -1))
omni_D2 = D2.reshape((num // 4, -1))
GT = utils.getGroundTruthMatrix(num // 4, 1)
sig_D1 = D1[::4, :]
sig_D2 = D2[::4, :]
del D1, D2

# single view
num, old_dims = sig_D1.shape
new_dims = 8192  # actually 8192 * 2 after the two sLSBH halves are concatenated
s = 0.25
P = np.random.rand(old_dims, new_dims)
P /= np.linalg.norm(P, axis=1, keepdims=True)
sig_D1_slsbh = utils.getLSBH(sig_D1, P, s)
sig_D2_slsbh = utils.getLSBH(sig_D2, P, s)
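# A plausible sketch of what utils.getLSBH does, inferred from the call above
# and the "8192 * 2" note: sparse LSBH projects the descriptors with P, then
# per row keeps the fraction `s` of largest responses as ones in the first
# half and the fraction `s` of smallest responses as ones in the second half.
# The repo's actual implementation may differ.
def getLSBH_sketch(D, P, s):
    Y = D @ P                                    # (n, new_dims) projections
    k = int(s * Y.shape[1])                      # active bits per half
    B = np.zeros((Y.shape[0], 2 * Y.shape[1]), dtype=np.uint8)
    for i, y in enumerate(Y):
        order = np.argsort(y)
        B[i, order[-k:]] = 1                     # largest responses
        B[i, Y.shape[1] + order[:k]] = 1         # smallest responses
    return B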
# If using the SCUT dataset, keep the following two lines
dbFeat = np.load(db_save_path)[::4, :]  # (*)
qFeat = np.load(q_save_path)[::4, :]
# otherwise use these instead
# dbFeat = np.load(db_save_path)
# qFeat = np.load(q_save_path)
dbn = dbFeat.shape[0]
qn = qFeat.shape[0]

# get ground truth  (*)
# If not using Oxford RobotCar, keep the following code
err = arg_dataset['err']
gt = utils.getGroundTruth(dbn, err)
gtm = utils.getGroundTruthMatrix(dbn, err)
# Oxford RobotCar:
# err = arg_dataset['err']
# db_gps = np.load(join(imgdir, 'gps_' + subdir[arg_eval['compare_subdir'][0]] + '.npy'))
# q_gps = np.load(join(imgdir, 'gps_' + subdir[arg_eval['compare_subdir'][1]] + '.npy'))
# _, gtm = utils.getGpsGT(db_gps, q_gps, err)

# Optional: uncomment to report recall@N.
# _ = utils.getResult(dbFeat, qFeat, gt, gtm)

# experiment 3_3
D1 = dbFeat / np.linalg.norm(dbFeat, axis=0)
D2 = qFeat / np.linalg.norm(qFeat, axis=0)
del dbFeat, qFeat
cannum = int(0.02 * D1.shape[0])
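# A plausible sketch of utils.getGroundTruthMatrix for frame-aligned
# sequences, guessed from its usage here: entry (i, j) is True when database
# frame i lies within `err` frames of query frame j. Not the repo's actual code.
def getGroundTruthMatrix_sketch(n, err):
    idx = np.arange(n)
    return np.abs(idx[:, None] - idx[None, :]) <= err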
plt.figure()
err = 9  # (*) error tolerance for the Nordland dataset; 9 is the value used in the paper
S_file = '../cp3_5/vis3/nd_sumwin.npz'  # (*) Nordland similarity matrices
S = np.load(S_file)
S_pw = S['S_pw']
S_pwk = S['S_pwk']
S_mcn = S['S_mcn']
S_seq = loadmat('../cp3_4/seqslam/nd_sumwin.mat')['S']  # (*)
tmp = np.isnan(S_seq)
S_seq[tmp] = np.max(S_seq[~tmp])  # replace NaNs with the worst (largest) distance
S_smcn = S['S_smcn']
S_smcntf = S['S_smcntf']
S_dd = S_nd_dd
del S

GT = utils.getGroundTruthMatrix(S_pw.shape[0], err)

P, R = utils.drawPR(S_pw, GT)
ap = utils.calAvgPred(P, R)
plt.plot(R, P, color=vis.color[0], label='PW: %.4f' % ap, linewidth=2)

P, R = utils.drawPR(S_pwk, GT)
ap = utils.calAvgPred(P, R)
plt.plot(R, P, color=vis.color[1], label='PWk: %.4f' % ap, linewidth=2)

P, R = utils.drawPR(S_mcn, GT)
ap = utils.calAvgPred(P, R)
plt.plot(R, P, color=vis.color[2], label='MCN: %.4f' % ap, linewidth=2)

P, R = utils.drawPR(S_seq, GT, True)  # True: S_seq presumably holds distances (smaller is better)
ap = utils.calAvgPred(P, R)
plt.plot(R, P, color=vis.color[3], label='SeqSLAM: %.4f' % ap, linewidth=2)
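# A rough sketch of what utils.drawPR / utils.calAvgPred plausibly compute,
# guessed from their usage: sweep a threshold over the similarity (or
# distance) matrix against the ground-truth matrix to trace a PR curve, and
# report average precision as the area under it. Not the repo's actual code.
def drawPR_sketch(S, GT, distance=False):
    scores = -S if distance else S               # make "larger = more similar"
    P, R = [], []
    for t in np.linspace(scores.min(), scores.max(), 100):
        match = scores >= t
        tp = np.sum(match & GT)
        fp = np.sum(match & ~GT)
        fn = np.sum(~match & GT)
        if tp + fp > 0 and tp + fn > 0:
            P.append(tp / (tp + fp))
            R.append(tp / (tp + fn))
    return np.array(P), np.array(R)

def calAvgPred_sketch(P, R):
    order = np.argsort(R)                        # integrate along increasing recall
    return np.trapz(P[order], R[order])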
if root.endswith('scut/'):
    qn = qn // 4
picid = sample(range(0, qn), numPicToShow)
numpic = len(picid)
img_format = 'Image%03d.jpg'  # (*)
err = 3                       # (*)
saveName = './vis2/gp.png'    # (*)
if root.endswith('robotcar/'):
    db_gps = np.load(root + 'gps_snow.npy')
    q_gps = np.load(root + 'gps_night.npy')
    _, gt = utils.getGpsGT(db_gps, q_gps, err)
else:
    gt = utils.getGroundTruthMatrix(qn, err)
gtl = []
for each in picid:
    gtl.append(list(np.where(gt[:, each])[0]))

id_pw = np.argmax(S_pw[:, picid], axis=0)
id_dd = np.argmax(S_dd[:, picid], axis=0)
id_mcn = np.argmax(S_mcn[:, picid], axis=0)
id_smcn = np.argmax(S_smcn[:, picid], axis=0)
with open('./vis3/pathid.pkl', 'rb') as f:  # (*)
    id_smcntf = pickle.load(f)['gp']
id_smcntf = [id_smcntf[x] for x in picid]
id_seq = np.argmin(S_seq[:, picid], axis=0)  # SeqSLAM scores are distances, so take the minimum
real_id_pw = np.copy(id_pw)
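# Illustrative check (not part of the original visualization code): a
# retrieval counts as correct when the returned database index falls inside
# that query's ground-truth list built above.
for k, q in enumerate(picid):
    ok = id_pw[k] in gtl[k]
    print('query %d: PW match %d -> %s' % (q, id_pw[k], 'correct' if ok else 'wrong'))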
# load features
subdir = arg_dataset['subdir']
db_save_path = join(desc_dir, subdir[arg_eval['compare_subdir'][0]] + '_desc.npy')
q_save_path = join(desc_dir, subdir[arg_eval['compare_subdir'][1]] + '_desc.npy')
dbFeat = np.load(db_save_path)
qFeat = np.load(q_save_path)
num, dim = dbFeat.shape

# get ground truth
err = 3
gt = utils.getGroundTruth(num, err)
gtm = utils.getGroundTruthMatrix(num, err)
_ = utils.getResult(dbFeat, qFeat, gt, gtm)

S = utils.MCN_pairwise(dbFeat, qFeat)
P, R = utils.drawPR(S, gtm)
ap = utils.calAvgPred(P, R)  # average precision, as in the other experiments
print("ap: %.4f" % ap)

# test dimension reduction effect
# r_dim = 128
# w_dbFeat = utils.pca_whitening(dbFeat, r_dim)
# w_dbFeat = np.ascontiguousarray(w_dbFeat)
# w_qFeat = utils.pca_whitening(qFeat, r_dim)
# w_qFeat = np.ascontiguousarray(w_qFeat)
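# A plausible sketch of utils.pca_whitening, inferred from how it is called
# above (reduce to r_dim dimensions and rescale by the singular values); the
# repo's real implementation may differ, e.g. in the scaling constant.
def pca_whitening_sketch(X, r_dim, eps=1e-8):
    Xc = X - X.mean(axis=0, keepdims=True)       # center the descriptors
    U, Svals, Vt = np.linalg.svd(Xc, full_matrices=False)
    W = Vt[:r_dim].T / (Svals[:r_dim] + eps)     # whitening projection
    return Xc @ W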
img_format = '%d.png'  # (*)
# named <dataset name>.png
saveName = './vis3/' + root.split('/')[-2] + '.png'          # (*)
ref_saveName = './vis3/ref_' + root.split('/')[-2] + '.png'  # (*)

# error tolerance
green_tolerance = 5                   # (*)
blue_tolerance = 2 * green_tolerance  # (*)
if root.endswith('robotcar/'):
    db_gps = np.load(root + 'gps_snow.npy')
    q_gps = np.load(root + 'gps_night.npy')
    _, gtg = utils.getGpsGT(db_gps, q_gps, green_tolerance)
    _, gtb = utils.getGpsGT(db_gps, q_gps, blue_tolerance)
else:
    gtg = utils.getGroundTruthMatrix(qn, green_tolerance)
    gtb = utils.getGroundTruthMatrix(qn, blue_tolerance)
gtlg = list(np.where(gtg[:, numTopick])[0])
gtlb = list(np.where(gtb[:, numTopick])[0])

# colors (BGR, as used by OpenCV)
blue = np.array([[[255, 0, 0]]])
green = np.array([[[0, 255, 0]]])
red = np.array([[[0, 0, 255]]])

id_pw = np.argsort(-S_pw[:, numTopick])[:topPicToShow]
id_mcn = np.argsort(-S_mcn[:, numTopick])[:topPicToShow]
id_dd = np.argsort(-S_dd[:, numTopick])[:topPicToShow]
id_smcn = np.argsort(-S_smcn[:, numTopick])[:topPicToShow]
id_smcntf = np.argsort(S_smcntf[:, numTopick])[:topPicToShow]  # ascending: no negation, smaller ranks first
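# Illustrative sketch of how the two tolerance lists and the colors above
# could be used to frame a retrieved image (green = within the tight
# tolerance, blue = within the loose one, red = wrong). The border width is a
# made-up value; this is not the repo's actual drawing code.
def frame_image(img, db_id, gtlg, gtlb, width=8):
    if db_id in gtlg:
        color = (0, 255, 0)   # green: within green_tolerance
    elif db_id in gtlb:
        color = (255, 0, 0)   # blue: within blue_tolerance
    else:
        color = (0, 0, 255)   # red: incorrect match
    return cv2.copyMakeBorder(img, width, width, width, width,
                              cv2.BORDER_CONSTANT, value=color)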
# load id file
with open('./id_dict.pkl', 'rb') as f:
    id_dict = pickle.load(f)
sig_f_id = id_dict['sig_f_id']
# note: the pickle's keys are spelled 'onmi', not 'omni'
omni_f_id = id_dict['onmi_f_id']
sig_t_id = id_dict['sig_t_id']
omni_t_id = id_dict['onmi_t_id']
omni_t_mcn_id = id_dict['onmi_t_mcn_id']
omni_t_smcn_id = id_dict['onmi_t_smcn_id']
omni_t_smcntf_id = id_dict['onmi_t_smcntf_id']

# error tolerance
sc_err = 1
# make ground truth
sc_gtg = utils.getGroundTruthMatrix(sc_num, sc_err)
sc_gtb = utils.getGroundTruthMatrix(sc_num, 2 * sc_err)

# save video
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
fps = 1.0
video = cv2.VideoWriter('./omni.mp4', fourcc, fps, (900, 600))
for i in trange(show_num):
    # first row
    st = 4 * i
    img = cv2.imread(sc_qfile + sc_img_f % st)
    set_img(img, ex_c, ex_r)
    set_img(img, ex_c + img_size + ex_r, ex_r)
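    # The rest of the loop presumably pastes the remaining views onto a shared
    # canvas via set_img and then pushes the frame; sketched under the
    # assumption of a `canvas` array of shape (600, 900, 3), dtype uint8:
    # video.write(canvas)

# After the loop, the writer must be released to finalize the .mp4 file:
# video.release()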