def compute_local_distmat(lqf, lgf):
    """Stack AlignedReID local distances into one (q+g) x (q+g) matrix.

    The layout is:

        [[ qq, qg ],
         [ qg.T, gg ]]

    where qq/qg/gg are the query-query, query-gallery and gallery-gallery
    local distance blocks.

    :param lqf: query local features, permuted below before the distance
        call (assumes (batch, c, parts) -> (batch, parts, c) — TODO confirm).
    :param lgf: gallery local features, same layout as ``lqf``.
    :return: numpy array of shape (num_q + num_g, num_q + num_g).
    """
    q = lqf.permute(0, 2, 1).cpu().numpy()
    g = lgf.permute(0, 2, 1).cpu().numpy()
    qg = low_memory_local_dist(q, g, aligned=True)
    qq = low_memory_local_dist(q, q, aligned=True)
    gg = low_memory_local_dist(g, g, aligned=True)
    top = np.concatenate([qq, qg], axis=1)
    bottom = np.concatenate([qg.T, gg], axis=1)
    return np.concatenate([top, bottom], axis=0)
def compute_distmat(cfg, num_query, feats, feats_flipped, local_feats, local_feats_flipped, theta, use_local_feature, use_rerank):
    """
    Compute the query-gallery distance matrix from global (and optionally
    AlignedReID local) features of the original and horizontally-flipped
    images, averaging the two resulting distance matrices.

    :param cfg: config node; only ``cfg.TEST.NORM`` is read here.
    :param num_query: the first ``num_query`` rows of each feature tensor
        are queries, the rest are gallery.
    :param feats: list of global feature batches (original images).
    :param feats_flipped: list of global feature batches (flipped images).
    :param local_feats: list of local feature batches; may be empty.
    :param local_feats_flipped: flipped counterpart of ``local_feats``.
    :param theta: forwarded to ``re_ranking`` as ``theta_value``.
    :param use_local_feature: if True (and local feats exist), fold the
        local distance matrix into re-ranking.
    :param use_rerank: use k-reciprocal re-ranking if True, otherwise the
        plain negative inner-product distance.
    :return: averaged (original + flipped) distance matrix, numpy array.
    """
    feats = torch.cat(feats, dim=0)
    feats_flipped = torch.cat(feats_flipped, dim=0)
    use_local = len(local_feats) > 0 and use_local_feature
    if use_local:
        local_feats = torch.cat(local_feats, dim=0)
        local_feats_flipped = torch.cat(local_feats_flipped, dim=0)

    if cfg.TEST.NORM:
        feats = F.normalize(feats, p=2, dim=1)
        feats_flipped = F.normalize(feats_flipped, p=2, dim=1)

    # query / gallery split
    qf = feats[:num_query]
    qf_flipped = feats_flipped[:num_query]
    gf = feats[num_query:]
    gf_flipped = feats_flipped[num_query:]

    def _local_dist(lq, lg):
        # Build the stacked (q+g) x (q+g) AlignedReID local distance matrix.
        # assumes local feats are (batch, c, parts), permuted to
        # (batch, parts, c) for low_memory_local_dist — TODO confirm
        lq = lq.permute(0, 2, 1)
        lg = lg.permute(0, 2, 1)
        qg = low_memory_local_dist(lq.numpy(), lg.numpy(), aligned=True)
        qq = low_memory_local_dist(lq.numpy(), lq.numpy(), aligned=True)
        gg = low_memory_local_dist(lg.numpy(), lg.numpy(), aligned=True)
        return np.concatenate([
            np.concatenate([qq, qg], axis=1),
            np.concatenate([qg.T, gg], axis=1)
        ], axis=0)

    if use_local:
        local_distmat = _local_dist(local_feats[:num_query], local_feats[num_query:])
        local_distmat_flipped = _local_dist(local_feats_flipped[:num_query],
                                            local_feats_flipped[num_query:])
    else:
        local_distmat = None
        local_distmat_flipped = None

    if use_rerank:
        distmat = re_ranking(qf, gf, k1=6, k2=2, lambda_value=0.3,
                             local_distmat=local_distmat,
                             theta_value=theta, only_local=False)  # (current best)
        del qf, gf  # free memory before the second re-ranking pass
        distmat_flipped = re_ranking(qf_flipped, gf_flipped, k1=6, k2=2, lambda_value=0.3,
                                     local_distmat=local_distmat_flipped,
                                     theta_value=theta, only_local=False)  # (current best)
        del qf_flipped, gf_flipped
    else:
        # Negative inner product: smaller = more similar (cosine if normed).
        distmat = -torch.mm(qf, gf.t()).cpu().numpy()
        distmat_flipped = -torch.mm(qf_flipped, gf_flipped.t()).cpu().numpy()

    return (distmat + distmat_flipped) / 2
def inference_aligned(cfg, model, test_dataloader, num_query):
    """Run inference with optional AlignedReID local features, then
    evaluate CMC/mAP, optionally grid-searching re-ranking parameters
    on the labelled split.

    :param cfg: config node; only ``cfg.TEST.NORM`` is read.
    :param model: network; returns either a global feature tensor or a
        (global_feat, local_feat) tuple per batch.
    :param test_dataloader: loader wrapped by ``data_prefetcher`` below.
    :param num_query: first ``num_query`` samples are queries, rest gallery.
    """
    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Start inferencing")

    model.eval()

    feats, pids, camids = [], [], []
    local_feats = []
    test_prefetcher = data_prefetcher(test_dataloader)
    batch = test_prefetcher.next()
    # Feature extraction over the whole test set, batch by batch.
    while batch[0] is not None:
        img, pid, camid = batch
        with torch.no_grad():
            feat = model(img)
            #feat = model(torch.flip(img, [3]))

        # Models with an aligned branch return (global_feat, local_feat).
        if isinstance(feat, tuple):
            feats.append(feat[0])
            local_feats.append(feat[1])
        else:
            feats.append(feat)

        pids.extend(pid.cpu().numpy())
        camids.extend(np.asarray(camid))

        batch = test_prefetcher.next()

    feats = torch.cat(feats, dim=0)
    if len(local_feats) > 0:
        local_feats = torch.cat(local_feats, dim=0)

    if cfg.TEST.NORM:
        feats = F.normalize(feats, p=2, dim=1)

    # Local features are 3-D; they are not normalized (had no effect on results).
    #if len(local_feats) > 0:
    #    local_feats = F.normalize(local_feats, p=2, dim=1)

    # query
    qf = feats[:num_query]
    if len(local_feats) > 0:
        lqf = local_feats[:num_query]
    q_pids = np.asarray(pids[:num_query])
    q_camids = np.asarray(camids[:num_query])

    # gallery
    gf = feats[num_query:]
    if len(local_feats) > 0:
        lgf = local_feats[num_query:]
    g_pids = np.asarray(pids[num_query:])
    g_camids = np.asarray(camids[num_query:])

    if len(local_feats) > 0:
        # Build the stacked (q+g) x (q+g) AlignedReID local distance matrix.
        # assumes local feats are (batch, c, parts), permuted to
        # (batch, parts, c) for low_memory_local_dist — TODO confirm
        lqf = lqf.permute(0, 2, 1)
        lgf = lgf.permute(0, 2, 1)
        local_qg_distmat = low_memory_local_dist(lqf.cpu().numpy(), lgf.cpu().numpy(), aligned=True)
        local_qq_distmat = low_memory_local_dist(lqf.cpu().numpy(), lqf.cpu().numpy(), aligned=True)
        local_gg_distmat = low_memory_local_dist(lgf.cpu().numpy(), lgf.cpu().numpy(), aligned=True)
        local_distmat = np.concatenate([
            np.concatenate([local_qq_distmat, local_qg_distmat], axis=1),
            np.concatenate([local_qg_distmat.T, local_gg_distmat], axis=1)
        ], axis=0)
    else:
        local_distmat = None

    # use reranking
    logger.info("use reranking")

    search_param = False
    search_theta = True
    if search_param:
        # Grid-search k1/k2/lambda using evaluation labels (validation only).
        best_score = 0
        best_param = []
        for k1 in range(5, 9):
            for k2 in range(1, k1):
                for l in np.linspace(0, 0.5, 11):
                    distmat = re_ranking(qf, gf, k1=k1, k2=k2, lambda_value=l)
                    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
                    score = (cmc[0] + mAP) / 2
                    print('k1, k2, l', k1, k2, np.around(l, 2), 'r1, mAP, score',
                          np.around(cmc[0], 4), np.around(mAP, 4), np.around(score, 4))
                    if score > best_score:
                        best_score = score
                        best_param = [k1, k2, l]
        print('Best Param', best_param)
        distmat = re_ranking(qf, gf, k1=best_param[0], k2=best_param[1],
                             lambda_value=best_param[2],
                             local_distmat=local_distmat, only_local=False)
    elif search_theta:
        # Sweep theta (global/local mixing weight) and keep the best score.
        # NOTE(review): best_param stays unbound if no score exceeds 0 —
        # unlikely in practice since scores are non-negative, but fragile.
        best_score = 0
        for theta in np.linspace(0, 1.0, 11):
            distmat = re_ranking(qf, gf, k1=6, k2=2, lambda_value=0.3,
                                 local_distmat=local_distmat, theta_value=theta,
                                 only_local=False)  # (current best)
            cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
            score = (cmc[0] + mAP) / 2
            print('theta', theta, 'r1, mAP, score',
                  np.around(cmc[0], 4), np.around(mAP, 4), np.around(score, 4))
            if score > best_score:
                best_score = score
                best_param = theta
        print('Best Param', best_param)
        # Re-rank once more with the winning theta.
        distmat = re_ranking(qf, gf, k1=6, k2=2, lambda_value=0.3,
                             local_distmat=local_distmat, theta_value=best_param,
                             only_local=False)  # (current best)
    else:
        distmat = re_ranking(qf, gf, k1=6, k2=2, lambda_value=0.3,
                             local_distmat=local_distmat, only_local=False,
                             theta_value=0.9)  #(current best)
        #distmat = re_ranking(qf, gf, k1=6, k2=2, lambda_value=0.4) # try

    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
    logger.info(f"mAP: {mAP:.1%}")
    for r in [1, 5, 10]:
        logger.info(f"CMC curve, Rank-{r:<3}:{cmc[r - 1]:.1%}")
    logger.info(f"Score: {(mAP + cmc[0]) / 2.:.1%}")
def compute_distmat(cfg, num_query, feats, feats_flipped, local_feats, local_feats_flipped, theta, use_local_feature, use_rerank):
    """
    Compute a query-gallery distance matrix fusing original and flipped
    features; pairwise distances are computed on GPU and then re-ranked.

    :param cfg: config node; only ``cfg.TEST.NORM`` is read.
    :param num_query: first ``num_query`` rows are queries, rest gallery.
    :param feats: list of global feature batches (original images).
    :param feats_flipped: list of global feature batches (flipped images).
    :param local_feats: list of local feature batches; may be empty.
    :param local_feats_flipped: flipped counterpart of ``local_feats``.
    :param theta: ``re_ranking`` ``theta_value`` (global/local weight).
    :param use_local_feature: fold local distances into re-ranking if True.
    :param use_rerank: use re_ranking if True, else a plain inner-product
        distance (see NOTE in the else branch).
    :return: fused distance matrix (numpy array).
    """
    feats = torch.cat(feats, dim=0)
    feats_flipped = torch.cat(feats_flipped, dim=0)
    if len(local_feats) > 0 and use_local_feature:
        local_feats = torch.cat(local_feats, dim=0)
        local_feats_flipped = torch.cat(local_feats_flipped, dim=0)

    if cfg.TEST.NORM:
        feats = F.normalize(feats, p=2, dim=1)
        feats_flipped = F.normalize(feats_flipped, p=2, dim=1)

    # query
    qf = feats[:num_query]
    qf_flipped = feats_flipped[:num_query]
    if len(local_feats) > 0 and use_local_feature:
        lqf = local_feats[:num_query]
        lqf_flipped = local_feats_flipped[:num_query]

    # gallery
    gf = feats[num_query:]
    gf_flipped = feats_flipped[num_query:]
    if len(local_feats) > 0:
        lgf = local_feats[num_query:]
        lgf_flipped = local_feats_flipped[num_query:]

    local_distmat1 = None
    local_distmat2 = None
    if use_rerank:
        if len(local_feats) > 0 and use_local_feature:
            # Local distance on the original images.
            # assumes local feats are (batch, c, parts), permuted to
            # (batch, parts, c) for low_memory_local_dist — TODO confirm
            lqf = lqf.permute(0, 2, 1)
            lgf = lgf.permute(0, 2, 1)
            local_qg_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned=True)
            local_qq_distmat = low_memory_local_dist(lqf.numpy(), lqf.numpy(), aligned=True)
            local_gg_distmat = low_memory_local_dist(lgf.numpy(), lgf.numpy(), aligned=True)
            local_distmat1 = np.concatenate([
                np.concatenate([local_qq_distmat, local_qg_distmat], axis=1),
                np.concatenate([local_qg_distmat.T, local_gg_distmat], axis=1)
            ], axis=0)
            # Free the blocks eagerly; these matrices are large.
            del local_qg_distmat, local_qq_distmat, local_gg_distmat

        query_num = qf.size(0)
        gallery_num = gf.size(0)
        distmat = compute_distmat_using_gpu(qf, gf)
        distmat1 = re_ranking(distmat, query_num, gallery_num, k1=12, k2=2, lambda_value=0.3,
                              local_distmat=local_distmat1, theta_value=theta, only_local=False)
        del distmat, local_distmat1

        if len(local_feats) > 0 and use_local_feature:
            # flipped
            lqf = lqf_flipped.permute(0, 2, 1)
            lgf = lgf_flipped.permute(0, 2, 1)
            local_qg_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned=True)
            local_qq_distmat = low_memory_local_dist(lqf.numpy(), lqf.numpy(), aligned=True)
            local_gg_distmat = low_memory_local_dist(lgf.numpy(), lgf.numpy(), aligned=True)
            local_distmat2 = np.concatenate([
                np.concatenate([local_qq_distmat, local_qg_distmat], axis=1),
                np.concatenate([local_qg_distmat.T, local_gg_distmat], axis=1)
            ], axis=0)
            del local_qg_distmat, local_qq_distmat, local_gg_distmat

        distmat = compute_distmat_using_gpu(qf_flipped, gf_flipped)
        distmat2 = re_ranking(distmat, query_num, gallery_num, k1=12, k2=2, lambda_value=0.3,
                              local_distmat=local_distmat2, theta_value=theta,
                              only_local=False)  # (current best)
        del distmat, local_distmat2

        # Average the original and flipped re-ranked distances.
        distmat = (distmat1 + distmat2) / 2
    else:
        # NOTE(review): this pairs the original query with the FLIPPED
        # gallery and vice versa, unlike the sibling compute_distmat that
        # pairs qf·gf and qf_flipped·gf_flipped — confirm this cross
        # pairing is intentional.
        distmat1 = -torch.mm(qf, gf_flipped.t()).cpu().numpy()
        distmat2 = -torch.mm(qf_flipped, gf.t()).cpu().numpy()
        distmat = (distmat1 + distmat2) / 2

    return distmat
def inference_flipped(cfg, model, test_dataloader, num_query):
    """
    Inference with horizontal-flip augmentation: extract features for the
    original and flipped images, build local distance matrices, sweep the
    re-ranking theta on the labelled split, and log CMC/mAP. The distance
    matrix at theta ~= 0.95 is dumped to dist_mats/*.h5.

    :param cfg: config node; reads ``cfg.TEST.NORM`` and ``cfg.MODEL.NAME``.
    :param model: returns a global feature or a (global, local) tuple.
    :param test_dataloader: loader wrapped by ``data_prefetcher``.
    :param num_query: first ``num_query`` samples are queries, rest gallery.
    """
    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Start inferencing")

    model.eval()

    feats, feats_flipped, pids, camids = [], [], [], []
    local_feats, local_feats_flipped = [], []
    test_prefetcher = data_prefetcher(test_dataloader)
    batch = test_prefetcher.next()
    # Extract features of the original and the flipped image per batch.
    while batch[0] is not None:
        img, pid, camid = batch
        with torch.no_grad():
            feat = model(img)
            feat_flipped = model(torch.flip(img, [3]))

        # Aligned models return (global_feat, local_feat).
        if isinstance(feat, tuple):
            feats.append(feat[0])
            local_feats.append(feat[1])
            feats_flipped.append(feat_flipped[0])
            local_feats_flipped.append(feat_flipped[1])
        else:
            feats.append(feat)
            feats_flipped.append(feat_flipped)

        pids.extend(pid.cpu().numpy())
        camids.extend(np.asarray(camid))

        batch = test_prefetcher.next()

    feats = torch.cat(feats, dim=0)
    feats_flipped = torch.cat(feats_flipped, dim=0)
    if len(local_feats) > 0:
        local_feats = torch.cat(local_feats, dim=0)
        local_feats_flipped = torch.cat(local_feats_flipped, dim=0)

    if cfg.TEST.NORM:
        feats = F.normalize(feats, p=2, dim=1)
        feats_flipped = F.normalize(feats_flipped, p=2, dim=1)

    # query
    qf = feats[:num_query]
    qf_flipped = feats_flipped[:num_query]
    if len(local_feats) > 0:
        lqf = local_feats[:num_query]
        lqf_flipped = local_feats_flipped[:num_query]
    q_pids = np.asarray(pids[:num_query])
    q_camids = np.asarray(camids[:num_query])

    # gallery
    gf = feats[num_query:]
    gf_flipped = feats_flipped[num_query:]
    if len(local_feats) > 0:
        lgf = local_feats[num_query:]
        lgf_flipped = local_feats_flipped[num_query:]
    g_pids = np.asarray(pids[num_query:])
    g_camids = np.asarray(camids[num_query:])

    if len(local_feats) > 0:
        # Stacked (q+g) x (q+g) local distance, original images first.
        lqf = lqf.permute(0, 2, 1)
        lgf = lgf.permute(0, 2, 1)
        local_qg_distmat = low_memory_local_dist(lqf.cpu().numpy(), lgf.cpu().numpy(), aligned=True)
        local_qq_distmat = low_memory_local_dist(lqf.cpu().numpy(), lqf.cpu().numpy(), aligned=True)
        local_gg_distmat = low_memory_local_dist(lgf.cpu().numpy(), lgf.cpu().numpy(), aligned=True)
        local_distmat = np.concatenate([
            np.concatenate([local_qq_distmat, local_qg_distmat], axis=1),
            np.concatenate([local_qg_distmat.T, local_gg_distmat], axis=1)
        ], axis=0)

        # flipped
        lqf = lqf_flipped.permute(0, 2, 1)
        lgf = lgf_flipped.permute(0, 2, 1)
        local_qg_distmat = low_memory_local_dist(lqf.cpu().numpy(), lgf.cpu().numpy(), aligned=True)
        local_qq_distmat = low_memory_local_dist(lqf.cpu().numpy(), lqf.cpu().numpy(), aligned=True)
        local_gg_distmat = low_memory_local_dist(lgf.cpu().numpy(), lgf.cpu().numpy(), aligned=True)
        local_distmat_flipped = np.concatenate([
            np.concatenate([local_qq_distmat, local_qg_distmat], axis=1),
            np.concatenate([local_qg_distmat.T, local_gg_distmat], axis=1)
        ], axis=0)
    else:
        # NOTE(review): local_distmat_flipped is NOT set on this path, so
        # the re_ranking calls below raise NameError when the model has no
        # local branch — add `local_distmat_flipped = None` here to fix.
        local_distmat = None

    # use reranking
    logger.info("use reranking")

    search_theta = True
    if search_theta:
        best_score = 0
        #for theta in np.linspace(0.9, 1.0, 11):
        for theta in np.linspace(0, 1.0, 21):
            distmat = re_ranking(qf, gf, k1=6, k2=2, lambda_value=0.3,
                                 local_distmat=local_distmat, theta_value=theta,
                                 only_local=False)  # (current best)
            distmat_flipped = re_ranking(qf_flipped, gf_flipped, k1=6, k2=2, lambda_value=0.3,
                                         local_distmat=local_distmat_flipped, theta_value=theta,
                                         only_local=False)  # (current best)
            # Fuse original and flipped re-ranked distances.
            distmat = (distmat + distmat_flipped) / 2
            cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
            score = (cmc[0] + mAP) / 2
            print('theta', np.around(theta, 2), 'r1, mAP, score',
                  np.around(cmc[0], 4), np.around(mAP, 4), np.around(score, 4))
            if score > best_score:
                best_score = score
                best_param = theta
                best_distmat = distmat

            # saving
            strtime = time.strftime("%Y%m%d_%H%M%S", time.localtime())
            if abs(theta - 0.95) < 1e-4:
                # saving dist_mats
                f = h5py.File('dist_mats/val_%s_%s_t0.95_flip.h5' % (cfg.MODEL.NAME, strtime), 'w')
                f.create_dataset('dist_mat', data=distmat, compression='gzip')
                f.close()
        print('Best Param', best_param)
        distmat = best_distmat
    else:
        distmat = re_ranking(qf, gf, k1=6, k2=2, lambda_value=0.3,
                             local_distmat=local_distmat, only_local=False,
                             theta_value=0.95)  #(current best)
        distmat_flipped = re_ranking(qf_flipped, gf_flipped, k1=6, k2=2, lambda_value=0.3,
                                     local_distmat=local_distmat_flipped, theta_value=0.95,
                                     only_local=False)  # (current best)
        distmat = (distmat + distmat_flipped) / 2

    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
    logger.info(f"mAP: {mAP:.1%}")
    for r in [1, 5, 10]:
        logger.info(f"CMC curve, Rank-{r:<3}:{cmc[r - 1]:.1%}")
    logger.info(f"Score: {(mAP + cmc[0]) / 2.:.1%}")
def inference_aligned(cfg, model, test_dataloader, num_query):
    """
    Run aligned inference: extract global + local features, build the
    AlignedReID local distance matrix, re-rank, and log CMC/mAP.

    The model must return a (global_feat, local_feat) tuple per batch.

    :param cfg: config node; only ``cfg.TEST.NORM`` is read.
    :param model: network to evaluate (put into eval mode here).
    :param test_dataloader: loader wrapped by ``data_prefetcher``.
    :param num_query: first ``num_query`` samples are queries, rest gallery.
    """
    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Start inferencing")

    model.eval()

    g_feats, l_feats, pids, camids = [], [], [], []
    val_prefetcher = data_prefetcher(test_dataloader)
    batch = val_prefetcher.next()
    # Feature extraction over the whole test set.
    while batch[0] is not None:
        img, pid, camid = batch
        with torch.no_grad():
            g_feat, l_feat = model(img)
        g_feats.append(g_feat.data.cpu())
        l_feats.append(l_feat.data.cpu())
        pids.extend(pid.cpu().numpy())
        camids.extend(np.asarray(camid))
        batch = val_prefetcher.next()

    g_feats = torch.cat(g_feats, dim=0)
    l_feats = torch.cat(l_feats, dim=0)

    if cfg.TEST.NORM:
        g_feats = F.normalize(g_feats, p=2, dim=1)

    # query / gallery split
    qf = g_feats[:num_query]
    lqf = l_feats[:num_query]
    q_pids = np.asarray(pids[:num_query])
    q_camids = np.asarray(camids[:num_query])
    gf = g_feats[num_query:]
    lgf = l_feats[num_query:]
    g_pids = np.asarray(pids[num_query:])
    g_camids = np.asarray(camids[num_query:])

    logger.info("--------use re-ranking--------")
    # AlignedReID local distances: query-gallery, query-query and
    # gallery-gallery blocks stacked into one (q+g) x (q+g) matrix.
    # assumes local feats are (batch, c, parts), permuted to
    # (batch, parts, c) for low_memory_local_dist — TODO confirm
    lqf = lqf.permute(0, 2, 1)
    lgf = lgf.permute(0, 2, 1)
    local_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned=True)
    local_qq_distmat = low_memory_local_dist(lqf.numpy(), lqf.numpy(), aligned=True)
    local_gg_distmat = low_memory_local_dist(lgf.numpy(), lgf.numpy(), aligned=True)
    local_dist = np.concatenate([
        np.concatenate([local_qq_distmat, local_distmat], axis=1),
        np.concatenate([local_distmat.T, local_gg_distmat], axis=1)
    ], axis=0)

    distmat = re_ranking(qf, gf, k1=6, k2=2, lambda_value=0.3,
                         local_distmat=local_dist, theta_value=0.5, only_local=False)

    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
    logger.info(f"mAP: {mAP:.1%}")
    for r in [1, 5, 10]:
        logger.info(f"CMC curve, Rank-{r:<3}:{cmc[r - 1]:.1%}")
    logger.info(f"Score: {(mAP + cmc[0]) / 2.:.1%}")
def inference_flipped(cfg, model, test_dataloader, num_query, use_re_ranking=True, distance_metric='global_local'):
    """
    Aligned inference with horizontal-flip augmentation.

    With ``use_re_ranking=False``, distances are the plain global, local,
    or summed distances selected by ``distance_metric``; otherwise
    k-reciprocal re-ranking fuses original and flipped features with
    theta fixed at 0.45.

    :param cfg: config node; only ``cfg.TEST.NORM`` is read.
    :param model: returns a (global_feat, local_feat) tuple per batch.
    :param test_dataloader: loader wrapped by ``data_prefetcher``.
    :param num_query: first ``num_query`` samples are queries, rest gallery.
    :param use_re_ranking: select the re-ranking path when True.
    :param distance_metric: 'global' | 'local' | 'global_local'; only used
        when ``use_re_ranking`` is False.
    """
    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Start inferencing")

    model.eval()

    g_feats, l_feats, gf_feats, lf_feats, pids, camids = [], [], [], [], [], []
    val_prefetcher = data_prefetcher(test_dataloader)
    batch = val_prefetcher.next()
    # Extract features of the original and flipped image for every batch.
    while batch[0] is not None:
        img, pid, camid = batch
        with torch.no_grad():
            g_feat, l_feat = model(img)
            gf_feat, lf_feat = model(torch.flip(img, [3]))
        g_feats.append(g_feat.data.cpu())
        l_feats.append(l_feat.data.cpu())
        gf_feats.append(gf_feat.data.cpu())
        lf_feats.append(lf_feat.data.cpu())
        pids.extend(pid.cpu().numpy())
        camids.extend(np.asarray(camid))
        batch = val_prefetcher.next()

    g_feats = torch.cat(g_feats, dim=0)
    l_feats = torch.cat(l_feats, dim=0)
    gf_feats = torch.cat(gf_feats, dim=0)
    lf_feats = torch.cat(lf_feats, dim=0)

    if cfg.TEST.NORM:
        g_feats = F.normalize(g_feats, p=2, dim=1)
        gf_feats = F.normalize(gf_feats, p=2, dim=1)

    # query ('ff' suffix = flipped)
    qf = g_feats[:num_query]
    lqf = l_feats[:num_query]
    qff = gf_feats[:num_query]
    lqff = lf_feats[:num_query]
    q_pids = np.asarray(pids[:num_query])
    q_camids = np.asarray(camids[:num_query])

    # gallery
    gf = g_feats[num_query:]
    lgf = l_feats[num_query:]
    gff = gf_feats[num_query:]
    lgff = lf_feats[num_query:]
    g_pids = np.asarray(pids[num_query:])
    g_camids = np.asarray(camids[num_query:])

    if not use_re_ranking:
        # Squared euclidean distance: |q|^2 + |g|^2 - 2 q·g.
        m, n = qf.shape[0], gf.shape[0]
        global_distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                         torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # NOTE(review): the positional addmm_(beta, alpha, ...) form is
        # deprecated in recent PyTorch — confirm the installed version.
        global_distmat.addmm_(1, -2, qf, gf.t())
        global_distmat = global_distmat.numpy()

        # AlignedReID local distance, query vs gallery only.
        lqf = lqf.permute(0, 2, 1)
        lgf = lgf.permute(0, 2, 1)
        local_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned=True)

        if distance_metric == 'global':
            logger.info("--------use global features--------")
            distmat = global_distmat
        elif distance_metric == 'local':
            logger.info("--------use local features--------")
            distmat = local_distmat
        elif distance_metric == 'global_local':
            logger.info("--------use global and local features--------")
            distmat = global_distmat + local_distmat
    else:
        logger.info("--------use re-ranking--------")
        # Stacked (q+g) x (q+g) local distance, original images.
        lqf = lqf.permute(0, 2, 1)
        lgf = lgf.permute(0, 2, 1)
        local_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned=True)
        local_qq_distmat = low_memory_local_dist(lqf.numpy(), lqf.numpy(), aligned=True)
        local_gg_distmat = low_memory_local_dist(lgf.numpy(), lgf.numpy(), aligned=True)
        local_dist = np.concatenate([
            np.concatenate([local_qq_distmat, local_distmat], axis=1),
            np.concatenate([local_distmat.T, local_gg_distmat], axis=1)
        ], axis=0)

        logger.info("--------use re-ranking flipped--------")
        # Same construction on the flipped features.
        lqff = lqff.permute(0, 2, 1)
        lgff = lgff.permute(0, 2, 1)
        local_distmat = low_memory_local_dist(lqff.numpy(), lgff.numpy(), aligned=True)
        local_qq_distmat = low_memory_local_dist(lqff.numpy(), lqff.numpy(), aligned=True)
        local_gg_distmat = low_memory_local_dist(lgff.numpy(), lgff.numpy(), aligned=True)
        local_dist_flipped = np.concatenate([
            np.concatenate([local_qq_distmat, local_distmat], axis=1),
            np.concatenate([local_distmat.T, local_gg_distmat], axis=1)
        ], axis=0)

        # theta fixed from an earlier hyper-parameter sweep
        theta = 0.45
        distmat = re_ranking(qf, gf, k1=6, k2=2, lambda_value=0.3,
                             local_distmat=local_dist, theta_value=theta, only_local=False)
        distmat_flip = re_ranking(qff, gff, k1=6, k2=2, lambda_value=0.3,
                                  local_distmat=local_dist_flipped, theta_value=theta, only_local=False)
        # Fuse original and flipped distances.
        distmat = (distmat + distmat_flip) / 2

    score = distmat
    index = np.argsort(score, axis=1)  # from small to large

    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
    logger.info(f"mAP: {mAP:.1%}")
    for r in [1, 5, 10]:
        logger.info(f"CMC curve, Rank-{r:<3}:{cmc[r - 1]:.1%}")
    logger.info(f"Score: {(mAP + cmc[0]) / 2.:.1%}")
def inference(cfg, model, test_dataloader, num_query, thetas):
    """
    Extract features and return ranking results for each theta.

    :param cfg: config node; only ``cfg.TEST.NORM`` is read.
    :param model: returns a global feature or a (global, local) tuple.
    :param test_dataloader: loader wrapped by ``data_prefetcher``.
    :param num_query: first ``num_query`` samples are queries, rest gallery.
    :param thetas: iterable of ``theta_value`` weights for ``re_ranking``.
    :return: with re-ranking (the currently hard-wired path): three lists
        (scores, indices, dist_mats), one entry per theta.
        NOTE(review): the disabled no-rerank branch returns a single
        (score, index) pair — a different shape from the re-ranked path.
    """
    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Start inferencing")

    model.eval()

    feats, pids, camids = [], [], []
    local_feats = []
    test_prefetcher = data_prefetcher(test_dataloader)
    batch = test_prefetcher.next()
    # Feature extraction over the whole test set.
    while batch[0] is not None:
        img, pid, camid = batch
        with torch.no_grad():
            feat = model(img)

        # Aligned models return (global_feat, local_feat).
        if isinstance(feat, tuple):
            feats.append(feat[0])
            local_feats.append(feat[1])
        else:
            feats.append(feat)

        pids.extend(pid.cpu().numpy())
        camids.extend(np.asarray(camid))

        batch = test_prefetcher.next()

    feats = torch.cat(feats, dim=0)
    if len(local_feats) > 0:
        local_feats = torch.cat(local_feats, dim=0)

    if cfg.TEST.NORM:
        feats = F.normalize(feats, p=2, dim=1)
        # Note: unlike inference_aligned, local features ARE normalized here.
        if len(local_feats) > 0:
            local_feats = F.normalize(local_feats, p=2, dim=1)

    # query
    qf = feats[:num_query]
    if len(local_feats) > 0:
        lqf = local_feats[:num_query]

    # gallery
    gf = feats[num_query:]
    if len(local_feats) > 0:
        lgf = local_feats[num_query:]

    if len(local_feats) > 0:
        # Stacked (q+g) x (q+g) AlignedReID local distance matrix.
        # assumes local feats are (batch, c, parts), permuted to
        # (batch, parts, c) for low_memory_local_dist — TODO confirm
        lqf = lqf.permute(0, 2, 1)
        lgf = lgf.permute(0, 2, 1)
        local_qg_distmat = low_memory_local_dist(lqf.cpu().numpy(), lgf.cpu().numpy(), aligned=True)
        local_qq_distmat = low_memory_local_dist(lqf.cpu().numpy(), lqf.cpu().numpy(), aligned=True)
        local_gg_distmat = low_memory_local_dist(lgf.cpu().numpy(), lgf.cpu().numpy(), aligned=True)
        local_distmat = np.concatenate([
            np.concatenate([local_qq_distmat, local_qg_distmat], axis=1),
            np.concatenate([local_qg_distmat.T, local_gg_distmat], axis=1)
        ], axis=0)
    else:
        local_distmat = None

    use_rerank = True
    if use_rerank:
        #thetas = [0.4, 0.5, 0.9, 0.95, 1.0]
        scores, indices, dist_mats = [], [], []
        logger.info("use reranking")
        for theta in thetas:
            distmat = re_ranking(qf, gf, k1=6, k2=2, lambda_value=0.3,
                                 local_distmat=local_distmat, theta_value=theta)
            score = distmat
            index = np.argsort(score, axis=1)  # from small to large

            scores.append(score)
            indices.append(index)
            dist_mats.append(distmat)

        return scores, indices, dist_mats
    else:
        logger.info("No reranking")
        distmat = -torch.mm(qf, gf.t()).cpu().numpy()

        score = distmat
        index = np.argsort(score, axis=1)  # from small to large

        return score, index
def inference_flipped_deprecated(
        cfg,
        model,
        test_dataloader,
        num_query,
        theta=0.95,
        use_local_feature=False  # whether to use local features
):
    """
    Deprecated flip-augmented inference for a 4-output model head.

    The model here returns 4 outputs per image — (global, bn_global,
    mid_local, local) by position; only the 1st and 4th are used below.

    :param cfg: config node; reads ``cfg.TEST.NORM``; also forwarded to
        ``data_prefetcher``.
    :param theta: ``re_ranking`` ``theta_value`` (global/local weight).
    :param use_local_feature: build the AlignedReID local distance
        matrices when True.
    :return: (scores, indices, dist_mats) — single-element lists for the
        one theta evaluated.
    """
    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Start inferencing")

    model.eval()

    g_feats, l_feats, gf_feats, lf_feats, pids, camids = [], [], [], [], [], []
    val_prefetcher = data_prefetcher(test_dataloader, cfg)
    batch = val_prefetcher.next()
    # Extract features of the original and flipped image for every batch.
    while batch[0] is not None:
        img, pid, camid = batch
        with torch.no_grad():
            g_feat, bn_gf, lf, l_feat = model(img)
            gf_feat, bn_gff, lff, lf_feat = model(torch.flip(img, [3]))
        g_feats.append(g_feat.data.cpu())
        l_feats.append(l_feat.data.cpu())
        gf_feats.append(gf_feat.data.cpu())
        lf_feats.append(lf_feat.data.cpu())
        pids.extend(pid.cpu().numpy())
        camids.extend(np.asarray(camid))
        batch = val_prefetcher.next()

    g_feats = torch.cat(g_feats, dim=0)
    l_feats = torch.cat(l_feats, dim=0)
    gf_feats = torch.cat(gf_feats, dim=0)
    lf_feats = torch.cat(lf_feats, dim=0)

    if cfg.TEST.NORM:
        g_feats = F.normalize(g_feats, p=2, dim=1)
        gf_feats = F.normalize(gf_feats, p=2, dim=1)

    # query ('ff' suffix = flipped)
    qf = g_feats[:num_query]
    lqf = l_feats[:num_query]
    qff = gf_feats[:num_query]
    lqff = lf_feats[:num_query]
    q_pids = np.asarray(pids[:num_query])
    q_camids = np.asarray(camids[:num_query])

    # gallery
    gf = g_feats[num_query:]
    lgf = l_feats[num_query:]
    gff = gf_feats[num_query:]
    lgff = lf_feats[num_query:]
    g_pids = np.asarray(pids[num_query:])
    g_camids = np.asarray(camids[num_query:])

    scores, indices, dist_mats = [], [], []

    if use_local_feature:
        logger.info("--------computing local features ...--------")
        lqf = lqf.permute(0, 2, 1)
        lgf = lgf.permute(0, 2, 1)
        local_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned=True)
        local_qq_distmat = low_memory_local_dist(lqf.numpy(), lqf.numpy(), aligned=True)
        local_gg_distmat = low_memory_local_dist(lgf.numpy(), lgf.numpy(), aligned=True)
        local_dist = np.concatenate([
            np.concatenate([local_qq_distmat, local_distmat], axis=1),
            np.concatenate([local_distmat.T, local_gg_distmat], axis=1)
        ], axis=0)

        logger.info("--------computing flipped local features ...--------")
        lqff = lqff.permute(0, 2, 1)
        lgff = lgff.permute(0, 2, 1)
        local_distmat = low_memory_local_dist(lqff.numpy(), lgff.numpy(), aligned=True)
        local_qq_distmat = low_memory_local_dist(lqff.numpy(), lqff.numpy(), aligned=True)
        local_gg_distmat = low_memory_local_dist(lgff.numpy(), lgff.numpy(), aligned=True)
        local_dist_flip = np.concatenate([
            np.concatenate([local_qq_distmat, local_distmat], axis=1),
            np.concatenate([local_distmat.T, local_gg_distmat], axis=1)
        ], axis=0)
    else:
        local_dist = None
        local_dist_flip = None

    logger.info("use reranking")
    #for theta in thetas:
    if True:
        distmat = re_ranking(qf, gf, k1=6, k2=2, lambda_value=0.3,
                             local_distmat=local_dist, theta_value=theta, only_local=False)
        distmat_flip = re_ranking(qff, gff, k1=6, k2=2, lambda_value=0.3,
                                  local_distmat=local_dist_flip, theta_value=theta, only_local=False)
        # Fuse the original and flipped distance matrices.
        distmat = (distmat + distmat_flip) / 2

        score = distmat
        index = np.argsort(score, axis=1)  # from small to large

        scores.append(score)
        indices.append(index)
        dist_mats.append(distmat)

    return scores, indices, dist_mats
def compute_distmat(cfg, num_query, feats, local_feats, theta, use_local_feature, use_rerank):
    """
    Compute the query-gallery distance matrix from global (and optionally
    AlignedReID local) features, without flip augmentation.

    :param cfg: config node; only ``cfg.TEST.NORM`` is read.
    :param num_query: first ``num_query`` samples are queries, rest gallery.
    :param feats: list of global feature batches to concatenate.
    :param local_feats: list of local feature batches; may be empty.
    :param theta: forwarded to ``re_ranking`` as ``theta_value``.
    :param use_local_feature: fold local distances into re-ranking if True.
    :param use_rerank: use k-reciprocal re-ranking if True, otherwise the
        plain negative inner-product distance.
    :return: distance matrix as a numpy array.
    """
    feats = torch.cat(feats, dim=0)
    use_local = len(local_feats) > 0 and use_local_feature
    if use_local:
        local_feats = torch.cat(local_feats, dim=0)

    if cfg.TEST.NORM:
        feats = F.normalize(feats, p=2, dim=1)

    # query / gallery split
    qf = feats[:num_query]
    gf = feats[num_query:]

    if use_local:
        lqf = local_feats[:num_query]
        lgf = local_feats[num_query:]
        # Stacked (q+g) x (q+g) AlignedReID local distance matrix.
        # assumes local feats are (batch, c, parts), permuted to
        # (batch, parts, c) for low_memory_local_dist — TODO confirm
        lqf = lqf.permute(0, 2, 1)
        lgf = lgf.permute(0, 2, 1)
        local_qg_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned=True)
        local_qq_distmat = low_memory_local_dist(lqf.numpy(), lqf.numpy(), aligned=True)
        local_gg_distmat = low_memory_local_dist(lgf.numpy(), lgf.numpy(), aligned=True)
        local_distmat = np.concatenate([
            np.concatenate([local_qq_distmat, local_qg_distmat], axis=1),
            np.concatenate([local_qg_distmat.T, local_gg_distmat], axis=1)
        ], axis=0)
    else:
        local_distmat = None

    if use_rerank:
        distmat = re_ranking(qf, gf, k1=6, k2=2, lambda_value=0.3,
                             local_distmat=local_distmat,
                             theta_value=theta, only_local=False)  # (current best)
        del qf, gf  # free memory; re-ranking already consumed the features
    else:
        # Negative inner product: smaller = more similar (cosine if normed).
        distmat = -torch.mm(qf, gf.t()).cpu().numpy()

    return distmat