def calmap(probeFeat, probeLabel, galleryFeat, galleryLabel, norm_flag=True, rerank=False):
    """Compute mean average precision (mAP) for probe-vs-gallery retrieval.

    Args:
        probeFeat: (num_probe, dim) feature matrix of the probe/query set.
        probeLabel: (num_probe,) identity labels of the probe set.
        galleryFeat: (num_gallery, dim) feature matrix of the gallery set.
        galleryLabel: (num_gallery,) identity labels of the gallery set.
        norm_flag: L2-normalize feature rows before matching when True.
        rerank: use k-reciprocal re-ranking instead of plain Euclidean
            distance when True.

    Returns:
        Scalar mAP averaged over all probes.
    """
    if norm_flag:
        probeFeat = pre.normalize(probeFeat, axis=1)
        galleryFeat = pre.normalize(galleryFeat, axis=1)
    if rerank:
        dist = re_ranking(probeFeat, galleryFeat, k1=10, k2=6, lambda_value=0.3)
    else:
        dist = cdist(probeFeat, galleryFeat)

    ap = []
    for i in range(len(dist)):
        # Distances from this probe to its true matches, taken before sorting.
        pos = np.where(galleryLabel == probeLabel[i])[0]
        match_dist = dist[i][pos]
        # Sorted copy (np.sort) instead of in-place a.sort(), so the rows of
        # `dist` are not silently mutated.
        row = np.sort(dist[i])
        # 1-based rank of each true match.  searchsorted(side='left') finds
        # the first occurrence, matching list.index() semantics on ties,
        # in O(log n) instead of a repeated O(n) linear scan.
        ranks = np.sort(np.searchsorted(row, match_dist, side='left') + 1)
        # Average precision: precision at each true-match rank, averaged
        # over the number of true matches.
        thisap = sum(float(k + 1) / r for k, r in enumerate(ranks))
        ap.append(thisap / len(pos))
    # Renamed from `map`, which shadowed the builtin.
    mean_ap = np.mean(np.array(ap))
    return mean_ap
def calacc(probeFeat, probeLabel, galleryFeat, galleryLabel, rerank=False, top_num=[1, 5, 10, 20, 30]):
    """Compute the CMC curve (top-k retrieval accuracy) for probe-vs-gallery.

    Args:
        probeFeat: (num_probe, dim) feature matrix of the probe/query set.
        probeLabel: (num_probe,) identity labels of the probe set.
        galleryFeat: (num_gallery, dim) feature matrix of the gallery set.
        galleryLabel: (num_gallery,) identity labels of the gallery set.
        rerank: use k-reciprocal re-ranking instead of plain Euclidean
            distance when True.
        top_num: ranks at which to evaluate the CMC curve.

    Returns:
        List of top-k accuracies, one per entry in `top_num`.
    """
    if rerank:
        dist = re_ranking(probeFeat, galleryFeat, k1=10, k2=6, lambda_value=0.3)
    else:
        dist = cdist(probeFeat, galleryFeat)

    best_rank = []
    for i in range(len(dist)):
        row = dist[i]
        pos = np.where(galleryLabel == probeLabel[i])[0]
        # BUGFIX: the original did list(a).index(dp) with dp an *array*,
        # which raises whenever an identity has more than one gallery
        # entry.  The CMC rank of a probe is the position of its closest
        # true match, so rank the minimum true-match distance instead.
        nearest = row[pos].min()
        best_rank.append(int(np.searchsorted(np.sort(row), nearest, side='left')))
    best_rank = np.array(best_rank)

    # Fraction of probes whose best true match appears within the top `top`.
    cmc = lambda top: float(len(np.where(best_rank < top)[0])) / len(probeLabel)
    cmc_curve = [cmc(top) for top in top_num]
    return cmc_curve
def test(self):
    """Run one evaluation pass: extract query/gallery features, score them,
    log mAP and CMC ranks, and checkpoint when this epoch is the best."""
    epoch = self.scheduler.last_epoch + 1
    self.ckpt.write_log('\n[INFO] Test:')
    self.model.eval()
    self.ckpt.add_log(torch.zeros(1, 5))

    # Feature matrices for the query and gallery (test) sets.
    query_feat = self.extract_feature(self.query_loader).numpy()
    gallery_feat = self.extract_feature(self.test_loader).numpy()

    if self.args.re_rank:
        # k-reciprocal re-ranking consumes the three pairwise similarity
        # matrices (query-gallery, query-query, gallery-gallery).
        dist = re_ranking(
            np.dot(query_feat, gallery_feat.T),
            np.dot(query_feat, query_feat.T),
            np.dot(gallery_feat, gallery_feat.T),
        )
    else:
        dist = cdist(query_feat, gallery_feat)

    rank_scores = cmc(
        dist,
        self.queryset.ids, self.testset.ids,
        self.queryset.cameras, self.testset.cameras,
        separate_camera_set=False,
        single_gallery_shot=False,
        first_match_break=True,
    )
    m_ap = mean_ap(
        dist,
        self.queryset.ids, self.testset.ids,
        self.queryset.cameras, self.testset.cameras,
    )

    # Record mAP and rank-1/3/5/10 into the freshly-added log row.
    for col, value in enumerate(
            (m_ap, rank_scores[0], rank_scores[2], rank_scores[4], rank_scores[9])):
        self.ckpt.log[-1, col] = value

    best = self.ckpt.log.max(0)
    self.ckpt.write_log(
        '[INFO] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} (Best: {:.4f} @epoch {})'
        .format(m_ap, rank_scores[0], rank_scores[2], rank_scores[4], rank_scores[9],
                best[0][0], (best[1][0] + 1) * self.args.test_every))
    if not self.args.test_only:
        self.ckpt.save(self, epoch, is_best=((best[1][0] + 1) * self.args.test_every == epoch))
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    """Evaluate a model that returns (global, local) features and print CMC / mAP.

    Distances are computed on the L2-normalized global branch; depending on
    args.test_distance ('global', 'local', 'global_local') the aligned local
    distance is used as well.  Re-ranking runs when args.reranking is set.

    Args:
        model: network returning (global_features, local_features) per batch.
        queryloader: yields (imgs, pids) batches for the query set.
        galleryloader: yields (imgs, pids) batches for the gallery set.
        use_gpu: move image batches to CUDA when True.
        ranks: CMC ranks to report.

    Returns:
        The last computed mAP (the re-ranked one when args.reranking is set).
    """
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        qf, q_pids, q_camids, lqf = [], [], [], []
        for batch_idx, (imgs, pids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)
            qf.append(features.data.cpu())
            lqf.append(local_features.data.cpu())
            q_pids.extend(pids)
        qf = torch.cat(qf, 0)
        lqf = torch.cat(lqf, 0)
        print('qf shape', qf.shape)
        print('lqf shape', lqf.shape)
        q_pids = np.asarray(q_pids)
        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids, lgf = [], [], [], []
        end = time.time()
        for batch_idx, (imgs, pids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)
            gf.append(features.data.cpu())
            lgf.append(local_features.data.cpu())
            g_pids.extend(pids)
        gf = torch.cat(gf, 0)
        # BUGFIX: this concatenation was commented out, leaving lgf a plain
        # list; the local-distance branch below would then crash on
        # lgf.permute(...).
        lgf = torch.cat(lgf, 0)
        print('gf shape', gf.shape)
        g_pids = np.asarray(g_pids)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

        # Feature normalization: row-wise L2, epsilon for numerical safety.
        qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) + 1e-12)
        gf = 1. * gf / (torch.norm(gf, 2, dim=-1, keepdim=True).expand_as(gf) + 1e-12)

        # Pairwise squared Euclidean distance: ||q||^2 + ||g||^2 - 2 q.g
        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
            torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        print('distmat shape1', distmat.shape)
        # Keyword form of addmm_; the positional (beta, alpha, ...) overload
        # is deprecated in recent PyTorch.
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
        print('distmat shape2', distmat.shape)
        distmat = distmat.cpu().numpy()

        # args.test_distance = 'global' (default)
        if not args.test_distance == 'global':
            # BUGFIX: message was inverted — this branch runs when the local
            # branch IS used.
            print("Using local branch as well")
            from util.distance import low_memory_local_dist
            lqf = lqf.permute(0, 2, 1)
            lgf = lgf.permute(0, 2, 1)
            local_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned=not args.unaligned)
            if args.test_distance == 'local':
                print("Only using local branch")
                distmat = local_distmat
            if args.test_distance == 'global_local':
                print("Using global and local branches")
                distmat = local_distmat + distmat

        print("Computing CMC and mAP")
        cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                            use_metric_cuhk03=args.use_metric_cuhk03)
        print("Results ----------")
        print("mAP: {:.1%}".format(mAP))
        print("CMC curve")
        for r in ranks:
            print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
        print("------------------")

        # args.reranking = False (default)
        if args.reranking:
            from util.re_ranking import re_ranking
            if args.test_distance == 'global':
                print("Only using global branch for reranking")
                distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
            else:
                local_qq_distmat = low_memory_local_dist(lqf.numpy(), lqf.numpy(), aligned=not args.unaligned)
                local_gg_distmat = low_memory_local_dist(lgf.numpy(), lgf.numpy(), aligned=not args.unaligned)
                # Block matrix of local distances over [query; gallery].
                local_dist = np.concatenate(
                    [np.concatenate([local_qq_distmat, local_distmat], axis=1),
                     np.concatenate([local_distmat.T, local_gg_distmat], axis=1)],
                    axis=0)
                if args.test_distance == 'local':
                    print("Only using local branch for reranking")
                    distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3,
                                         local_distmat=local_dist, only_local=True)
                elif args.test_distance == 'global_local':
                    print("Using global and local branches for reranking")
                    distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3,
                                         local_distmat=local_dist, only_local=False)
            print("Computing CMC and mAP for re_ranking")
            cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                                use_metric_cuhk03=args.use_metric_cuhk03)
            print("Results ----------")
            print("mAP(RK): {:.1%}".format(mAP))
            print("CMC curve(RK)")
            for r in ranks:
                print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
            print("------------------")
    return mAP
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    """Evaluate a global-feature model on query/gallery loaders, print CMC / mAP.

    Args:
        model: network returning one global feature tensor per batch.
        queryloader: yields (imgs, pids, camids) batches for the query set.
        galleryloader: yields (imgs, pids, camids) batches for the gallery set.
        use_gpu: move image batches to CUDA when True.
        ranks: CMC ranks to report.

    Returns:
        Rank-1 accuracy (re-ranked when args.reranking is set).
    """
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            qf.append(features.data.cpu())
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)
            gf.append(features.data.cpu())
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

        # Feature normalization: row-wise L2, epsilon for numerical safety.
        qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) + 1e-12)
        gf = 1. * gf / (torch.norm(gf, 2, dim=-1, keepdim=True).expand_as(gf) + 1e-12)

        # Pairwise squared Euclidean distance: ||q||^2 + ||g||^2 - 2 q.g
        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
            torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # Keyword form of addmm_; the positional (beta, alpha, ...) overload
        # is deprecated in recent PyTorch.
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
        distmat = distmat.numpy()

        print("Computing CMC and mAP")
        cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                            use_metric_cuhk03=args.use_metric_cuhk03)
        print("Results ----------")
        print("mAP: {:.1%}".format(mAP))
        print("CMC curve")
        for r in ranks:
            print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
        print("------------------")

        if args.reranking:
            from util.re_ranking import re_ranking
            distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
            print("Computing CMC and mAP for re_ranking")
            cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                                use_metric_cuhk03=args.use_metric_cuhk03)
            print("Results ----------")
            print("mAP(RK): {:.1%}".format(mAP))
            print("CMC curve(RK)")
            for r in ranks:
                print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
            print("------------------")
    return cmc[0]
def test(model, queryloader, galleryloader, use_gpu, dataset_q, dataset_g, track_id_tmp=None, rank=100):
    """AICity-style evaluation: extract features, average gallery features per
    track, rank the gallery for each query, and write the top-100 gallery ids
    per query to 'aic_res_<args.result_dir>.txt'.

    Args:
        model: network returning (global_features, local_features) per batch.
        queryloader: yields image batches for the query set (no labels).
        galleryloader: yields image batches for the gallery set (no labels).
        use_gpu: move image batches to CUDA when True.
        dataset_q: list of query image paths; file stems are numeric ids.
        dataset_g: list of gallery image paths; file stems are numeric ids.
        track_id_tmp: track membership info consumed by track_info_average
            and test_rank100_aicity.
        rank: unused; kept for interface compatibility.
    """
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        qf, lqf, q_imgs = [], [], []
        for q_idx in range(len(dataset_q)):
            # split('.')[0] takes the numeric file stem; the original
            # strip('.jpg') strips a *character set*, which is fragile.
            q_img = int(dataset_q[q_idx].split('/')[-1].split('.')[0])
            q_imgs.append(q_img)
        for batch_idx, imgs in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)
            qf.append(features.data.cpu())
            lqf.append(local_features.data.cpu())
        qf = torch.cat(qf, 0)
        lqf = torch.cat(lqf, 0)
        print('lqf shape', lqf.shape)
        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, lgf, g_imgs = [], [], []
        for g_idx in range(len(dataset_g)):
            g_img = int(dataset_g[g_idx].split('/')[-1].split('.')[0])
            g_imgs.append(g_img)
        end = time.time()
        # obtain the track info
        for batch_idx, imgs in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features, local_features = model(imgs)
            gf.append(features.data.cpu())
            lgf.append(local_features.data.cpu())
        gf = torch.cat(gf, 0)
        lgf = torch.cat(lgf, 0)
        print('lgf shape', lgf.shape)
        # Average per-image gallery features over each track.
        gt_f, _ = track_info_average(track_id_tmp, gf, lgf)
        # NOTE: removed several stray interactive embed() calls that would
        # halt execution in non-interactive runs.
        print('len of gimgs', len(g_imgs))
        print('Extracted features for gallery_track set,obtained {}-by-{} matrix'.format(gt_f.size(0), gt_f.size(1)))
        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

        # Feature normalization: query and track-averaged gallery features.
        qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) + 1e-12)
        gt_f = 1. * gt_f / (torch.norm(gt_f, 2, dim=-1, keepdim=True).expand_as(gt_f) + 1e-12)

        # Pairwise squared Euclidean distance query-vs-track.
        m, n = qf.size(0), gt_f.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
            torch.pow(gt_f, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # Keyword form of addmm_; the positional overload is deprecated.
        distmat.addmm_(qf, gt_f.t(), beta=1, alpha=-2)
        distmat = distmat.numpy()
        print("------------------")

        if args.reranking:
            from util.re_ranking import re_ranking
            if args.test_distance == 'global':
                print("Only using global branch for reranking")
                distmat = re_ranking(qf, gt_f, k1=20, k2=6, lambda_value=0.3)
            else:
                # BUGFIX: low_memory_local_dist was never imported and
                # local_distmat was never computed here — both raised
                # NameError.  Mirrors the sibling test() implementations.
                from util.distance import low_memory_local_dist
                lqf = lqf.permute(0, 2, 1)
                lgf = lgf.permute(0, 2, 1)
                local_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned=not args.unaligned)
                local_qq_distmat = low_memory_local_dist(lqf.numpy(), lqf.numpy(), aligned=not args.unaligned)
                local_gg_distmat = low_memory_local_dist(lgf.numpy(), lgf.numpy(), aligned=not args.unaligned)
                local_dist = np.concatenate(
                    [np.concatenate([local_qq_distmat, local_distmat], axis=1),
                     np.concatenate([local_distmat.T, local_gg_distmat], axis=1)],
                    axis=0)
                # NOTE(review): these branches use per-image gf while the
                # global branch uses track-averaged gt_f — the sizes differ;
                # preserved from the original, confirm intended.
                if args.test_distance == 'local':
                    print("Only using local branch for reranking")
                    distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3,
                                         local_distmat=local_dist, only_local=True)
                elif args.test_distance == 'global_local':
                    print("Using global and local branches for reranking")
                    distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3,
                                         local_distmat=local_dist, only_local=False)
            print("Computing CMC and mAP for re_ranking")

        print("==> Test aicity dataset and write to csv")
        test_rank_result = test_rank100_aicity(distmat, q_imgs, g_imgs, track_id_tmp, use_track_info=True)
        # test_rank_result is a dict; use pandas to convert it to a frame
        # sorted by query id.
        test_rank_result_df = pd.DataFrame(list(test_rank_result.items()),
                                           columns=['query_ids', 'gallery_ids'])
        test_result_df = test_rank_result_df.sort_values('query_ids')
        # One whitespace-separated row of top-100 gallery ids per query.
        # The `with` block closes the file; the redundant f.close() is gone.
        with open('aic_res_' + args.result_dir + '.txt', 'w') as f:
            for idx in range(len(test_result_df)):
                idx_row = test_result_df.iloc[idx]['gallery_ids'][:100]
                row_ranks = [str(item[0]) for item in idx_row]
                f.write(' '.join(row_ranks) + '\n')
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    """Evaluate an aligned (global+local) model, print CMC / mAP before and
    after re-ranking, and plot both CMC curves to a PNG.

    Args:
        model: network returning (global_features, local_features) per batch.
        queryloader: yields (imgs, pids, camids) batches for the query set.
        galleryloader: yields (imgs, pids, camids) batches for the gallery set.
        use_gpu: move image batches to CUDA when True.
        ranks: CMC ranks to report and plot.

    Returns:
        Rank-1 accuracy of the last evaluation (re-ranked when enabled).
    """
    print('------start testing------')
    cmc1 = []  # CMC values at `ranks` before re-ranking
    cmc2 = []  # CMC values at `ranks` after re-ranking
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        qf, q_pids, q_camids, lqf = [], [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)
            qf.append(features.data.cpu())
            lqf.append(local_features.data.cpu())
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        lqf = torch.cat(lqf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)
        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids, lgf = [], [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()
            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)
            gf.append(features.data.cpu())
            lgf.append(local_features.data.cpu())
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        lgf = torch.cat(lgf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)
        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))
        print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

        # Feature normalization: row-wise L2, epsilon for numerical safety.
        qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) + 1e-12)
        gf = 1. * gf / (torch.norm(gf, 2, dim=-1, keepdim=True).expand_as(gf) + 1e-12)

        # Pairwise squared Euclidean distance: ||q||^2 + ||g||^2 - 2 q.g
        m, n = qf.size(0), gf.size(0)
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
            torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # Keyword form of addmm_; the positional (beta, alpha, ...) overload
        # is deprecated in recent PyTorch.
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
        distmat = distmat.numpy()

        if not args.test_distance == 'global':
            # BUGFIX: message was inverted — this branch runs when the local
            # branch IS used.
            print("Using local branch as well")
            from util.distance import low_memory_local_dist
            lqf = lqf.permute(0, 2, 1)
            lgf = lgf.permute(0, 2, 1)
            local_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned=not args.unaligned)
            if args.test_distance == 'local':
                print("Only using local branch")
                distmat = local_distmat
            if args.test_distance == 'global_local':
                print("Using global and local branches")
                distmat = local_distmat + distmat

        print("Computing CMC and mAP")
        cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                            use_metric_cuhk03=args.use_metric_cuhk03)
        print("cms's shape: ", cmc.shape)
        print("cms's type: ", cmc.dtype)
        print("Results ----------")
        print("mAP: {:.1%}".format(mAP))
        print("CMC curve")
        for r in ranks:
            print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
            cmc1.append(cmc[r - 1])
        print("------------------")

        if args.reranking:
            from util.re_ranking import re_ranking
            if args.test_distance == 'global':
                print("Only using global branch for reranking")
                distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
            else:
                local_qq_distmat = low_memory_local_dist(
                    lqf.numpy(), lqf.numpy(), aligned=not args.unaligned)
                local_gg_distmat = low_memory_local_dist(
                    lgf.numpy(), lgf.numpy(), aligned=not args.unaligned)
                local_dist = np.concatenate([
                    np.concatenate([local_qq_distmat, local_distmat], axis=1),
                    np.concatenate([local_distmat.T, local_gg_distmat], axis=1)
                ], axis=0)
                if args.test_distance == 'local':
                    print("Only using local branch for reranking")
                    distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3,
                                         local_distmat=local_dist, only_local=True)
                elif args.test_distance == 'global_local':
                    print("Using global and local branches for reranking")
                    distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3,
                                         local_distmat=local_dist, only_local=False)
            print("Computing CMC and mAP for re_ranking")
            cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                                use_metric_cuhk03=args.use_metric_cuhk03)
            print("Results ----------")
            print("mAP(RK): {:.1%}".format(mAP))
            print("CMC curve(RK)")
            for r in ranks:
                print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
                cmc2.append(cmc[r - 1])
            print("------------------")

        # Plot ranking vs re-ranking CMC curves.
        # NOTE(review): when args.reranking is False, cmc2 is empty and the
        # second plt.plot raises — presumably this path is only run with
        # re-ranking enabled; confirm with caller.
        plt.plot(ranks, cmc1, label='ranking', color='red', marker='o', markersize=5)
        plt.plot(ranks, cmc2, label='re-ranking', color='blue', marker='o', markersize=5)
        plt.ylabel('Accuracy')
        plt.xlabel('Rank_num')
        plt.title('Result of Ranking and Re-ranking(query_tank_cam=5)')
        plt.legend()
        # NOTE(review): hard-coded user-specific output path; consider making
        # this configurable via args.
        plt.savefig('/home/gaoziqiang/tempt/tank_cam5.png')
        plt.show()
    return cmc[0]