def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    batch_time = AverageMeter()

    model.eval()
    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

    # Squared Euclidean distance: ||q||^2 + ||g||^2 - 2 * q . g^T
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()
    # Note on the in-place update above: the legacy positional form
    # distmat.addmm_(1, -2, qf, gf.t()) is deprecated in recent PyTorch, and
    # torch has no module-level addmm_ (the in-place op is a Tensor method).
    # The supported signature is addmm_(mat1, mat2, *, beta, alpha), used above.

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")
    return cmc[0]
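# A minimal, self-contained sketch (not part of the pipeline above; tensor names and
# sizes are made up for illustration) that checks the expand/addmm_ formulation of the
# squared Euclidean distance against torch.cdist.
import torch

def pairwise_sq_euclidean(qf, gf):
    """Squared Euclidean distance via ||q||^2 + ||g||^2 - 2 * q @ g.T."""
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    return distmat

if __name__ == '__main__':
    qf = torch.randn(4, 2048)                 # 4 query features
    gf = torch.randn(6, 2048)                 # 6 gallery features
    reference = torch.cdist(qf, gf) ** 2      # same quantity computed directly
    print(torch.allclose(pairwise_sq_euclidean(qf, gf), reference, rtol=1e-4, atol=1e-3))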
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    batch_time = AverageMeter()

    model.eval()
    with torch.no_grad():
        qf, q_pids, q_camids, lqf = [], [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            local_features = local_features.data.cpu()
            qf.append(features)
            lqf.append(local_features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        lqf = torch.cat(lqf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids, lgf = [], [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            local_features = local_features.data.cpu()
            gf.append(features)
            lgf.append(local_features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        lgf = torch.cat(lgf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

    # Feature normalization (L2-normalize the global features).
    qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) + 1e-12)
    gf = 1. * gf / (torch.norm(gf, 2, dim=-1, keepdim=True).expand_as(gf) + 1e-12)

    # Global distance: squared Euclidean distance ||q||^2 + ||g||^2 - 2 * q . g^T
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    if args.test_distance != 'global':
        # The local branch is needed, so compute the local distance matrix as well.
        print("Computing local distances")
        from util.distance import low_memory_local_dist
        lqf = lqf.permute(0, 2, 1)
        lgf = lgf.permute(0, 2, 1)
        local_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned=not args.unaligned)
        if args.test_distance == 'local':
            print("Only using local branch")
            distmat = local_distmat
        if args.test_distance == 'global_local':
            print("Using global and local branches")
            distmat = local_distmat + distmat

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if args.reranking:
        from util.re_ranking import re_ranking
        if args.test_distance == 'global':
            print("Only using global branch for reranking")
            distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
        else:
            # Build the full (query+gallery) x (query+gallery) local distance matrix
            # required by re-ranking.
            local_qq_distmat = low_memory_local_dist(lqf.numpy(), lqf.numpy(), aligned=not args.unaligned)
            local_gg_distmat = low_memory_local_dist(lgf.numpy(), lgf.numpy(), aligned=not args.unaligned)
            local_dist = np.concatenate(
                [np.concatenate([local_qq_distmat, local_distmat], axis=1),
                 np.concatenate([local_distmat.T, local_gg_distmat], axis=1)],
                axis=0)
            if args.test_distance == 'local':
                print("Only using local branch for reranking")
                distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3,
                                     local_distmat=local_dist, only_local=True)
            elif args.test_distance == 'global_local':
                print("Using global and local branches for reranking")
                distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3,
                                     local_distmat=local_dist, only_local=False)

        print("Computing CMC and mAP for re_ranking")
        cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                            use_metric_cuhk03=args.use_metric_cuhk03)

        print("Results ----------")
        print("mAP(RK): {:.1%}".format(mAP))
        print("CMC curve(RK)")
        for r in ranks:
            print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
        print("------------------")
    return cmc[0]
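# A minimal sketch (assumed shapes, synthetic data) of how the (m+n) x (m+n) local
# distance matrix passed to re_ranking is laid out: query-query distances in the
# top-left block, gallery-gallery in the bottom-right, and the query-gallery block
# (and its transpose) off-diagonal. This mirrors the np.concatenate calls above but
# uses random stand-ins instead of low_memory_local_dist.
import numpy as np

m, n = 3, 5                         # number of query / gallery samples (illustrative)
local_qq = np.random.rand(m, m)     # stand-in for query-query local distances
local_gg = np.random.rand(n, n)     # stand-in for gallery-gallery local distances
local_qg = np.random.rand(m, n)     # stand-in for query-gallery local distances

local_dist = np.concatenate(
    [np.concatenate([local_qq, local_qg], axis=1),
     np.concatenate([local_qg.T, local_gg], axis=1)],
    axis=0)

assert local_dist.shape == (m + n, m + n)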
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]):
    batch_time = AverageMeter()

    model.eval()
    with torch.no_grad():
        qf, q_pids, q_camids = [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            qf.append(features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        qf = torch.cat(qf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        gf, g_pids, g_camids = [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            gf.append(features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        gf = torch.cat(gf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

    # Feature normalization (L2-normalize the global features).
    qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) + 1e-12)
    gf = 1. * gf / (torch.norm(gf, 2, dim=-1, keepdim=True).expand_as(gf) + 1e-12)

    # Squared Euclidean distance: ||q||^2 + ||g||^2 - 2 * q . g^T
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)

    print("Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("------------------")

    if args.reranking:
        from util.re_ranking import re_ranking
        distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)

        print("Computing CMC and mAP for re_ranking")
        cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                            use_metric_cuhk03=args.use_metric_cuhk03)

        print("Results ----------")
        print("mAP(RK): {:.1%}".format(mAP))
        print("CMC curve(RK)")
        for r in ranks:
            print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
        print("------------------")
    return cmc[0]
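# A small sketch (synthetic tensors, illustration only) of why the features are
# L2-normalized first: on unit-norm vectors the squared Euclidean distance reduces to
# 2 - 2 * cosine similarity, so ranking by this distance is equivalent to ranking by
# cosine similarity.
import torch

qf = torch.randn(4, 256)
gf = torch.randn(6, 256)
qf = qf / (qf.norm(2, dim=-1, keepdim=True) + 1e-12)
gf = gf / (gf.norm(2, dim=-1, keepdim=True) + 1e-12)

sq_euclidean = torch.cdist(qf, gf) ** 2
cosine_sim = qf @ gf.t()
print(torch.allclose(sq_euclidean, 2 - 2 * cosine_sim, atol=1e-4))  # True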
def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 8]):
    print('------start testing------')
    cmc1 = []
    cmc2 = []
    batch_time = AverageMeter()

    model.eval()
    with torch.no_grad():
        # Extract features for the query set.
        qf, q_pids, q_camids, lqf = [], [], [], []
        for batch_idx, (imgs, pids, camids) in enumerate(queryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features, local_features = model(imgs)
            # embed()  # IPython debug breakpoint; keep disabled during normal runs
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            local_features = local_features.data.cpu()
            qf.append(features)
            lqf.append(local_features)
            q_pids.extend(pids)
            q_camids.extend(camids)
        # Concatenate the per-batch tensors along dim 0 (stack them vertically).
        qf = torch.cat(qf, 0)
        lqf = torch.cat(lqf, 0)
        q_pids = np.asarray(q_pids)
        q_camids = np.asarray(q_camids)

        print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1)))

        # Extract features for the gallery set.
        gf, g_pids, g_camids, lgf = [], [], [], []
        end = time.time()
        for batch_idx, (imgs, pids, camids) in enumerate(galleryloader):
            if use_gpu:
                imgs = imgs.cuda()

            end = time.time()
            features, local_features = model(imgs)
            batch_time.update(time.time() - end)

            features = features.data.cpu()
            local_features = local_features.data.cpu()
            gf.append(features)
            lgf.append(local_features)
            g_pids.extend(pids)
            g_camids.extend(camids)
        # embed()  # debug breakpoint to inspect gf
        gf = torch.cat(gf, 0)
        lgf = torch.cat(lgf, 0)
        g_pids = np.asarray(g_pids)
        g_camids = np.asarray(g_camids)

        print("Extracted features for gallery set, obtained {}-by-{} matrix".format(gf.size(0), gf.size(1)))

    print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch))

    # Key post-processing steps below.
    # Feature normalization (L2-normalize the global features).
    qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) + 1e-12)
    gf = 1. * gf / (torch.norm(gf, 2, dim=-1, keepdim=True).expand_as(gf) + 1e-12)

    # Global distance: squared Euclidean distance ||q||^2 + ||g||^2 - 2 * q . g^T,
    # where the cross term is an in-place matrix multiplication.
    m, n = qf.size(0), gf.size(0)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.numpy()

    # For testing: count queries whose closest gallery match is below 0.4.
    mm, nn = distmat.shape[0], distmat.shape[1]
    min_dist = [1.0] * mm  # one entry per query row
    num = 0
    for i in range(mm):
        for j in range(nn):
            if distmat[i][j] < min_dist[i]:
                min_dist[i] = distmat[i][j]
        if min_dist[i] < 0.4:
            num += 1
    print('person_num after multi-view recognition:', num)

    if args.test_distance != 'global':
        # The local branch is needed, so compute the local distance matrix as well.
        print("Computing local distances")
        from util.distance import low_memory_local_dist
        lqf = lqf.permute(0, 2, 1)
        lgf = lgf.permute(0, 2, 1)
        local_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned=not args.unaligned)
        if args.test_distance == 'local':
            print("Only using local branch")
            distmat = local_distmat
        if args.test_distance == 'global_local':
            print("Using global and local branches")
            # total distmat = local_distmat + distmat (global)
            distmat = local_distmat + distmat

    print("Computing CMC and mAP")
    # embed()  # debug breakpoint to inspect/sort distmat
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids,
                        use_metric_cuhk03=args.use_metric_cuhk03)
    print("cmc's shape: ", cmc.shape)
    print("cmc's type: ", cmc.dtype)

    print("------Results ----------")
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
        cmc1.append(cmc[r - 1])
    print("------------------")
    return cmc[0]
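# A minimal sketch of a vectorized alternative to the nested-loop counting above:
# for each query take the minimum distance over the gallery and count how many fall
# below the threshold. The 0.4 threshold and the random array here are illustrative only.
import numpy as np

distmat = np.random.rand(8, 20)   # stand-in for the real query-gallery distance matrix
threshold = 0.4
num = int((distmat.min(axis=1) < threshold).sum())
print('person_num after multi-view recognition:', num)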