def evaluate(self):
    """Run validation: extract L2-normalized embeddings for the whole
    validation loader, split them into query/gallery, and log CMC / mAP
    plus the mAP-rank1 average.
    """
    self.model.eval()
    n_q = self.num_query
    feat_chunks, pid_chunks, camid_chunks = [], [], []
    with torch.no_grad():
        for data, pid, camid, _ in tqdm(self.val_dl, total=len(self.val_dl),
                                        leave=False):
            data = data.cuda()
            # Single forward pass; normalize each row to unit L2 norm.
            emb = self.model(data).data.cpu()
            emb = emb / torch.norm(emb, p=2, dim=1, keepdim=True)
            feat_chunks.append(emb)
            pid_chunks.append(pid)
            camid_chunks.append(camid)
    feats = torch.cat(feat_chunks, dim=0)
    pids = torch.cat(pid_chunks, dim=0)
    camids = torch.cat(camid_chunks, dim=0)
    # By protocol, the first num_query samples are queries; the rest
    # form the gallery.
    query_feat, gallery_feat = feats[:n_q], feats[n_q:]
    query_pid, gallery_pid = pids[:n_q], pids[n_q:]
    query_camid, gallery_camid = camids[:n_q], camids[n_q:]
    distmat = euclidean_dist(query_feat, gallery_feat)
    cmc, mAP, _ = eval_func(
        distmat.numpy(),
        query_pid.numpy(),
        gallery_pid.numpy(),
        query_camid.numpy(),
        gallery_camid.numpy(),
    )
    self.logger.info('Validation Result:')
    self.logger.info('mAP: {:.2%}'.format(mAP))
    for r in self.cfg.TEST.CMC:
        self.logger.info('CMC Rank-{}: {:.2%}'.format(r, cmc[r - 1]))
    self.logger.info('average of mAP and rank1: {:.2%}'.format(
        (mAP + cmc[0]) / 2.0))
    self.logger.info('-' * 20)
def validation(self, valid_loader):
    """Run the model over the validation set and compute retrieval metrics.

    :param valid_loader: DataLoader for the validation set.
    :return rank1: Rank-1 precision (float).
    :return mAP: mean average precision (float).
    :return average_score: mean of rank1 and mAP (float).
    :raises NotImplementedError: if ``self.dist`` names an unknown metric.
    """
    self.model.eval()
    tbar = tqdm.tqdm(valid_loader)
    features_all, labels_all = [], []
    with torch.no_grad():
        for i, (images, labels, paths) in enumerate(tbar):
            # Forward pass with test-time augmentation via the solver.
            # features = self.solver.forward((images, labels))[-1]
            features = self.solver.tta((images, labels))
            features_all.append(features.detach().cpu())
            labels_all.append(labels)
    features_all = torch.cat(features_all, dim=0)
    labels_all = torch.cat(labels_all, dim=0)
    # First num_query rows are queries; the remainder is the gallery.
    query_features = features_all[:self.num_query]
    query_labels = labels_all[:self.num_query]
    gallery_features = features_all[self.num_query:]
    gallery_labels = labels_all[self.num_query:]
    if self.dist == 're_rank':
        distmat = re_rank(query_features, gallery_features)
    elif self.dist == 'cos_dist':
        distmat = cos_dist(query_features, gallery_features)
    elif self.dist == 'euclidean_dist':
        distmat = euclidean_dist(query_features, gallery_features)
    else:
        # FIX: the original used `assert "Not implemented ..."`, which is
        # always truthy and never fired, leaving `distmat` undefined.
        raise NotImplementedError("Not implemented :{}".format(self.dist))
    all_rank_precison, mAP, _ = eval_func(distmat,
                                          query_labels.numpy(),
                                          gallery_labels.numpy(),
                                          use_cython=self.cython)
    rank1 = all_rank_precison[0]
    average_score = 0.5 * rank1 + 0.5 * mAP
    print('Rank1: {:.2%}, mAP {:.2%}, average score {:.2%}'.format(
        rank1, mAP, average_score))
    return rank1, mAP, average_score
def evaluate(self):
    """Validate a part-based model: concatenate the per-part embeddings
    of every image into one descriptor, then compute CMC / mAP over the
    query/gallery split and log the result.
    """
    self.model.eval()
    n_q = self.num_query
    all_feats, all_pids, all_camids = [], [], []
    with torch.no_grad():
        for data, pid, camid, _ in tqdm(self.val_dl, total=len(self.val_dl),
                                        leave=False):
            data = data.cuda()
            # The model returns a list of local (part) features; stitch
            # them together along the feature dimension.
            parts = [part.data.cpu() for part in self.model(data)]
            all_feats.append(torch.cat(parts, dim=1))
            all_pids.append(pid)
            all_camids.append(camid)
    feats = torch.cat(all_feats, dim=0)
    pids = torch.cat(all_pids, dim=0)
    camids = torch.cat(all_camids, dim=0)
    # First num_query samples are queries, the rest the gallery.
    query_feat, gallery_feat = feats[:n_q], feats[n_q:]
    query_pid, gallery_pid = pids[:n_q], pids[n_q:]
    query_camid, gallery_camid = camids[:n_q], camids[n_q:]
    distmat = euclidean_dist(query_feat, gallery_feat)
    cmc, mAP, _ = eval_func(distmat.numpy(),
                            query_pid.numpy(),
                            gallery_pid.numpy(),
                            query_camid.numpy(),
                            gallery_camid.numpy(),
                            use_cython=self.cfg.SOLVER.CYTHON)
    self.logger.info('Validation Result:')
    for r in self.cfg.TEST.CMC:
        self.logger.info('CMC Rank-{}: {:.2%}'.format(r, cmc[r - 1]))
    self.logger.info('mAP: {:.2%}'.format(mAP))
    self.logger.info('-' * 20)
def evaluate(self):
    """Validate a model that also predicts an auxiliary histogram label.

    Extracts L2-normalized ReID embeddings plus histogram-head logits for
    every validation image, reports histogram-classification accuracy,
    and computes CMC / mAP either on the fixed query/gallery split or
    averaged over ``cfg.TEST.RANDOMPERM`` random splits.  Metrics are
    logged and mirrored to TensorBoard when a summary writer is set.
    """
    self.model.eval()
    num_query = self.num_query
    feats, pids, camids = [], [], []
    histlabels = []
    histpreds = []
    with torch.no_grad():
        for batch in tqdm(self.val_dl, total=len(self.val_dl), leave=False):
            data, pid, camid, _, histlabel = batch
            data = data.cuda()
            # In this mode the model returns (embedding, histogram logits).
            ff, histpred = self.model(data, output_feature='with_histlabel')
            ff = ff.data.cpu()
            histpred = histpred.data.cpu()
            # Row-wise L2 normalization of the embedding.
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
            ff = ff.div(fnorm.expand_as(ff))
            feats.append(ff)
            pids.append(pid)
            camids.append(camid)
            histlabels.append(histlabel)
            histpreds.append(histpred)
    feats = torch.cat(feats, dim=0)
    pids = torch.cat(pids, dim=0)
    camids = torch.cat(camids, dim=0)
    histpreds = torch.cat(histpreds, dim=0)
    histlabels = torch.cat(histlabels, dim=0)
    # Histogram-head accuracy: argmax over logits vs. ground-truth labels.
    hist_acc = (histpreds[:histlabels.size()[0]].max(1)[1] == histlabels
                ).float().mean().item()
    if self.cfg.TEST.RANDOMPERM <= 0:
        # Fixed protocol: the first num_query samples form the query set.
        query_feat = feats[:num_query]
        query_pid = pids[:num_query]
        query_camid = camids[:num_query]
        gallery_feat = feats[num_query:]
        gallery_pid = pids[num_query:]
        gallery_camid = camids[num_query:]
        distmat = euclidean_dist(query_feat, gallery_feat)
        cmc, mAP, _ = eval_func(
            distmat.numpy(),
            query_pid.numpy(),
            gallery_pid.numpy(),
            query_camid.numpy(),
            gallery_camid.numpy(),
        )
    else:
        # Average metrics over RANDOMPERM random query/gallery splits with
        # a fixed seed; the global RNG state is saved and restored so the
        # shuffling here does not perturb randomness elsewhere.
        cmc = 0
        mAP = 0
        seed = torch.random.get_rng_state()
        torch.manual_seed(0)
        for i in range(self.cfg.TEST.RANDOMPERM):
            index = torch.randperm(feats.size()[0])
            # print(index[:10])
            query_feat = feats[index][:num_query]
            query_pid = pids[index][:num_query]
            query_camid = camids[index][:num_query]
            gallery_feat = feats[index][num_query:]
            gallery_pid = pids[index][num_query:]
            gallery_camid = camids[index][num_query:]
            distmat = euclidean_dist(query_feat, gallery_feat)
            _cmc, _mAP, _ = eval_func(
                distmat.numpy(),
                query_pid.numpy(),
                gallery_pid.numpy(),
                query_camid.numpy(),
                gallery_camid.numpy(),
            )
            cmc += _cmc / self.cfg.TEST.RANDOMPERM
            mAP += _mAP / self.cfg.TEST.RANDOMPERM
        torch.random.set_rng_state(seed)
    self.logger.info('Validation Result:')
    self.logger.info('hist acc:{:.2%}'.format(hist_acc))
    self.logger.info('mAP: {:.2%}'.format(mAP))
    for r in self.cfg.TEST.CMC:
        self.logger.info('CMC Rank-{}: {:.2%}'.format(r, cmc[r - 1]))
    self.logger.info('average of mAP and rank1: {:.2%}'.format(
        (mAP + cmc[0]) / 2.0))
    self.logger.info('-' * 20)
    # Mirror the metrics to TensorBoard when a writer is attached.
    if self.summary_writer:
        self.summary_writer.add_scalar('Valid/hist_acc', hist_acc,
                                       self.train_epoch)
        self.summary_writer.add_scalar('Valid/rank1', cmc[0],
                                       self.train_epoch)
        self.summary_writer.add_scalar('Valid/mAP', mAP, self.train_epoch)
        self.summary_writer.add_scalar('Valid/rank1_mAP',
                                       (mAP + cmc[0]) / 2.0,
                                       self.train_epoch)
def test(args):
    """Stand-alone evaluation entry point.

    Merges CLI overrides into the global config, loads model weights,
    extracts features over the validation loader, reports CMC / mAP,
    optionally saves rank-list images for the worst queries
    (``cfg.TEST.VIS``), and optionally re-evaluates with re-ranking
    (``cfg.TEST.RERANK``).

    :param args: parsed CLI arguments with ``config_file`` and ``opts``.
    """
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    logger = setup_logger('reid_baseline.eval', cfg.OUTPUT_DIR, 0,
                          train=False)
    logger.info('Running with config:\n{}'.format(cfg))
    _, val_dl, num_query, num_classes = make_dataloader(cfg)
    model = build_model(cfg, num_classes)
    if cfg.TEST.MULTI_GPU:
        model = nn.DataParallel(model)
        # NOTE(review): convert_model presumably converts BN layers for
        # multi-GPU (e.g. sync-BN) — confirm against its definition.
        model = convert_model(model)
        logger.info('Use multi gpu to inference')
    para_dict = torch.load(cfg.TEST.WEIGHT)
    model.load_state_dict(para_dict)
    model.cuda()
    model.eval()
    feats, pids, camids, paths = [], [], [], []
    with torch.no_grad():
        for batch in tqdm(val_dl, total=len(val_dl), leave=False):
            data, pid, camid, path = batch
            paths.extend(list(path))
            data = data.cuda()
            feat = model(data).detach().cpu()
            feats.append(feat)
            pids.append(pid)
            camids.append(camid)
    feats = torch.cat(feats, dim=0)
    pids = torch.cat(pids, dim=0)
    camids = torch.cat(camids, dim=0)
    # First num_query entries are queries; the rest form the gallery.
    query_feat = feats[:num_query]
    query_pid = pids[:num_query]
    query_camid = camids[:num_query]
    query_path = np.array(paths[:num_query])
    gallery_feat = feats[num_query:]
    gallery_pid = pids[num_query:]
    gallery_camid = camids[num_query:]
    gallery_path = np.array(paths[num_query:])
    distmat = euclidean_dist(query_feat, gallery_feat)
    cmc, mAP, all_AP = eval_func(distmat.numpy(), query_pid.numpy(),
                                 gallery_pid.numpy(), query_camid.numpy(),
                                 gallery_camid.numpy(), use_cython=True)
    if cfg.TEST.VIS:
        # Visualize the queries with the lowest AP alongside their
        # top-ranked gallery matches.
        worst_q = np.argsort(all_AP)[:cfg.TEST.VIS_Q_NUM]
        qid = query_pid[worst_q]
        q_im = query_path[worst_q]
        ind = np.argsort(distmat, axis=1)
        gid = gallery_pid[ind[worst_q]][..., :cfg.TEST.VIS_G_NUM]
        g_im = gallery_path[ind[worst_q]][..., :cfg.TEST.VIS_G_NUM]
        for idx in range(cfg.TEST.VIS_Q_NUM):
            # True where the gallery pid matches the query pid.
            sid = qid[idx] == gid[idx]
            im = rank_list_to_im(range(len(g_im[idx])), sid, q_im[idx],
                                 g_im[idx])
            im.save(
                osp.join(cfg.OUTPUT_DIR,
                         'worst_query_{}.jpg'.format(str(idx).zfill(2))))
    logger.info('Validation Result:')
    for r in cfg.TEST.CMC:
        logger.info('CMC Rank-{}: {:.2%}'.format(r, cmc[r - 1]))
    logger.info('mAP: {:.2%}'.format(mAP))
    logger.info('-' * 20)
    if not cfg.TEST.RERANK:
        return
    # Second pass: k-reciprocal re-ranking of the distance matrix.
    distmat = re_rank(query_feat, gallery_feat)
    cmc, mAP, all_AP = eval_func(distmat, query_pid.numpy(),
                                 gallery_pid.numpy(), query_camid.numpy(),
                                 gallery_camid.numpy(), use_cython=True)
    logger.info('ReRanking Result:')
    for r in cfg.TEST.CMC:
        logger.info('CMC Rank-{}: {:.2%}'.format(r, cmc[r - 1]))
    logger.info('mAP: {:.2%}'.format(mAP))
    logger.info('-' * 20)
def _collect_split(root):
    """Return (image paths, person ids, camera ids) for images under *root*.

    Filenames are expected to follow the ``<pid>_c<camid>.png`` convention.
    Uses ``os.path.splitext`` instead of ``str.strip(".png")`` — the latter
    strips *characters*, corrupting stems that start or end with any of
    '.', 'p', 'n', 'g'.
    """
    paths, ids, cids = [], [], []
    for name in os.listdir(root):
        paths.append(os.path.join(root, name))
        stem = os.path.splitext(name)[0]
        pid_part, cam_part = stem.split("_")[:2]
        ids.append(int(pid_part))
        cids.append(int(cam_part.lstrip("c")))
    return paths, ids, cids


def inference_val(model, transform, batch_size, feature_dim, k1=20, k2=6,
                  p=0.3, use_rerank=False):
    """Evaluate *model* on the local dataset7 query/gallery split.

    Extracts horizontally-flip-averaged, L2-normalized features in batches,
    computes a (re-ranked or euclidean) distance matrix, prints CMC / mAP,
    and appends the results to ``re_rank.txt``.

    :param model: network returning a ``feature_dim``-dim embedding batch.
    :param transform: preprocessing applied to each PIL image.
    :param batch_size: inference batch size.
    :param feature_dim: embedding width (was hard-coded to 2048 before).
    :param k1, k2, p: re-ranking hyper-parameters.
    :param use_rerank: if True use k-reciprocal re-ranking.
    """
    query_list, qid_list, qcid_list = _collect_split(
        r'E:\data\reid\dataset7\query')
    gallery_list, gid_list, gcid_list = _collect_split(
        r'E:\data\reid\dataset7\gallery')

    img_list = []
    for path in query_list + gallery_list:
        img = read_image(path)
        img_list.append(transform(img))
    query_num = len(query_list)
    # torch.stack keeps dtype and avoids the old tensor->numpy->tensor
    # round-trip of torch.Tensor([t.numpy() for t in img_list]).
    img_data = torch.stack(img_list)

    model = model.to(device)
    model.eval()
    # Ceiling division over the number of batches.
    iter_n = (len(img_list) + batch_size - 1) // batch_size
    all_feature = []
    for batch_idx in range(iter_n):
        batch_data = img_data[batch_idx * batch_size:(batch_idx + 1) *
                              batch_size]
        with torch.no_grad():
            # FIX: inputs must live on the same device as the model
            # (the original left them on CPU, crashing on CUDA).
            batch_data = batch_data.to(device)
            # FIX: use feature_dim instead of a hard-coded 2048.
            ff = torch.FloatTensor(batch_data.size(0), feature_dim).zero_()
            # Flip test-time augmentation: sum features of the original
            # and horizontally flipped images.
            for flip in range(2):  # FIX: no longer shadows the batch index
                if flip == 1:
                    idx = torch.arange(batch_data.size(3) - 1, -1, -1,
                                       device=batch_data.device).long()
                    batch_data = batch_data.index_select(3, idx)
                outputs = model(batch_data)
                ff = ff + outputs.data.cpu()
            # Row-wise L2 normalization of the summed features.
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
            ff = ff.div(fnorm.expand_as(ff))
        all_feature.append(ff)
    all_feature = torch.cat(all_feature)
    gallery_feat = all_feature[query_num:]
    query_feat = all_feature[:query_num]
    if use_rerank:
        distmat = re_rank(query_feat, gallery_feat, k1, k2, p)
    else:
        distmat = euclidean_dist(query_feat, gallery_feat)
    cmc, mAP, _ = eval_func(distmat, np.array(qid_list), np.array(gid_list),
                            np.array(qcid_list), np.array(gcid_list))
    print('Validation Result:')
    print(str(k1) + " - " + str(k2) + " - " + str(p))
    for r in [1, 5, 10]:
        print('CMC Rank-{}: {:.2%}'.format(r, cmc[r - 1]))
    print('mAP: {:.2%}'.format(mAP))
    # Append the same summary to a running log file.
    with open('re_rank.txt', 'a') as f:
        f.write(str(k1) + " - " + str(k2) + " - " + str(p) + "\n")
        for r in [1, 5, 10]:
            f.write('CMC Rank-{}: {:.2%}'.format(r, cmc[r - 1]) + "\n")
        f.write('mAP: {:.2%}'.format(mAP) + "\n")
        f.write('------------------------------------------\n')
        f.write('------------------------------------------\n')
        f.write('\n\n')
def inference_val(args, model, dataloader, num_query, save_dir, k1=20, k2=6,
                  p=0.3, use_rerank=False, use_flip=False, n_randperm=0,
                  bn_keys=[]):
    """Validate an MGN-style model and report CMC / mAP.

    Pipeline: optional AdaBN statistics refresh -> feature extraction
    (with optional flip-TTA, concatenating original + flipped 2048-d
    features into 4096-d) -> optional DBA (database augmentation) and
    aQE (weighted query expansion) -> distance computation (k-reciprocal
    re-ranking or weighted per-part MGN euclidean distance) ->
    ``eval_func`` on the fixed split or averaged over ``n_randperm``
    random query/gallery splits.  Results are printed and appended to
    ``save_dir + 'eval.txt'``.

    NOTE(review): ``bn_keys=[]`` is a mutable default argument; it is
    only read here, but a tuple default would be safer.
    """
    model = model.to(device)
    # AdaBN: re-estimate BatchNorm running stats on the target domain.
    if args.adabn and len(bn_keys) > 0:
        print("==> using adabn for specific bn layers")
        specific_bn_update(model, dataloader, cumulative=not args.adabn_emv,
                           bn_keys=bn_keys)
    elif args.adabn:
        print("==> using adabn for all bn layers")
        bn_update(model, dataloader, cumulative=not args.adabn_emv)
    model.eval()
    feats, pids, camids = [], [], []
    with torch.no_grad():
        for batch in tqdm(dataloader, total=len(dataloader)):
            data, pid, camid, _ = batch
            data = data.cuda()
            if use_flip:
                # Flip-TTA: place original and flipped features side by
                # side (2 x 2048 per image).
                ff = torch.FloatTensor(data.size(0), 2048 * 2).zero_()
                for i in range(2):
                    # flip along the width dimension on the second pass
                    if i == 1:
                        data = data.index_select(
                            3,
                            torch.arange(data.size(3) - 1, -1,
                                         -1).long().to('cuda'))
                    outputs = model(data)
                    f = outputs.data.cpu()
                    # cat: first half original, second half flipped
                    if i == 0:
                        ff[:, :2048] = f
                    if i == 1:
                        ff[:, 2048:] = f
                # ff = F.normalize(ff, p=2, dim=1)
            else:
                ff = model(data).data.cpu()
                # ff = F.normalize(ff, p=2, dim=1)
            feats.append(ff)
            pids.append(pid)
            camids.append(camid)
    all_feature = torch.cat(feats, dim=0)
    # all_feature = all_feature[:,:1024+512]
    pids = torch.cat(pids, dim=0)
    camids = torch.cat(camids, dim=0)
    # DBA: replace each feature by a weighted mean of its k2 nearest
    # neighbours; weights decay log-linearly (alpha must be negative).
    if args.dba:
        k2 = args.dba_k2
        alpha = args.dba_alpha
        assert alpha < 0
        print("==>using DBA k2:{} alpha:{}".format(k2, alpha))
        st = time.time()
        # [todo] heap sort
        distmat = euclidean_dist(all_feature, all_feature)
        # argpartition: only the first k2+1 columns need ordering.
        initial_rank = np.argpartition(distmat.numpy(), range(1, k2 + 1))
        all_feature = all_feature.numpy()
        V_qe = np.zeros_like(all_feature, dtype=np.float32)
        weights = np.logspace(0, alpha, k2).reshape((-1, 1))
        with tqdm(total=len(all_feature)) as pbar:
            for i in range(len(all_feature)):
                V_qe[i, :] = np.mean(
                    all_feature[initial_rank[i, :k2], :] * weights, axis=0)
                pbar.update(1)
        all_feature = V_qe
        del V_qe
        all_feature = torch.from_numpy(all_feature)
        # Re-normalize rows after augmentation.
        fnorm = torch.norm(all_feature, p=2, dim=1, keepdim=True)
        all_feature = all_feature.div(fnorm.expand_as(all_feature))
        print("DBA cost:", time.time() - st)
    # aQE: weighted query expansion (GPU implementation).
    if args.aqe:
        k2 = args.aqe_k2
        alpha = args.aqe_alpha
        print("==>using weight query expansion k2: {} alpha: {}".format(
            k2, alpha))
        st = time.time()
        all_feature = F.normalize(all_feature, p=2, dim=1)
        # NOTE: several slower CPU / per-part variants of aQE that used to
        # live here commented out were removed for readability.
        all_feature = aqe_func_gpu(all_feature, k2, alpha, len_slice=2000)
        print("aQE cost:", time.time() - st)
    print('feature shape:', all_feature.size())
    if n_randperm <= 0:
        # Fixed split: first num_query rows are queries.
        k2 = args.k2
        gallery_feat = all_feature[num_query:]
        query_feat = all_feature[:num_query]
        query_pid = pids[:num_query]
        query_camid = camids[:num_query]
        gallery_pid = pids[num_query:]
        gallery_camid = camids[num_query:]
        distmat = None
        if use_rerank:
            print('==> using rerank')
            distmat = re_rank(query_feat, gallery_feat, args.k1, args.k2, p)
        else:
            # Per-part weights for the 8 MGN branches.
            weights = [1, 1, 1, 1 / 2, 1 / 2, 1 / 3, 1 / 3, 1 / 3]
            print('==> using mgn_euclidean_dist')
            print('==> using weights:', weights)
            if use_flip:
                # Average distances of the original and flipped halves.
                for i in range(2):
                    dist = mgn_euclidean_dist(
                        query_feat[:, i * 2048:(i + 1) * 2048],
                        gallery_feat[:, i * 2048:(i + 1) * 2048],
                        norm=True,
                        weights=weights)
                    if distmat is None:
                        distmat = dist / 2.0
                    else:
                        distmat += dist / 2.0
            else:
                distmat = mgn_euclidean_dist(query_feat, gallery_feat,
                                             norm=True, weights=weights)
        cmc, mAP, _ = eval_func(distmat, query_pid.numpy(),
                                gallery_pid.numpy(), query_camid.numpy(),
                                gallery_camid.numpy())
    else:
        # Average metrics over n_randperm random query/gallery splits
        # (fixed seed for reproducibility).
        k2 = args.k2
        torch.manual_seed(0)
        cmc = 0
        mAP = 0
        for i in range(n_randperm):
            index = torch.randperm(all_feature.size()[0])
            query_feat = all_feature[index][:num_query]
            gallery_feat = all_feature[index][num_query:]
            query_pid = pids[index][:num_query]
            query_camid = camids[index][:num_query]
            gallery_pid = pids[index][num_query:]
            gallery_camid = camids[index][num_query:]
            if use_rerank:
                print('==> using rerank')
                st = time.time()
                distmat = re_rank(query_feat, gallery_feat, args.k1,
                                  args.k2, p)
                print("re_rank cost:", time.time() - st)
            else:
                print('==> using euclidean_dist')
                st = time.time()
                weights = [1, 1, 1, 1 / 2, 1 / 2, 1 / 3, 1 / 3, 1 / 3]
                print('==> using mgn_euclidean_dist')
                print('==> using weights:', weights)
                distmat = None
                # NOTE(review): the inner `i` below shadows the outer
                # permutation index; harmless here, but worth renaming.
                if use_flip:
                    for i in range(2):
                        dist = mgn_euclidean_dist(
                            query_feat[:, i * 2048:(i + 1) * 2048],
                            gallery_feat[:, i * 2048:(i + 1) * 2048],
                            norm=True,
                            weights=weights)
                        if distmat is None:
                            distmat = dist / 2.0
                        else:
                            distmat += dist / 2.0
                else:
                    distmat = mgn_euclidean_dist(query_feat, gallery_feat,
                                                 norm=True, weights=weights)
                print("euclidean_dist cost:", time.time() - st)
            _cmc, _mAP, _ = eval_func(distmat, query_pid.numpy(),
                                      gallery_pid.numpy(),
                                      query_camid.numpy(),
                                      gallery_camid.numpy())
            cmc += _cmc / n_randperm
            mAP += _mAP / n_randperm
    print('Validation Result:')
    if use_rerank:
        print(str(k1) + " - " + str(k2) + " - " + str(p))
    print('mAP: {:.2%}'.format(mAP))
    for r in [1, 5, 10]:
        print('CMC Rank-{}: {:.2%}'.format(r, cmc[r - 1]))
    print('average of mAP and rank1: {:.2%}'.format((mAP + cmc[0]) / 2.0))
    # Append the same summary to a running log file.
    with open(save_dir + 'eval.txt', 'a') as f:
        if use_rerank:
            f.write('==> using rerank\n')
            f.write(str(k1) + " - " + str(k2) + " - " + str(p) + "\n")
        else:
            f.write('==> using euclidean_dist\n')
        f.write('mAP: {:.2%}'.format(mAP) + "\n")
        for r in [1, 5, 10]:
            f.write('CMC Rank-{}: {:.2%}'.format(r, cmc[r - 1]) + "\n")
        f.write('average of mAP and rank1: {:.2%}\n'.format(
            (mAP + cmc[0]) / 2.0))
        f.write('------------------------------------------\n')
        f.write('------------------------------------------\n')
        f.write('\n\n')
def inference_val(args, model, dataloader, num_query, save_dir, k1=20, k2=6,
                  p=0.3, use_rerank=False, use_flip=False, n_randperm=0,
                  bn_keys=[]):
    """Validate a global-feature model and report CMC / mAP.

    Pipeline: optional AdaBN refresh -> feature extraction (flip-TTA
    here L2-normalizes each half before and after concatenation) ->
    optional DBA and aQE -> optional DBSCAN-style pseudo-label
    diagnostics -> distance computation (batched GPU re-ranking or plain
    euclidean distance) -> ``eval_func`` on the fixed split or averaged
    over ``n_randperm`` random splits.  Results are printed and appended
    to ``save_dir + 'eval.txt'``.

    NOTE(review): ``bn_keys=[]`` is a mutable default argument; it is
    only read here, but a tuple default would be safer.
    """
    model = model.to(device)
    # AdaBN: re-estimate BatchNorm running stats on the target domain.
    if args.adabn and len(bn_keys) > 0:
        print("==> using adabn for specific bn layers")
        specific_bn_update(model, dataloader, cumulative=not args.adabn_emv,
                           bn_keys=bn_keys)
    elif args.adabn:
        print("==> using adabn for all bn layers")
        bn_update(model, dataloader, cumulative=not args.adabn_emv)
    model.eval()
    feats, pids, camids = [], [], []
    with torch.no_grad():
        for batch in tqdm(dataloader, total=len(dataloader)):
            data, pid, camid, _ = batch
            data = data.cuda()
            if use_flip:
                # Flip-TTA: normalize each 2048-d half, then normalize
                # the concatenated 4096-d vector once more.
                ff = torch.FloatTensor(data.size(0), 2048 * 2).zero_()
                for i in range(2):
                    # flip along the width dimension on the second pass
                    if i == 1:
                        data = data.index_select(
                            3,
                            torch.arange(data.size(3) - 1, -1,
                                         -1).long().to('cuda'))
                    outputs = model(data)
                    f = outputs.data.cpu()
                    if i == 0:
                        ff[:, :2048] = F.normalize(f, p=2, dim=1)
                    if i == 1:
                        ff[:, 2048:] = F.normalize(f, p=2, dim=1)
                ff = F.normalize(ff, p=2, dim=1)
            else:
                ff = model(data).data.cpu()
                ff = F.normalize(ff, p=2, dim=1)
            feats.append(ff)
            pids.append(pid)
            camids.append(camid)
    all_feature = torch.cat(feats, dim=0)
    # all_feature = all_feature[:,:1024+512]
    pids = torch.cat(pids, dim=0)
    camids = torch.cat(camids, dim=0)
    # DBA: replace each feature by a weighted mean of its k2 nearest
    # neighbours; weights decay log-linearly (alpha must be negative).
    if args.dba:
        k2 = args.dba_k2
        alpha = args.dba_alpha
        assert alpha < 0
        print("==>using DBA k2:{} alpha:{}".format(k2, alpha))
        st = time.time()
        # [todo] heap sort
        distmat = euclidean_dist(all_feature, all_feature)
        # argpartition: only the first k2+1 columns need ordering.
        initial_rank = np.argpartition(distmat.numpy(), range(1, k2 + 1))
        all_feature = all_feature.numpy()
        V_qe = np.zeros_like(all_feature, dtype=np.float32)
        weights = np.logspace(0, alpha, k2).reshape((-1, 1))
        with tqdm(total=len(all_feature)) as pbar:
            for i in range(len(all_feature)):
                V_qe[i, :] = np.mean(
                    all_feature[initial_rank[i, :k2], :] * weights, axis=0)
                pbar.update(1)
        all_feature = V_qe
        del V_qe
        all_feature = torch.from_numpy(all_feature)
        # Re-normalize rows after augmentation.
        fnorm = torch.norm(all_feature, p=2, dim=1, keepdim=True)
        all_feature = all_feature.div(fnorm.expand_as(all_feature))
        print("DBA cost:", time.time() - st)
    # aQE: weighted query expansion (GPU implementation).  Unlike the MGN
    # variant, no extra normalization is applied here first.
    if args.aqe:
        k2 = args.aqe_k2
        alpha = args.aqe_alpha
        print("==>using weight query expansion k2: {} alpha: {}".format(
            k2, alpha))
        st = time.time()
        # NOTE: several commented-out CPU / per-part aQE variants that
        # used to live here were removed for readability.
        all_feature = aqe_func_gpu(all_feature, k2, alpha, len_slice=2000)
        print("aQE cost:", time.time() - st)
    # Pseudo-labeling diagnostics via density-based clustering on a
    # sparse distance matrix.
    if args.pseudo:
        print("==> using pseudo eps:{} minPoints:{} maxpoints:{}".format(
            args.pseudo_eps, args.pseudo_minpoints, args.pseudo_maxpoints))
        st = time.time()
        all_feature = F.normalize(all_feature, p=2, dim=1)
        if args.pseudo_visual:
            # Also collect k-distances to plot the eps-selection curve.
            all_distmat, kdist = get_sparse_distmat(
                all_feature,
                eps=args.pseudo_eps + 0.1,
                len_slice=2000,
                use_gpu=True,
                dist_k=args.pseudo_minpoints)
            plt.plot(list(range(len(kdist))), np.sort(kdist), linewidth=0.5)
            plt.savefig('eval_kdist.png')
            plt.savefig(save_dir + 'eval_kdist.png')
        else:
            all_distmat = get_sparse_distmat(all_feature,
                                             eps=args.pseudo_eps + 0.1,
                                             len_slice=2000,
                                             use_gpu=True)
        pseudolabels = predict_pseudo_label(all_distmat, args.pseudo_eps,
                                            args.pseudo_minpoints,
                                            args.pseudo_maxpoints,
                                            args.pseudo_algorithm)
        print("pseudo cost: {}s".format(time.time() - st))
        print("pseudo id cnt:", len(pseudolabels))
        print("pseudo img cnt:",
              len([x for k, v in pseudolabels.items() for x in v]))
        print("pseudo cost: {}s".format(time.time() - st))
    print('feature shape:', all_feature.size())
    if n_randperm <= 0:
        # Fixed split: first num_query rows are queries.
        k2 = args.k2
        gallery_feat = all_feature[num_query:]
        query_feat = all_feature[:num_query]
        query_pid = pids[:num_query]
        query_camid = camids[:num_query]
        gallery_pid = pids[num_query:]
        gallery_camid = camids[num_query:]
        if use_rerank:
            print('==> using rerank')
            # Batched GPU re-ranking over the stacked query+gallery set.
            distmat = re_ranking_batch_gpu(
                torch.cat([query_feat, gallery_feat], dim=0), num_query,
                args.k1, args.k2, p)
        else:
            print('==> using euclidean_dist')
            distmat = euclidean_dist(query_feat, gallery_feat)
        cmc, mAP, _ = eval_func(distmat, query_pid.numpy(),
                                gallery_pid.numpy(), query_camid.numpy(),
                                gallery_camid.numpy())
    else:
        # Average metrics over n_randperm random query/gallery splits
        # (fixed seed for reproducibility).
        k2 = args.k2
        torch.manual_seed(0)
        cmc = 0
        mAP = 0
        for i in range(n_randperm):
            index = torch.randperm(all_feature.size()[0])
            query_feat = all_feature[index][:num_query]
            gallery_feat = all_feature[index][num_query:]
            query_pid = pids[index][:num_query]
            query_camid = camids[index][:num_query]
            gallery_pid = pids[index][num_query:]
            gallery_camid = camids[index][num_query:]
            if use_rerank:
                print('==> using rerank')
                st = time.time()
                distmat = re_ranking_batch_gpu(
                    torch.cat([query_feat, gallery_feat], dim=0), num_query,
                    args.k1, args.k2, p)
                print("re_rank cost:", time.time() - st)
            else:
                print('==> using euclidean_dist')
                st = time.time()
                distmat = euclidean_dist(query_feat, gallery_feat)
                print("euclidean_dist cost:", time.time() - st)
            _cmc, _mAP, _ = eval_func(distmat, query_pid.numpy(),
                                      gallery_pid.numpy(),
                                      query_camid.numpy(),
                                      gallery_camid.numpy())
            cmc += _cmc / n_randperm
            mAP += _mAP / n_randperm
    print('Validation Result:')
    if use_rerank:
        print(str(k1) + " - " + str(k2) + " - " + str(p))
    print('mAP: {:.2%}'.format(mAP))
    for r in [1, 5, 10]:
        print('CMC Rank-{}: {:.2%}'.format(r, cmc[r - 1]))
    print('average of mAP and rank1: {:.2%}'.format((mAP + cmc[0]) / 2.0))
    # Append the same summary to a running log file.
    with open(save_dir + 'eval.txt', 'a') as f:
        if use_rerank:
            f.write('==> using rerank\n')
            f.write(str(k1) + " - " + str(k2) + " - " + str(p) + "\n")
        else:
            f.write('==> using euclidean_dist\n')
        f.write('mAP: {:.2%}'.format(mAP) + "\n")
        for r in [1, 5, 10]:
            f.write('CMC Rank-{}: {:.2%}'.format(r, cmc[r - 1]) + "\n")
        f.write('average of mAP and rank1: {:.2%}\n'.format(
            (mAP + cmc[0]) / 2.0))
        f.write('------------------------------------------\n')
        f.write('------------------------------------------\n')
        f.write('\n\n')