def compute(self):
    """Aggregate test features, build a squared-euclidean distance matrix, and
    evaluate CMC / mAP, tolerating datasets that carry no camera ids.

    Returns:
        (cmc, mAP) as produced by ``eval_func``.
    """
    feats = torch.cat(self.feats, dim=0)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    # query split
    qf = feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    # Camera ids may be missing/None for some datasets; the original code used
    # bare ``except: pass`` here, which left the variable undefined and hid
    # real errors. Record None explicitly instead.
    try:
        q_camids = np.asarray(self.camids[:self.num_query])
    except (AttributeError, TypeError, IndexError):
        q_camids = None
    # gallery split
    gf = feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    try:
        g_camids = np.asarray(self.camids[self.num_query:])
    except (AttributeError, TypeError, IndexError):
        g_camids = None
    m, n = qf.shape[0], gf.shape[0]
    # Squared euclidean distance: ||q||^2 + ||g||^2 - 2 * q . g^T
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Keyword form of addmm_: the positional (beta, alpha, ...) signature is
    # deprecated and removed in recent PyTorch releases.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.cpu().numpy()
    # Fall back to pid-only evaluation when camera ids are unavailable.
    if q_camids is not None and g_camids is not None:
        cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    else:
        cmc, mAP = eval_func(distmat, q_pids, g_pids)
    return cmc, mAP
def compute(self):
    """Evaluate CMC / mAP using either euclidean or cosine-derived distances,
    selected by ``self.type``.

    Returns:
        (cmc, mAP) as produced by ``eval_func``.
    """
    feats = torch.cat(self.feats, dim=0)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    # query
    qf = feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    # gallery
    gf = feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])
    m, n = qf.shape[0], gf.shape[0]
    if self.type == 'euclidean':
        print('Compute euclidean distance...')
        # ||q||^2 + ||g||^2 - 2 * q . g^T
        distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                  torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
        # Keyword form; the positional (beta, alpha, ...) addmm_ signature is
        # deprecated and removed in recent PyTorch releases.
        distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    else:
        print('Compute cosine distance...')
        distmat = torch.mm(qf, gf.t())
        print(distmat.max())
        # Negate similarity so smaller = closer; the +5 constant offset does
        # not change the ranking order.
        distmat = distmat * (-1) + 5
    distmat = distmat.cpu().numpy()
    print('distmat.shape:', distmat.shape)
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    return cmc, mAP
def compute(self):
    """Build the squared-euclidean distance matrix and evaluate; optionally
    also return the distance matrix itself when ``self.extract_feat`` is set.

    Returns:
        (distmat, cmc, mAP) when ``self.extract_feat`` else (cmc, mAP).
    """
    feats = torch.cat(self.feats, dim=0)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    # query
    qf = feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    # gallery
    gf = feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])
    m, n = qf.shape[0], gf.shape[0]
    # ||q||^2 + ||g||^2 - 2 * q . g^T
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Keyword form; the positional (beta, alpha, ...) addmm_ signature is
    # deprecated and removed in recent PyTorch releases.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.cpu().numpy()
    # `isviaual` is this eval_func variant's keyword spelling -- keep as-is.
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids,
                         remove_camera=self.remove_camera,
                         isviaual=self.isvisual)
    if self.extract_feat:
        return distmat, cmc, mAP
    return cmc, mAP
def compute(self):
    """Evaluate with k-reciprocal re-ranking; this eval_func variant also
    returns the top-200 indices and the query/gallery counts."""
    all_feats = torch.cat(self.feats, dim=0)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        all_feats = torch.nn.functional.normalize(all_feats, dim=1, p=2)
    split = self.num_query
    # query side
    qf = all_feats[:split]
    q_pids = np.asarray(self.pids[:split])
    q_camids = np.asarray(self.camids[:split])
    # gallery side
    gf = all_feats[split:]
    g_pids = np.asarray(self.pids[split:])
    g_camids = np.asarray(self.camids[split:])
    print("Enter reranking")
    distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
    cmc, mAP, max_200_indices, num_q, num_g = eval_func(
        distmat, q_pids, g_pids, q_camids, g_camids)
    return cmc, mAP, max_200_indices, num_q, num_g
def compute(self):
    """Optionally L2-normalize test features, apply k-reciprocal re-ranking,
    and evaluate CMC / mAP."""
    feats = torch.cat(self.feats, dim=0)
    if self.if_feat_norm:
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    split = self.num_query
    # query side
    qf = feats[:split]
    q_pids = np.asarray(self.pids[:split])
    q_camids = np.asarray(self.camids[:split])
    # gallery side
    gf = feats[split:]
    g_pids = np.asarray(self.pids[split:])
    g_camids = np.asarray(self.camids[split:])
    logger.info("Enter reranking")
    distmat = re_ranking(qf, gf, k1=24, k2=6, lambda_value=0.3)
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    # (A commented-out k1/k2 grid-search sweep previously lived here.)
    return cmc, mAP
def compute(self):
    """Track-aware evaluation: rank the gallery with ``self.track_ranking``
    using gallery tracklet ids, then compute CMC / mAP.

    Returns:
        (cmc, mAP) as produced by ``eval_func``.
    """
    feats = torch.cat(self.feats, dim=0)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    # query
    qf = feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    # gallery
    gf = feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])
    gallery_tids = np.asarray(self.tids[self.num_query:])
    # (Removed dead locals m, n that were computed but never used.)
    # track_ranking consumes numpy arrays, so move to CPU first.
    qf = qf.cpu().numpy()
    gf = gf.cpu().numpy()
    distmat = self.track_ranking(qf, gf, gallery_tids, self.unique_tids)
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids,
                         max_rank=self.max_rank)
    return cmc, mAP
def compute(self):
    """Euclidean-distance evaluation; optionally dumps the top-100 ranked
    gallery indices per query to ``result.txt``."""
    feats = torch.cat(self.feats, dim=0)
    if self.feat_norm == 'yes':
        # normalize test features to unit L2 norm
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    nq = self.num_query
    qf, gf = feats[:nq], feats[nq:]
    q_pids = np.asarray(self.pids[:nq])
    q_camids = np.asarray(self.camids[:nq])
    g_pids = np.asarray(self.pids[nq:])
    g_camids = np.asarray(self.camids[nq:])
    distmat = euclidean_dist(qf, gf).cpu().numpy()
    if self.validation_flag:
        cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    else:
        # no validation labels -> report zeroed metrics
        cmc, mAP = np.zeros((self.max_rank,)), 0.0
    if self.output_flag:
        indices = np.argsort(distmat, axis=1)
        np.savetxt("result.txt", indices[:, :100], fmt="%05d")
    return cmc, mAP
def compute(self):
    """Fuse original+flipped global and local features, re-rank each stream,
    blend the distance matrices (0.96 / 0.04), and evaluate."""
    # concatenate the flipped variants along the feature dimension
    feats = torch.cat((torch.cat(self.feats, dim=0),
                       torch.cat(self.feats_flip, dim=0)), 1)
    local_feats = torch.cat((torch.cat(self.local_feats, dim=0),
                             torch.cat(self.local_feats_flip, dim=0)), 1)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
        local_feats = torch.nn.functional.normalize(local_feats, dim=1, p=2)
    nq = self.num_query
    # query side
    qf, local_qf = feats[:nq], local_feats[:nq]
    q_pids = np.asarray(self.pids[:nq])
    q_camids = np.asarray(self.camids[:nq])
    # gallery side
    gf, local_gf = feats[nq:], local_feats[nq:]
    g_pids = np.asarray(self.pids[nq:])
    g_camids = np.asarray(self.camids[nq:])
    print("Enter reranking")
    distmat_global = re_ranking(qf, gf, k1=6, k2=3, lambda_value=0.8)
    distmat_local = re_ranking(local_qf, local_gf, k1=6, k2=3, lambda_value=0.8)
    # fixed global/local blend
    distmat = 0.96 * distmat_global + (1 - 0.96) * distmat_local
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    return cmc, mAP
def adjust_rerank_function(dist_list, query_pid_list, gallery_pid_list,
                           query_camid_list, gallery_camid_list, l_w=0.96):
    """Grid-search re-ranking hyper-parameters (k1, k2, lambda) over four
    pre-computed distance sets (global/local x original/flipped) and print the
    best (mAP + rank-1)/2 configuration found.

    Args:
        dist_list: 12 distance matrices, ordered as
            [global q-g, q-q, g-g, local q-g, q-q, g-g,
             flip-global q-g, q-q, g-g, flip-local q-g, q-q, g-g].
        query_pid_list / gallery_pid_list: person ids for each split.
        query_camid_list / gallery_camid_list: camera ids for each split.
        l_w: global/local blend weight. New keyword with a default: the
            original body referenced ``l_w`` without ever defining it (its
            sweep loop was commented out), raising NameError at runtime.
    """
    q_pids = np.asarray(query_pid_list)
    g_pids = np.asarray(gallery_pid_list)
    q_camids = np.asarray(query_camid_list)
    g_camids = np.asarray(gallery_camid_list)
    best_score = 0  # best (mAP + rank-1)/2 so far; original name `max` shadowed the builtin
    plist = []
    global_q_g_dist, global_q_q_dist, global_g_g_dist = dist_list[0:3]
    local_q_g_dist, local_q_q_dist, local_g_g_dist = dist_list[3:6]
    flip_global_q_g_dist, flip_global_q_q_dist, flip_global_g_g_dist = dist_list[6:9]
    flip_local_q_g_dist, flip_local_q_q_dist, flip_local_g_g_dist = dist_list[9:12]
    for k1 in range(6, 8, 1):
        for k2 in range(3, 5, 1):
            for l in [0.77, 0.78, 0.79, 0.80, 0.81, 0.82,
                      0.83, 0.84, 0.85, 0.86, 0.87, 0.88]:
                distmat_global = aligned_re_ranking(
                    global_q_g_dist, global_q_q_dist, global_g_g_dist,
                    k1=k1, k2=k2, lambda_value=l)
                print("Global dismat is computed done")
                distmat_local = aligned_re_ranking(
                    local_q_g_dist, local_q_q_dist, local_g_g_dist,
                    k1=k1, k2=k2, lambda_value=l)
                print("Local dismat is computed done")
                flip_distmat_global = aligned_re_ranking(
                    flip_global_q_g_dist, flip_global_q_q_dist,
                    flip_global_g_g_dist, k1=k1, k2=k2, lambda_value=l)
                print("Global_flip dismat is computed done")
                flip_distmat_local = aligned_re_ranking(
                    flip_local_q_g_dist, flip_local_q_q_dist,
                    flip_local_g_g_dist, k1=k1, k2=k2, lambda_value=l)
                print("Local_flip dismat is computed done")
                # blend global/local, then average the original and flipped views
                distmat = l_w * distmat_global + (1 - l_w) * distmat_local
                flip_distmat = l_w * flip_distmat_global + (1 - l_w) * flip_distmat_local
                distmat = (flip_distmat + distmat) / 2
                cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
                for r in [1]:
                    score = (mAP + cmc[r - 1]) / 2
                    if best_score < score:
                        best_score = score
                        plist = [k1, k2, l, mAP, cmc[r - 1]]
                        print("====k1=%d=====k2=%d=====l=%f=====l_w=%f" % (k1, k2, l, l_w))
                        print("CMC curve, Rank-%d:%.4f, map:%.4f, final: %.4f" % (
                            r, cmc[r - 1], mAP, score))
    print(plist)
def compute(self):
    """Persist query/gallery features and metadata under ./outputs, then run
    k-reciprocal re-ranking and evaluate CMC / mAP."""
    feats = torch.cat(self.feats, dim=0)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    nq = self.num_query
    # query / gallery split
    qf, gf = feats[:nq], feats[nq:]
    q_pids = np.asarray(self.pids[:nq])
    q_camids = np.asarray(self.camids[:nq])
    g_pids = np.asarray(self.pids[nq:])
    g_camids = np.asarray(self.camids[nq:])
    # NAMGYU
    print("Saving output features and dataset metadata")
    os.makedirs("outputs", exist_ok=True)
    np.save("outputs/qf.npy", qf.cpu().numpy())
    np.save("outputs/gf.npy", gf.cpu().numpy())
    metadata = {
        "q_pids": q_pids.tolist(),
        "g_pids": g_pids.tolist(),
        "q_camids": q_camids.tolist(),
        "g_camids": g_camids.tolist(),
    }
    with open("outputs/other.json", "w") as f:
        json.dump(metadata, f)
    # move both splits to CPU before re-ranking
    gf = gf.cpu()
    qf = qf.cpu()
    print("Enter reranking")
    distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
    # NAMGYU
    print("Saving distmat")
    np.save("outputs/distmat.npy", distmat)
    # NAMGYU
    print("Evaluating...")
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    print("Evaluation complete")
    return cmc, mAP
def compute(self):
    """Re-rank the primary feature stream and evaluate; prints mAP and CMC at
    ranks 1/3/5/10.

    Note: ``self.feats1`` / ``self.feats2`` are still concatenated and
    normalized for pipeline parity, but only the primary ``feats`` stream is
    scored -- the original per-stream euclidean distance matrices
    (``edu_distmat``/``distmat1``/``distmat2``) were computed and never used
    (dead code, also using the removed positional ``addmm_`` signature), so
    they are dropped here.
    """
    feats = torch.cat(self.feats, dim=0)
    feats1 = torch.cat(self.feats1, dim=0)
    feats2 = torch.cat(self.feats2, dim=0)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
        feats1 = torch.nn.functional.normalize(feats1, dim=1, p=2)
        feats2 = torch.nn.functional.normalize(feats2, dim=1, p=2)
    # query
    qf = feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    # gallery
    gf = feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])
    ranks = [1, 3, 5, 10]
    print("Enter reranking")
    distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    print("Results ----------")
    print("mAP: {:.2%}".format(mAP))
    print("CMC curve")
    for r in ranks:
        print("Rank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
    print("------------------")
    return cmc, mAP
def compute(self):
    """Evaluate CMC / mAP using the accumulated scores directly as a
    single-row distance matrix."""
    distmat = torch.cat(self.scores, dim=0).view(1, -1).cpu().numpy()
    # print(distmat.shape)
    # NOTE(review): a leading column appears to be stripped when the score
    # matrix has 101 columns -- confirm the score layout with the caller.
    if distmat.shape[1] == 101:
        distmat = distmat[:, 1:]
    nq = self.num_query
    # query labels
    q_pids = np.asarray(self.pids[:nq])
    q_camids = np.asarray(self.camids[:nq])
    # gallery labels
    g_pids = np.asarray(self.pids[nq:])
    g_camids = np.asarray(self.camids[nq:])
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    return cmc, mAP
def compute(self):
    """Plain euclidean-distance evaluation of query vs. gallery features."""
    feats = torch.cat(self.feats, dim=0)
    if self.if_feat_norm:
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    split = self.num_query
    # query / gallery split
    qf, gf = feats[:split], feats[split:]
    q_pids = np.asarray(self.pids[:split])
    q_camids = np.asarray(self.camids[:split])
    g_pids = np.asarray(self.pids[split:])
    g_camids = np.asarray(self.camids[split:])
    distmat = euclidean_dist(qf, gf).cpu().numpy()
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    return cmc, mAP
def compute(self):
    """Squared-euclidean distance evaluation (no feature normalization).

    Returns:
        (cmc, mAP) as produced by ``eval_func``.
    """
    feats = torch.cat(self.feats, dim=0)
    # query
    qf = feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    # gallery
    gf = feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])
    m, n = qf.shape[0], gf.shape[0]
    # ||q||^2 + ||g||^2 - 2 * q . g^T
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Keyword form; the positional (beta, alpha, ...) addmm_ signature is
    # deprecated and removed in recent PyTorch releases.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.cpu().numpy()
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    return cmc, mAP
def compute(self):
    """Combine the global squared-euclidean distance with an aligned local
    distance (weight 0.4) and evaluate, passing image paths through to
    ``eval_func``.

    Returns:
        (cmc, mAP) as produced by ``eval_func``.
    """
    feats = torch.cat(self.feats, dim=0)
    local_feats = torch.cat(self.local_feats, dim=0)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    # query
    qf = feats[:self.num_query]
    qlf = local_feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    q_img_paths = np.asarray(self.img_paths[:self.num_query])
    # gallery
    gf = feats[self.num_query:]
    glf = local_feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])
    g_img_paths = np.asarray(self.img_paths[self.num_query:])
    m, n = qf.shape[0], gf.shape[0]
    ### global distmat: ||q||^2 + ||g||^2 - 2 * q . g^T
    global_distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                     torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Keyword form; the positional (beta, alpha, ...) addmm_ signature is
    # deprecated and removed in recent PyTorch releases.
    global_distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    global_distmat = global_distmat.cpu().numpy()
    ### local distmat
    # swap the last two axes for low_memory_local_dist -- assumes local
    # features are (N, dim, parts); TODO confirm against the extractor.
    qlf = qlf.permute(0, 2, 1)
    glf = glf.permute(0, 2, 1)
    local_distmat = low_memory_local_dist(qlf.cpu().numpy(), glf.cpu().numpy(),
                                          aligned=True)
    # fixed 0.4 local weight
    dist_mat = global_distmat + 0.4 * local_distmat
    cmc, mAP = eval_func(dist_mat, q_pids, g_pids, q_camids, g_camids,
                         q_img_paths, g_img_paths)
    return cmc, mAP
def compute(self):
    """Squared-euclidean distance evaluation (no feature normalization).

    Returns:
        (cmc, mAP) as produced by ``eval_func``.
    """
    feats = torch.cat(self.feats, dim=0)
    # query
    qf = feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    # gallery
    gf = feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])
    m, n = qf.shape[0], gf.shape[0]
    # ||q||^2 + ||g||^2 - 2 * q . g^T
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Keyword form; the positional (beta, alpha, ...) addmm_ signature is
    # deprecated and removed in recent PyTorch releases.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.cpu().numpy()
    # (Removed commented-out scipy .mat export and alternative cmc/mAP calls.)
    cmc1, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    return cmc1, mAP
def compute(self):
    """k-reciprocal re-ranking followed by CMC / mAP evaluation."""
    feats = torch.cat(self.feats, dim=0)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    nq = self.num_query
    # query / gallery split
    qf, gf = feats[:nq], feats[nq:]
    q_pids = np.asarray(self.pids[:nq])
    q_camids = np.asarray(self.camids[:nq])
    g_pids = np.asarray(self.pids[nq:])
    g_camids = np.asarray(self.camids[nq:])
    print("Enter reranking")
    distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)
    # the trailing None, None fill this eval_func variant's extra positional args
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids, None, None)
    return cmc, mAP
def compute(self):
    """Negated-inner-product distance evaluation with optional ambiguity
    labels (``self.new_eval``).

    Removed the dead locals ``m, n`` -- they were only referenced by the
    commented-out euclidean-distance code.

    Returns:
        (cmc, mAP) as produced by ``eval_func``.
    """
    feats = torch.cat(self.feats, dim=0)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    nq = self.num_query
    # query
    qf = feats[:nq]
    q_pids = np.asarray(self.pids[:nq])
    q_camids = np.asarray(self.camids[:nq])
    q_ambis = np.asarray(self.ambis[:nq]) if self.new_eval else None
    # gallery
    gf = feats[nq:]
    g_pids = np.asarray(self.pids[nq:])
    g_camids = np.asarray(self.camids[nq:])
    g_ambis = np.asarray(self.ambis[nq:]) if self.new_eval else None
    # negative inner product: larger similarity -> smaller distance
    distmat = -1 * torch.mm(qf, gf.t())
    distmat = distmat.cpu().numpy()
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids,
                         q_ambis=q_ambis, g_ambis=g_ambis)
    return cmc, mAP
def compute(self):
    """Blend global and local squared-euclidean distances (0.96 / 0.04) and
    evaluate CMC / mAP.

    Returns:
        (cmc, mAP) as produced by ``eval_func``.
    """
    feats = torch.cat(self.feats, dim=0)
    local_feats = torch.cat(self.local_feats, dim=0)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
        local_feats = torch.nn.functional.normalize(local_feats, dim=1, p=2)
    # query
    qf = feats[:self.num_query]
    local_qf = local_feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    # gallery
    gf = feats[self.num_query:]
    local_gf = local_feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])
    m, n = qf.shape[0], gf.shape[0]
    # global stream: ||q||^2 + ||g||^2 - 2 * q . g^T
    distmat_global = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                     torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Keyword form; the positional (beta, alpha, ...) addmm_ signature is
    # deprecated and removed in recent PyTorch releases.
    distmat_global.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat_global = distmat_global.cpu().numpy()
    # local stream, same formula
    distmat_local = torch.pow(local_qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
                    torch.pow(local_gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat_local.addmm_(local_qf, local_gf.t(), beta=1, alpha=-2)
    distmat_local = distmat_local.cpu().numpy()
    # fixed global/local blend
    distmat = 0.96 * distmat_global + (1 - 0.96) * distmat_local
    # (a commented-out write_json_results export previously lived here)
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    return cmc, mAP
def compute(self):
    """Squared-euclidean evaluation that also hands query/gallery image paths
    to ``eval_func`` for (optional) rank visualization.

    Returns:
        (cmc, mAP) as produced by ``eval_func``.
    """
    feats = torch.cat(self.feats, dim=0)
    img_path = self.img_path
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    # query
    qf = feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    query_img_paths = np.asarray(img_path[:self.num_query])
    # gallery
    gf = feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])
    gallery_img_paths = np.asarray(img_path[self.num_query:])
    m, n = qf.shape[0], gf.shape[0]
    # ||q||^2 + ||g||^2 - 2 * q . g^T
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Keyword form; the positional (beta, alpha, ...) addmm_ signature is
    # deprecated and removed in recent PyTorch releases.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.cpu().numpy()
    # NOTE(review): hard-coded absolute save_dir -- consider making this configurable.
    cmc, mAP = eval_func(
        distmat, q_pids, g_pids, q_camids, g_camids,
        query_img_paths, gallery_img_paths, max_rank=50,
        save_dir="/home/rxn/myproject/MGAN/pth/fig/OCCLUDED/P_DUKE",
        epoch=-1, save_rank=False)
    return cmc, mAP
def compute(self):
    """Squared-euclidean evaluation; also pickles the full result bundle
    (paths, ids, cams, distmat) to ``output.pkl``.

    Returns:
        (cmc, mAP) as produced by ``eval_func``.
    """
    feats = torch.cat(self.feats, dim=0)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
    # query
    qf = feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    # gallery
    gf = feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])
    m, n = qf.shape[0], gf.shape[0]
    # ||q||^2 + ||g||^2 - 2 * q . g^T
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # Keyword form; the positional (beta, alpha, ...) addmm_ signature is
    # deprecated and removed in recent PyTorch releases.
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.cpu().numpy()
    # save results for offline analysis
    query_paths = self.paths[:self.num_query]
    gallery_paths = self.paths[self.num_query:]
    with open('output.pkl', 'wb') as f:
        pickle.dump(
            {
                'gallery_paths': gallery_paths,
                'query_paths': query_paths,
                'gallery_ids': g_pids,
                'query_ids': q_pids,
                'query_cams': q_camids,
                'gallery_cams': g_camids,
                'distmat': distmat
            }, f)
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    return cmc, mAP
def compute(self):
    """Blend the two auxiliary distance streams (0.6 / 0.4) and evaluate with
    rank saving enabled.

    Cleanups vs. the original: the distance matrix built on the primary
    ``feats`` stream was overwritten by the blend before ever being used
    (dead code, removed); ``ranks`` was only read by commented-out sweep code
    (removed); ``addmm_`` now uses the keyword signature -- the positional
    (beta, alpha, ...) form is deprecated and removed in recent PyTorch.

    Returns:
        (cmc, mAP) as produced by ``eval_func``.
    """
    feats = torch.cat(self.feats, dim=0)
    feats1 = torch.cat(self.feats1, dim=0)
    feats2 = torch.cat(self.feats2, dim=0)
    label = self.labels
    img_path = self.img_path
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
        feats1 = torch.nn.functional.normalize(feats1, dim=1, p=2)
        feats2 = torch.nn.functional.normalize(feats2, dim=1, p=2)
    # query
    qf = feats[:self.num_query]
    qf1 = feats1[:self.num_query]
    qf2 = feats2[:self.num_query]
    label = label[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    query_img_paths = np.asarray(img_path[:self.num_query])
    # gallery
    gf = feats[self.num_query:]
    gf1 = feats1[self.num_query:]
    gf2 = feats2[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])
    gallery_img_paths = np.asarray(img_path[self.num_query:])
    m, n = qf1.shape[0], gf1.shape[0]
    # per-stream squared-euclidean distances: ||q||^2 + ||g||^2 - 2 * q . g^T
    distmat1 = torch.pow(qf1, 2).sum(dim=1, keepdim=True).expand(m, n) + \
               torch.pow(gf1, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat2 = torch.pow(qf2, 2).sum(dim=1, keepdim=True).expand(m, n) + \
               torch.pow(gf2, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat1.addmm_(qf1, gf1.t(), beta=1, alpha=-2)
    distmat2.addmm_(qf2, gf2.t(), beta=1, alpha=-2)
    distmat1 = distmat1.cpu().numpy()
    distmat2 = distmat2.cpu().numpy()
    # fixed 0.6 / 0.4 blend of the two streams
    distmat = (1 - 0.4) * distmat1 + 0.4 * distmat2
    cmc, mAP = eval_func(
        distmat, q_pids, g_pids, q_camids, g_camids,
        query_img_paths, gallery_img_paths, max_rank=50,
        save_dir='/home/rxn/myproject/MGAN/pth/fig/OCCLUDED/occduke_nocolor',
        epoch=-1, save_rank=True)
    # (a large commented-out lam weight sweep previously followed here)
    return cmc, mAP
def compute(self):
    """Evaluate (or export) re-ranked distances for global and/or part-based features.

    Two top-level modes:
      * ``not self.merge``: build query/gallery pairwise distance matrices from
        the accumulated features (original + flipped, concatenated on the
        feature dim). Then either grid-search re-ranking hyper-parameters
        (``self.adjust_rerank``) or re-rank once with fixed settings and dump
        a JSON ranking file.
      * ``self.merge``: load a pre-computed distance matrix from disk and dump
        the JSON ranking file.
    """
    # ground-truth labels for the query / gallery splits
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])
    if not self.merge:
        # fuse original and flipped global features along the feature dimension
        feats = torch.cat(self.feats, dim=0)
        flip_feats = torch.cat(self.flip_feat, dim=0)
        feats = torch.cat((feats, flip_feats), 1)
        if self.aligned_test or self.pcb_test:
            # part-based (local) features get the same original+flip fusion
            local_feats = torch.cat(self.local_feats, dim=0)
            flip_local_feat = torch.cat(self.flip_local_feat, dim=0)
            local_feats = torch.cat((local_feats, flip_local_feat), 1)
        elif self.new_pcb_test:
            local_feats = torch.cat(self.local_feats, dim=0)
            local_feats_2 = torch.cat(self.local_feats_2, dim=0)
            flip_local_feat = torch.cat(self.flip_local_feat, dim=0)
        if self.feat_norm == 'yes':
            print("The test feature is normalized")
            feats = torch.nn.functional.normalize(feats, dim=1, p=2)
            # flip_feats = torch.nn.functional.normalize(flip_feat, dim=1, p=2)
            if self.aligned_test or self.pcb_test:
                local_feats = torch.nn.functional.normalize(local_feats, dim=1, p=2)
                # flip_local_feats = torch.nn.functional.normalize(flip_local_feat, dim=1, p=2)
            elif self.new_pcb_test:
                local_feats = torch.nn.functional.normalize(local_feats, dim=1, p=2)
                local_feats_2 = torch.nn.functional.normalize(local_feats_2, dim=1, p=2)
                flip_local_feats = torch.nn.functional.normalize(flip_local_feat, dim=1, p=2)
        if self.aligned_test or self.pcb_test:
            qf = feats[:self.num_query]
            gf = feats[self.num_query:]
            local_qf = local_feats[:self.num_query]
            local_gf = local_feats[self.num_query:]
            # pairwise euclidean distances for every split combination
            global_q_g_dist = self.compute_dist(
                qf.cpu().detach().numpy(), gf.cpu().detach().numpy(),
                type='euclidean')
            global_g_g_dist = self.compute_dist(
                gf.cpu().detach().numpy(), gf.cpu().detach().numpy(),
                type='euclidean')
            global_q_q_dist = self.compute_dist(
                qf.cpu().detach().numpy(), qf.cpu().detach().numpy(),
                type='euclidean')
            local_q_g_dist = self.compute_dist(
                local_qf.cpu().detach().numpy(), local_gf.cpu().detach().numpy(),
                type='euclidean')
            # 1061,2233
            local_q_q_dist = self.compute_dist(
                local_qf.cpu().detach().numpy(), local_qf.cpu().detach().numpy(),
                type='euclidean')
            local_g_g_dist = self.compute_dist(
                local_gf.cpu().detach().numpy(), local_gf.cpu().detach().numpy(),
                type='euclidean')
            # l_w = 0.85
            # global_local_g_g_dist = global_g_g_dist * l_w
            # global_local_q_g_dist = global_q_g_dist * l_w
            # global_local_q_q_dist = global_q_q_dist * l_w
            #
            # global_local_g_g_dist += local_g_g_dist * (1 - l_w)
            # global_local_q_g_dist += local_q_g_dist * (1 - l_w)
            # global_local_q_q_dist += local_q_q_dist * (1 - l_w)
            # NOTE(review): on this path l_w is never assigned (only the
            # new_pcb_test branch sets it), so the adjust_rerank sweep below
            # would raise NameError at `l_w * distmat_global` -- confirm the
            # intended configuration.
        elif self.new_pcb_test:
            qf = feats[:self.num_query]
            gf = feats[self.num_query:]
            local_qf = local_feats[:self.num_query]
            local_gf = local_feats[self.num_query:]
            global_q_g_dist = self.compute_dist(
                qf.cpu().detach().numpy(), gf.cpu().detach().numpy(),
                type='euclidean')
            global_g_g_dist = self.compute_dist(
                gf.cpu().detach().numpy(), gf.cpu().detach().numpy(),
                type='euclidean')
            global_q_q_dist = self.compute_dist(
                qf.cpu().detach().numpy(), qf.cpu().detach().numpy(),
                type='euclidean')
            local_q_g_dist = self.compute_dist(
                local_qf.cpu().detach().numpy(), local_gf.cpu().detach().numpy(),
                type='euclidean')
            # 1061,2233
            local_q_q_dist = self.compute_dist(
                local_qf.cpu().detach().numpy(), local_qf.cpu().detach().numpy(),
                type='euclidean')
            local_g_g_dist = self.compute_dist(
                local_gf.cpu().detach().numpy(), local_gf.cpu().detach().numpy(),
                type='euclidean')
            # blend global and local pairwise distances with a fixed weight
            l_w = 0.85
            global_local_g_g_dist = global_g_g_dist * l_w
            global_local_q_g_dist = global_q_g_dist * l_w
            global_local_q_q_dist = global_q_q_dist * l_w
            global_local_g_g_dist += local_g_g_dist * (1 - l_w)
            global_local_q_g_dist += local_q_g_dist * (1 - l_w)
            global_local_q_q_dist += local_q_q_dist * (1 - l_w)
        else:
            # global features only -- the "global_local" matrices are aliases
            qf = feats[:self.num_query]
            gf = feats[self.num_query:]
            global_q_g_dist = self.compute_dist(
                qf.cpu().detach().numpy(), gf.cpu().detach().numpy(),
                type='euclidean')
            global_g_g_dist = self.compute_dist(
                gf.cpu().detach().numpy(), gf.cpu().detach().numpy(),
                type='euclidean')
            global_q_q_dist = self.compute_dist(
                qf.cpu().detach().numpy(), qf.cpu().detach().numpy(),
                type='euclidean')
            global_local_g_g_dist = global_g_g_dist
            global_local_q_g_dist = global_q_g_dist
            global_local_q_q_dist = global_q_q_dist
        print("Enter reranking")
        if self.adjust_rerank:
            # hyper-parameter sweep over (k1, k2, lambda)
            max = 0   # best (mAP + rank-1)/2 so far; shadows the builtin
            plist = []
            for k1 in range(6, 8, 1):
                for k2 in range(3, 5, 1):
                    for l in [0.77, 0.78, 0.79, 0.80, 0.81, 0.82, 0.83, 0.84, 0.85, 0.86, 0.87, 0.88]:
                        # for l_w in [0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97]:
                        if self.aligned_test or self.pcb_test or self.new_pcb_test:
                            # distmat = aligned_re_ranking(
                            #     global_local_q_g_dist, global_local_q_q_dist, global_local_g_g_dist, k1=k1, k2=k2,
                            #     lambda_value=l)
                            # distmat_global = re_ranking(qf, gf, k1=6, k2=3, lambda_value=0.80)
                            # distmat_local = re_ranking(local_qf, local_gf, k1=k1, k2=k2, lambda_value=l)
                            distmat_global = aligned_re_ranking(
                                global_q_g_dist, global_q_q_dist, global_g_g_dist,
                                k1=k1, k2=k2, lambda_value=l)
                            # NOTE(review): these arrays are deleted here but
                            # the next sweep iteration reads them again --
                            # NameError on the second pass; confirm before
                            # relying on this sweep.
                            del global_q_g_dist, global_q_q_dist, global_g_g_dist
                            gc.collect()
                            distmat_local = aligned_re_ranking(
                                local_q_g_dist, local_q_q_dist, local_g_g_dist,
                                k1=k1, k2=k2, lambda_value=l)
                            del local_q_g_dist, local_q_q_dist, local_g_g_dist
                            gc.collect()
                            distmat = l_w * distmat_global + (1 - l_w) * distmat_local
                            cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
                            for r in [1]:
                                if max < (mAP + cmc[r - 1]) / 2:
                                    max = (mAP + cmc[r - 1]) / 2
                                    plist = [k1, k2, l, mAP, cmc[r - 1]]
                                    print("====k1=%d=====k2=%d=====l=%f=====l_w=%f" % (k1, k2, l, l_w))
                                    print("CMC curve, Rank-%d:%.4f, map:%.4f, final: %.4f" % (
                                        r, cmc[r - 1], mAP, (mAP + cmc[r - 1]) / 2))
                        else:
                            # distmat = re_ranking(qf, gf, k1=k1, k2=k2, lambda_value=l)
                            distmat = aligned_re_ranking(
                                global_local_q_g_dist, global_local_q_q_dist,
                                global_local_g_g_dist, k1=k1, k2=k2, lambda_value=l)
                            cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
                            for r in [1]:
                                if max < (mAP + cmc[r - 1]) / 2:
                                    max = (mAP + cmc[r - 1]) / 2
                                    plist = [k1, k2, l, l_w]
                                    print("====k1=%d=====k2=%d=====l=%f" % (k1, k2, l))
                                    print("CMC curve, Rank-%d:%.4f, map:%.4f, final: %.4f" % (
                                        r, cmc[r - 1], mAP, (mAP + cmc[r - 1]) / 2))
            print(max, plist)
        else:
            if self.aligned_test or self.pcb_test or self.new_pcb_test:
                distmat_global = aligned_re_ranking(
                    global_q_g_dist, global_q_q_dist, global_g_g_dist,
                    k1=6, k2=3, lambda_value=0.80)
                # free the large pairwise matrices as soon as they are consumed
                del global_q_g_dist, global_q_q_dist, global_g_g_dist
                gc.collect()
                distmat_local = aligned_re_ranking(
                    local_q_g_dist, local_q_q_dist, local_g_g_dist,
                    k1=6, k2=3, lambda_value=0.80)
                del local_q_g_dist, local_q_q_dist, local_g_g_dist
                gc.collect()
                # fixed 0.96 / 0.04 global-local blend
                distmat = 0.96 * distmat_global + (1 - 0.96) * distmat_local
                # distmat = aligned_re_ranking(
                #     global_q_g_dist, global_q_q_dist, global_g_g_dist, k1=6, k2=3, lambda_value=0.80)
                # distmat_global = re_ranking(qf, gf, k1=6, k2=3, lambda_value=0.80)
                # distmat_local = re_ranking(local_qf, local_gf, k1=6, k2=3, lambda_value=0.80)
                # distmat = 0.96 * distmat_global + (1 - 0.96) * distmat_local
            else:
                distmat = re_ranking(qf, gf, k1=7, k2=3, lambda_value=0.85)
            # path_dist = os.path.join('./model_dist/global_local', 'data')
            # if not os.path.exists(path_dist):
            #     os.makedirs(path_dist)
            # print('Distmat_Shape', distmat.shape)
            # np.save(os.path.join(path_dist, 'dist.npy'), distmat)
            # print("Save Npy Done")
            # export the top-200 ranking as JSON
            self.write_json_results_2(
                distmat,
                self.datasets,
                save_dir=osp.join('./new_experiment/json_output', 'writerank_nrtireid'),
                topk=200,
                cat_num=0
            )
    else:
        print('Entering Concated')
        # distmat_1 = np.load('./model_dist/global_local/diedai/dist.npy')
        # distmat_2 = np.load('./model_dist/global_local/diedai_2/dist.npy')
        # load a pre-computed distance matrix instead of recomputing
        distmat = np.load('./model_dist/global_local/data/dist.npy')
        # distmat = np.hstack((distmat_1, distmat_2))
        print("Dismat Concated Done")
        print("Entering Write Json File")
        self.write_json_results_2(
            distmat,
            self.datasets,
            save_dir=osp.join('./new_experiment/json_output', 'writerank_nrtireid'),
            topk=200,
            cat_num=0
        )
def compute(self):
    """Evaluate retrieval accuracy for two feature sets and their weighted fusion.

    Concatenates the per-batch features accumulated in ``self.feats1`` and
    ``self.feats2``, optionally L2-normalizes them, splits query/gallery at
    ``self.num_query``, builds squared-euclidean distance matrices, and sweeps
    the fusion weight ``lam / 10`` over [0, 1], printing mAP and CMC at every
    step.

    Returns:
        tuple: ``(cmc, mAP)`` from the LAST step of the sweep (weight 1.0,
        i.e. ``distmat2`` alone) — same as the original loop's final state.

    Note: the original also built a distance matrix from ``self.feats`` and
    sliced ``self.labels``, but both results were overwritten/unused before
    the sweep; that dead work is removed here.
    """
    feats1 = torch.cat(self.feats1, dim=0)
    feats2 = torch.cat(self.feats2, dim=0)
    if self.feat_norm == 'yes':
        print("The test feature is normalized")
        feats1 = torch.nn.functional.normalize(feats1, dim=1, p=2)
        feats2 = torch.nn.functional.normalize(feats2, dim=1, p=2)

    # query split
    qf1 = feats1[:self.num_query]
    qf2 = feats2[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    # gallery split
    gf1 = feats1[self.num_query:]
    gf2 = feats2[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])

    m, n = qf1.shape[0], gf1.shape[0]
    # squared euclidean: |q|^2 + |g|^2 - 2*q.g (addmm_ adds the cross term)
    distmat1 = torch.pow(qf1, 2).sum(dim=1, keepdim=True).expand(m, n) + \
               torch.pow(gf1, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    # keyword beta/alpha form: the positional overload is deprecated/removed
    distmat1.addmm_(qf1, gf1.t(), beta=1, alpha=-2)
    distmat2 = torch.pow(qf2, 2).sum(dim=1, keepdim=True).expand(m, n) + \
               torch.pow(gf2, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat2.addmm_(qf2, gf2.t(), beta=1, alpha=-2)
    distmat1 = distmat1.cpu().numpy()
    distmat2 = distmat2.cpu().numpy()

    ranks = [1, 3, 5, 10]
    # Sweep the fusion weight from 0.0 to 1.0 in steps of 0.1 and report
    # metrics for each blend of the two distance matrices.
    for lam in range(0, 11):
        print(lam)
        weight = lam * 0.1
        distmat = (1 - weight) * distmat1 + weight * distmat2
        cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
        print("Results ----------")
        print("mAP: {:.2%}".format(mAP))
        print("CMC curve")
        for r in ranks:
            print("Rank-{:<3}: {:.2%}".format(r, cmc[r - 1]))
        print("------------------")
    return cmc, mAP
def compute(self):
    """Evaluate retrieval accuracy, optionally with k-reciprocal re-ranking.

    Concatenates per-batch features from ``self.feats``, L2-normalizes them,
    splits query/gallery at ``self.num_query``, and evaluates the
    query-gallery euclidean distance matrix with ``eval_func``.  When
    ``self.re_rank`` is truthy, the matrix is first refined with
    ``re_ranking`` (which additionally needs query-query and gallery-gallery
    distances).

    Returns:
        tuple: ``(cmc, mAP)`` as produced by ``eval_func``.

    Note: the original also built the same q-g distance matrix with torch
    ``addmm_`` and full ``pids``/``camids`` arrays, none of which were used;
    that dead work is removed here.
    """
    feats = torch.cat(self.feats, dim=0)
    # F.normalize clamps the norm away from zero (eps), unlike a bare
    # division, so an all-zero feature row cannot produce NaNs.
    feats = torch.nn.functional.normalize(feats, p=2, dim=1)

    # query split
    qf = feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    # gallery split
    gf = feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])

    # all pairwise distances are computed on CPU numpy arrays
    qf = qf.cpu().numpy()
    gf = gf.cpu().numpy()
    distmat_q_g = euclidean_dist_cpu(qf, gf)

    start = time.time()
    if self.re_rank:
        distmat_q_q = euclidean_dist_cpu(qf, qf)
        distmat_g_g = euclidean_dist_cpu(gf, gf)
        distmat = re_ranking(distmat_q_g, distmat_q_q, distmat_g_g)
        duration = time.time() - start
        print(f"Re-ranking running in {duration}")
    else:
        distmat = distmat_q_g
    cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    return cmc, mAP
def compute(self):
    """Camera-aware evaluation with feature refinement and re-ranking.

    Pipeline (the refinement helpers are project functions whose exact
    semantics are not visible here — they appear to remove per-camera bias
    and smooth features within camera sets; confirm against their defs):
      1. optionally L2-normalize appearance and camera features,
      2. ``compute_P2`` / ``meanfeat_sub`` on query and gallery features,
      3. three rounds of ``mergesetfeat`` on gallery, then on query,
      4. k-reciprocal ``re_ranking`` on the refined features (on GPU),
      5. adjust distances with the camera-feature distance (-0.1 weight),
         a same-predicted-camera penalty (+1.0), and ``add_space``.

    Returns:
        tuple: ``(cmc, mAP)`` from ``eval_func`` when
        ``self.validation_flag`` is set, otherwise zeros.

    Side effects: saves intermediate matrices to ``self.output_dir`` and a
    top-100 ranking to ``result.txt`` when ``self.output_flag`` is set.
    """
    feats = torch.cat(self.feats, dim=0)
    camera_feats = torch.cat(self.camera_feats, dim=0)
    if self.feat_norm == 'yes':
        feats = torch.nn.functional.normalize(feats, dim=1, p=2)
        camera_feats = torch.nn.functional.normalize(camera_feats, dim=1, p=2)

    # query split; np.int was removed in NumPy 1.24 — use an explicit dtype
    qf = feats[:self.num_query]
    q_campred = np.asarray(self.camera_scores[:self.num_query], np.int64)
    qcf = camera_feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    # gallery split
    gf = feats[self.num_query:]
    g_campred = np.asarray(self.camera_scores[self.num_query:], np.int64)
    gcf = camera_feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])

    # float16 halves memory for the (possibly large) feature matrices
    qf = qf.cpu().numpy().astype(np.float16)
    gf = gf.cpu().numpy().astype(np.float16)
    P, neg_vec = compute_P2(qf, gf, g_campred, 0.02)
    qf = meanfeat_sub(P, neg_vec, qf, q_campred)
    gf = meanfeat_sub(P, neg_vec, gf, g_campred)
    # iterative set-feature smoothing: gallery first, then query vs gallery
    gf_new = gf.copy()
    for _ in range(3):
        gf_new = mergesetfeat(gf_new, g_campred, gf, g_campred, 0.03, 50)
    qf_new = qf.copy()
    for _ in range(3):
        qf_new = mergesetfeat(qf_new, q_campred, gf_new, g_campred, 0.03, 50)

    qf = torch.from_numpy(qf_new).cuda()
    gf = torch.from_numpy(gf_new).cuda()
    distmat = re_ranking(qf, gf, k1=30, k2=8, lambda_value=0.3)
    camdistmat = euclidean_dist(qcf, gcf).cpu().numpy()
    if self.output_flag:
        np.save(os.path.join(self.output_dir, 'distmat.npy'), distmat)
        np.save(os.path.join(self.output_dir, 'q_pred.npy'), q_campred)
        np.save(os.path.join(self.output_dir, 'g_pred.npy'), g_campred)
        np.save(os.path.join(self.output_dir, 'camdistmat.npy'), camdistmat)

    # similar camera features slightly lower the distance
    distmat -= camdistmat * 0.1
    # penalize pairs predicted to come from the same camera
    # (.T on a 1-D array is a no-op; broadcasting does the pairwise compare)
    campredmat = np.equal(q_campred.reshape(-1, 1), g_campred.T)
    distmat += campredmat * 1.0
    distmat = add_space(distmat, q_campred, g_campred)

    if self.validation_flag:
        cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
    else:
        cmc, mAP = np.zeros((self.max_rank,)), 0.0
    if self.output_flag:
        indices = np.argsort(distmat, axis=1)
        np.savetxt("result.txt", indices[:, :100], fmt="%05d")
    return cmc, mAP
def compute(self):
    """Evaluate retrieval accuracy under one of three fusion modes.

    Modes:
      - ``'no_fusion'``: evaluate the accumulated features as-is.
      - ``'fusion'`` / ``'average'``: append ``self.fusion_feature`` to the
        gallery and extend ``self.pids`` / ``self.camids`` with the fusion
        ids before evaluating.  (The two branches were byte-identical in the
        original code, so they are merged here.)

    Returns:
        tuple: ``(cmc, mAP)`` from ``eval_func``.

    Raises:
        ValueError: for an unknown ``self.mode``.  The original ended with
        ``assert ('no fusion mode')``, which is always true (non-empty
        string) and silently returned ``None``.
    """
    if self.mode == 'no_fusion':
        feats = torch.cat(self.feats, dim=0)
        return self._evaluate(feats)
    if self.mode in ('fusion', 'average'):
        feats = torch.cat(self.feats, dim=0)
        feats = torch.cat([feats, self.fusion_feature], dim=0)
        # extend id lists so the fused gallery entries have labels
        self.pids.extend(list(np.asarray(self.fusion_pid)))
        self.camids.extend(list(np.asarray(self.fusion_camid)))
        return self._evaluate(feats.detach())
    raise ValueError("unknown fusion mode: {!r}".format(self.mode))

def _evaluate(self, feats):
    """Split query/gallery at num_query, build the squared-euclidean
    distance matrix, and return ``eval_func``'s ``(cmc, mAP)``."""
    qf = feats[:self.num_query]
    q_pids = np.asarray(self.pids[:self.num_query])
    q_camids = np.asarray(self.camids[:self.num_query])
    gf = feats[self.num_query:]
    g_pids = np.asarray(self.pids[self.num_query:])
    g_camids = np.asarray(self.camids[self.num_query:])
    m, n = qf.shape[0], gf.shape[0]
    # |q|^2 + |g|^2 - 2*q.g via addmm_ (keyword beta/alpha form)
    distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
              torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    distmat.addmm_(qf, gf.t(), beta=1, alpha=-2)
    distmat = distmat.cpu().numpy()
    return eval_func(distmat, q_pids, g_pids, q_camids, g_camids)
# Ensemble loop: run inference once per model weight file and accumulate the
# L2-normalized distance matrices into `mat`.  `root`, `mat`, `num_model` and
# `pic_num` are defined above this chunk — presumably `mat` is pre-initialized
# to zeros of the right shape; confirm against the preceding lines.
for weight in model_pred:
    weight = root + weight
    dismat, q_paths, g_paths = main(w=weight)
    # L2-normalize the distance matrix obtained from this model
    dismat = normalize(dismat, axis=1, norm='l2')
    # NOTE(review): q_path/g_path are overwritten each iteration, so only the
    # last model's paths survive — presumably identical across models; verify.
    q_path = q_paths
    g_path = g_paths
    mat += dismat
# average the accumulated matrices over the number of models
mat /= num_model
# NOTE(review): usage below suggests each row of PATHS holds the ranked
# gallery paths with the query path as the last element — TODO confirm
# against eval_func's is_demo behavior.
PATHS = eval_func(distmat=mat, q_paths=q_path, g_paths=g_path, max_rank=pic_num, is_demo=True)
print('#' * 100)
print('MAKE SUBMISSION.....')
# Build submission records; ids are the numeric file stems of the paths.
result = []
for row in PATHS:
    r = {}
    # last entry of the row is treated as the query image path
    r['query_id'] = int(row[-1].split('/')[-1].split('.')[0])
    ans_id = []
    for p in row[:-1]:
        ans_id.append(int(p.split('/')[-1].split('.')[0]))
    r['ans_ids'] = ans_id
    result.append(r)
]  # closes the `model_list = [...]` literal that opens above this chunk
dist_list = []
# Load each model's pre-computed query-gallery distance matrix.
for name in model_list:
    dist_list.append(np.load('./ensemble_b/distmat_' + name + '.npy'))
# Previously tried weightings, kept for reference:
# weight = [1.0/3, 1.0/3, 1.0/3]
weight = [0.4, 0.25, 0.25, 0.1]
# weight = [0.35, 0.3, 0.3, 0.05]
# weight = [0.25, 0.25, 0.25, 0.25]
# Weighted sum of the per-model distance matrices.
distmat = weight[0] * dist_list[0]
for i in range(1, len(model_list)):
    distmat += weight[i] * dist_list[i]
# NOTE(review): this argsort result is never used — the if-branch doesn't
# need it and the else-branch recomputes it.
dis_sort = np.argsort(distmat, axis=1)
# int64 pids presumably indicate a labelled validation split — report
# rank-1 / mAP; otherwise emit the contest submission JSON.  TODO confirm.
if type(q_pids[0]) is np.int64:
    cmc, mAP = eval_func(distmat, q_pids, g_pids)
    print(('rank 1: {:.3%} mAP: {:.3%}, result: {:.3%}'.format(
        cmc[0], mAP, 0.5 * cmc[0] + 0.5 * mAP)))
else:
    dis_sort = np.argsort(distmat, axis=1)
    # top-200 ranked gallery ids per query, keyed by query id
    submission = {}
    for i in range(q_pids.shape[0]):
        result200 = []
        for rank in range(200):
            result200.append(g_pids[dis_sort[i][rank]])
        submission[q_pids[i]] = result200
    with open("NAIC_ensemble_submission_b.json", 'w', encoding='utf-8') as json_file:
        json.dump(submission, json_file)