def evaluate(model, query_loader, test_loader, queryset, testset):
    """Evaluate `model` on a query/gallery split and print mAP / CMC scores.

    No re-ranking is applied; ranking uses plain Euclidean distances
    between extracted feature vectors.
    """
    model.eval()
    print('extract features, this may take a few minutes')
    query_feats = extract_feature(model, tqdm(query_loader)).numpy()
    gallery_feats = extract_feature(model, tqdm(test_loader)).numpy()

    def score(distance):
        # CMC curve and mean average precision for the given distance matrix.
        cmc_curve = cmc(distance, queryset.ids, testset.ids,
                        queryset.cameras, testset.cameras,
                        separate_camera_set=False,
                        single_gallery_shot=False,
                        first_match_break=True)
        ap = mean_ap(distance, queryset.ids, testset.ids,
                     queryset.cameras, testset.cameras)
        return cmc_curve, ap

    # Euclidean query-to-gallery distances, no re-ranking.
    cmc_curve, ap = score(cdist(query_feats, gallery_feats))
    print(
        '[Without Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'
        .format(ap, cmc_curve[0], cmc_curve[2], cmc_curve[4], cmc_curve[9]))
def evaluate(self):
    """Evaluate the model both with and without k-reciprocal re-ranking,
    printing mAP / CMC scores for each setting."""
    self.model.eval()
    print('extract features, this may take a few minutes')
    query_feats = extract_feature(self.model, tqdm(self.query_loader)).numpy()
    gallery_feats = extract_feature(self.model, tqdm(self.test_loader)).numpy()

    def score(distance):
        # CMC curve and mean average precision for the given distance matrix.
        cmc_curve = cmc(distance, self.queryset.ids, self.testset.ids,
                        self.queryset.cameras, self.testset.cameras,
                        separate_camera_set=False,
                        single_gallery_shot=False,
                        first_match_break=True)
        ap = mean_ap(distance, self.queryset.ids, self.testset.ids,
                     self.queryset.cameras, self.testset.cameras)
        return cmc_curve, ap

    # --- with re-ranking: distances rebuilt from pairwise dot products ---
    q_g = np.dot(query_feats, np.transpose(gallery_feats))
    q_q = np.dot(query_feats, np.transpose(query_feats))
    g_g = np.dot(gallery_feats, np.transpose(gallery_feats))
    cmc_curve, ap = score(re_ranking(q_g, q_q, g_g))
    print('[With Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'
          .format(ap, cmc_curve[0], cmc_curve[2], cmc_curve[4], cmc_curve[9]))
    # --- without re-ranking: plain Euclidean distances --------------------
    cmc_curve, ap = score(cdist(query_feats, gallery_feats))
    print('[Without Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'
          .format(ap, cmc_curve[0], cmc_curve[2], cmc_curve[4], cmc_curve[9]))
def vis(self):
    """Visualize the top-10 gallery matches for a single query image and
    save the figure to show.png.

    NOTE(review): reads module-level `data`, `model` and `opt` rather than
    `self.*` — presumably set up by the surrounding script; verify.
    """
    self.model.eval()
    gallery_path = data.testset.imgs
    gallery_label = data.testset.ids
    # Extract feature
    print('extract features, this may take a few minutes')
    # The single query image is wrapped as a one-batch, one-label iterable
    # so extract_feature can consume it like a DataLoader.
    query_feature = extract_feature(
        model, tqdm([(torch.unsqueeze(data.query_image, 0), 1)]))
    gallery_feature = extract_feature(model, tqdm(data.test_loader))
    # sort images by dot-product similarity between gallery and query features
    query_feature = query_feature.view(-1, 1)
    score = torch.mm(gallery_feature, query_feature)
    score = score.squeeze(1).cpu()
    score = score.numpy()
    index = np.argsort(score)  # from small to large
    index = index[::-1]  # from large to small
    # # Remove junk images
    # junk_index = np.argwhere(gallery_label == -1)
    # mask = np.in1d(index, junk_index, invert=True)
    # index = index[mask]
    # Visualize the rank result: query in slot 1, top-10 matches in slots 2-11
    fig = plt.figure(figsize=(16, 4))
    ax = plt.subplot(1, 11, 1)
    ax.axis('off')
    plt.imshow(plt.imread(opt.query_image))
    ax.set_title('query')
    print('Top 10 images are as follow:')
    for i in range(10):
        img_path = gallery_path[index[i]]
        print(img_path)
        ax = plt.subplot(1, 11, i + 2)
        ax.axis('off')
        plt.imshow(plt.imread(img_path))
        # Title is the first 9 chars of the file name (typically the person id)
        ax.set_title(img_path.split('/')[-1][:9])
    fig.savefig("show.png")
    print('result saved to show.png')
def save_features(self):
    """Extract features for every gallery image and dump them to a CSV file.

    Each output row is the 2048-dim feature vector followed by the image
    path; the header is feat0..feat2047, imageName.

    Fixes vs. the original: leftover debug ``print("A")`` statements were
    removed, and the hand-maintained row counter was replaced with ``zip``.
    """
    self.model.eval()
    # NOTE(review): uses module-level `data` and `model`, not `self.*` —
    # mirrors the sibling vis()/compare() methods; verify intended.
    gallery_path = data.testset.imgs
    # Extract feature
    gallery_feature = extract_feature(model, tqdm(data.test_loader))
    features = gallery_feature.numpy()
    print(np.shape(features))
    gallery_path = np.reshape(gallery_path, (-1, 1))
    print(np.shape(gallery_path))
    outfile = '../../7Data Features/MGNFeatures.csv'
    with open(outfile, 'w') as csvfile:
        wtr = csv.writer(csvfile)
        # Header row: one column per feature dimension plus the image name.
        titles = ['feat' + str(i) for i in range(2048)]
        titles.append("imageName")
        wtr.writerow(np.array(titles))
        # One CSV row per image: feature values then the image path.
        for row, path in zip(features, gallery_path):
            wtr.writerow(np.append(row, path))
def multi_test(self, save_path, epoch=0):
    """Evaluate on the 11 PRCC test splits and report averaged CMC ranks.

    Query features are extracted once and reused for every split; the
    averaged rank-1 score is returned.
    """
    print('start evaluate')
    self.model.eval()
    query_feats = extract_feature(self.model, tqdm(self.query_loader)).numpy()
    # Accumulate per-split scores for each reported rank level.
    collected = {1: [], 3: [], 5: [], 10: [], 20: []}
    for split in range(11):
        split_data = Data(test=split, dataset="prcc")
        print('start evaluate', split)
        scores = self.evaluate_multi_test(split_data, query_feats,
                                          save_path, split, epoch)
        for level, value in zip((1, 3, 5, 10, 20), scores):
            collected[level].append(value)
    r1, r3, r5, r10, r20 = (np.mean(collected[level])
                            for level in (1, 3, 5, 10, 20))
    summary = ('[Average] rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}\n'
               .format(r1, r3, r5, r10, r20))
    print(summary)
    with open(opt.save_path + opt.name + '_accr.txt', 'a') as f:
        f.write(summary)
    return r1
def evaluate(self, save_path, epoch=0):
    """Evaluate (no re-ranking) using clothes labels and append the scores
    to the log file at `save_path`."""
    self.model.eval()
    print('extract features, this may take a few minutes')
    query_feats = extract_feature(self.model, tqdm(self.query_loader)).numpy()
    gallery_feats = extract_feature(self.model, tqdm(self.test_loader)).numpy()

    def score(distance):
        # CMC / mAP with camera and clothes labels, standard protocol
        # (cloth_changing_settings disabled).
        cmc_curve = cmc(distance, self.queryset.ids, self.testset.ids,
                        self.queryset.cameras, self.testset.cameras,
                        self.queryset.clothes, self.testset.clothes,
                        separate_camera_set=False,
                        single_gallery_shot=False,
                        first_match_break=True,
                        cloth_changing_settings=False)
        ap = mean_ap(distance, self.queryset.ids, self.testset.ids,
                     self.queryset.cameras, self.testset.cameras,
                     self.queryset.clothes, self.testset.clothes,
                     cloth_changing_settings=False)
        return cmc_curve, ap

    # Plain Euclidean distance; re-ranking / combined-feature variants
    # were intentionally left disabled here.
    cmc_curve, ap = score(cdist(query_feats, gallery_feats))
    print('[Without Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}'
          .format(ap, cmc_curve[0], cmc_curve[2], cmc_curve[4], cmc_curve[9], cmc_curve[19]))
    with open(save_path, 'a') as f:
        f.write(
            '[Without Re-Ranking] epoch: {:} mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}\n'
            .format(epoch, ap, cmc_curve[0], cmc_curve[2], cmc_curve[4], cmc_curve[9], cmc_curve[19]))
def evaluate(self):
    """Evaluate with and without re-ranking; return both score tuples.

    Returns a dict with keys 're-ranking' and 'without', each mapping to
    (mAP, rank1, rank3, rank5, rank10).
    """
    self.model.eval()
    query_feats = extract_feature(
        self.model, tqdm(self.query_loader, desc='Extracting query features')).numpy()
    gallery_feats = extract_feature(
        self.model, tqdm(self.test_loader, desc='Extracting test features')).numpy()

    def score(distance):
        # CMC curve and mean average precision for the given distance matrix.
        cmc_curve = cmc(distance, self.queryset.ids, self.testset.ids,
                        self.queryset.cameras, self.testset.cameras,
                        separate_camera_set=False,
                        single_gallery_shot=False,
                        first_match_break=True)
        ap = mean_ap(distance, self.queryset.ids, self.testset.ids,
                     self.queryset.cameras, self.testset.cameras)
        return cmc_curve, ap

    # --- with re-ranking -------------------------------------------------
    q_g = np.dot(query_feats, np.transpose(gallery_feats))
    q_q = np.dot(query_feats, np.transpose(query_feats))
    g_g = np.dot(gallery_feats, np.transpose(gallery_feats))
    cmc_curve, ap = score(re_ranking(q_g, q_q, g_g))
    tqdm.write('[With Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'
               .format(ap, cmc_curve[0], cmc_curve[2], cmc_curve[4], cmc_curve[9]))
    retval = {
        're-ranking': (ap, cmc_curve[0], cmc_curve[2], cmc_curve[4], cmc_curve[9])
    }
    # --- without re-ranking ----------------------------------------------
    cmc_curve, ap = score(cdist(query_feats, gallery_feats))
    tqdm.write('[Without Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'
               .format(ap, cmc_curve[0], cmc_curve[2], cmc_curve[4], cmc_curve[9]))
    retval['without'] = (ap, cmc_curve[0], cmc_curve[2], cmc_curve[4], cmc_curve[9])
    return retval
def evaluate_ai(self):
    """Load fixed competition weights, re-rank query/gallery distances and
    write each query's top-100 gallery indices (1-based) to save/NNNNNN.txt.

    Fixes vs. the original: the top-100 list is produced by one stable
    argsort per query instead of 100 repeated argmin passes that also
    destructively overwrote the distance matrix; ``to_csv`` now uses the
    explicit booleans ``header=False`` / ``index=False``.
    """
    self.model.load_state_dict(torch.load('weights/AI_mgn/pyramid_v4.pth'))
    self.model.eval()
    print('extract features, this may take a few minutes')
    qf = extract_feature(self.model, tqdm(self.query_loader)).numpy()
    gf = extract_feature(self.model, tqdm(self.test_loader)).numpy()
    q_g_dist = np.dot(qf, np.transpose(gf))
    q_q_dist = np.dot(qf, np.transpose(qf))
    g_g_dist = np.dot(gf, np.transpose(gf))
    dist = re_ranking(q_g_dist, q_q_dist, g_g_dist)
    for i in range(dist.shape[0]):
        # kind='stable' reproduces argmin's first-occurrence tie-breaking;
        # +1 converts 0-based indices to the expected 1-based ids.
        top100 = np.argsort(dist[i], kind='stable')[:100] + 1
        txt_list = pd.DataFrame(top100, dtype=np.int32)
        txt_list.to_csv('save/{:0>6d}.txt'.format(i + 1),
                        header=False, index=False)
def evaluate(self):
    """Print mAP / CMC scores without and with re-ranking."""
    self.model.eval()
    print('extract features, this may take a few minutes')
    query_feats = extract_feature(self.model, tqdm(self.query_loader))
    gallery_feats = extract_feature(self.model, tqdm(self.test_loader))

    # --- plain Euclidean distances ---------------------------------------
    distmat = euclidean_dist(query_feats, gallery_feats)
    cmc_curve, ap = getRank(distmat, self.queryset.pids, self.testset.pids,
                            self.queryset.camids, self.testset.camids)
    print(
        '[Without Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'
        .format(ap, cmc_curve[0], cmc_curve[2], cmc_curve[4], cmc_curve[9]))

    # --- re-ranked distances ---------------------------------------------
    distmat = re_rank(query_feats, gallery_feats)
    cmc_curve, ap = getRank(distmat, self.queryset.pids, self.testset.pids,
                            self.queryset.camids, self.testset.camids)
    print(
        '[With Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f}'
        .format(ap, cmc_curve[0], cmc_curve[2], cmc_curve[4], cmc_curve[9]))
def addLabeled(model, train_loader, unlabeled_loader): model.eval() # (choosen_data, _i) = train_loader[0] query_feature = extract_feature(model, tqdm(train_loader)) gallery_feature = extract_feature(model, tqdm(unlabeled_loader)) query_feature_numpy = query_feature.numpy() query_feature_numpy = query_feature_numpy.T query_feature = torch.from_numpy(query_feature_numpy) # sort images # query_feature = query_feature.view(-1, 1) score = torch.mm(gallery_feature, query_feature) score = score.squeeze(1).cpu() score = score.numpy() # index = np.argsort(score) # from small to large # index = index[::-1] # from large to small # min = 100; # linenum = 0; # min_linenum = 0; # min_columnnum = 0; # for line in score: # index = np.argsort(line) # index = index[::-1] # if score[linenum][index[0]] < min: # min = score[linenum][index[0]] # # linenum += 1 # choose some valuable data new_labeled_data = unlabeled_loader.dataset.imgs[0:10] for new_labeled in new_labeled_data: unlabeled_loader.dataset.imgs.remove(new_labeled) train_loader.dataset.imgs.extend(new_labeled_data) return new_labeled_data
def compare(self):
    """Print the dot-product similarity between the features of two images.

    NOTE(review): reads module-level `model` and `data` instead of `self.*`,
    mirroring the sibling visualization methods.
    """
    self.model.eval()
    # Extract feature
    print('extract features, this may take a few time')
    feature_a = extract_feature(
        model, tqdm([(torch.unsqueeze(data.compare_img_a, 0), 1)]))
    feature_b = extract_feature(
        model, tqdm([(torch.unsqueeze(data.compare_img_b, 0), 1)]))
    # sort images
    # feature_a = feature_a.view(-1, 1)
    feature_b = feature_b.view(-1, 1)
    print(feature_a)
    print(feature_b)
    print(feature_a.size())
    similarity = torch.mm(feature_a, feature_b).squeeze(1).cpu().numpy()
    print(similarity)
def evaluate(self):
    """Report mAP with and without re-ranking (this variant passes the
    query/test sets directly to mean_ap)."""
    self.model.eval()
    print('extract features, this may take a few minutes')
    query_feats = extract_feature(self.model, tqdm(self.query_loader)).numpy()
    gallery_feats = extract_feature(self.model, tqdm(self.test_loader)).numpy()
    queryset = self.queryset
    testset = self.testset
    # --- with re-ranking: distances rebuilt from pairwise dot products ---
    q_g = np.dot(query_feats, np.transpose(gallery_feats))
    q_q = np.dot(query_feats, np.transpose(query_feats))
    g_g = np.dot(gallery_feats, np.transpose(gallery_feats))
    ap = mean_ap(re_ranking(q_g, q_q, g_g), queryset, testset)
    print('[With Re-Ranking] mAP: {:.4f}'.format(ap))
    # --- without re-ranking: plain Euclidean distances --------------------
    ap = mean_ap(cdist(query_feats, gallery_feats), queryset, testset)
    print('[Without Re-Ranking] mAP: {:.4f}'.format(ap))
def multi_test():
    """Evaluate over the 10 test splits and log the averaged CMC ranks.

    Query features are extracted once from the initial split and reused.

    Fix vs. the original: the summary written to the accuracy file was
    missing a trailing newline, so successive runs were concatenated onto
    one line (the sibling multi_test method writes the same summary with
    a trailing newline).
    """
    model = Model()
    loss = Loss(model)
    data = Data()
    main = Main(model, loss, data)
    print('start evaluate')
    main.load_model(opt.weight, 0)
    main.model.eval()
    qf = extract_feature(main.model, tqdm(main.query_loader)).numpy()
    rank1, rank3, rank5, rank10, rank20 = [], [], [], [], []
    for i in range(10):
        # Rebuild Data/Main per split; weights are reloaded each time.
        data = Data(i)
        main = Main(model, loss, data)
        print('start evaluate', i)
        main.load_model(opt.weight, 0)
        r1, r3, r5, r10, r20 = main.evaluate_multi_test(
            qf, opt.save_path + opt.name + '_accr.txt', i)
        rank1.append(r1)
        rank3.append(r3)
        rank5.append(r5)
        rank10.append(r10)
        rank20.append(r20)
    r1 = np.mean(rank1)
    r3 = np.mean(rank3)
    r5 = np.mean(rank5)
    r10 = np.mean(rank10)
    r20 = np.mean(rank20)
    print('[Average] rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}'
          .format(r1, r3, r5, r10, r20))
    with open(opt.save_path + opt.name + '_accr.txt', 'a') as f:
        # Trailing '\n' keeps one summary per line across appended runs.
        f.write(
            '[Average] rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}\n'
            .format(r1, r3, r5, r10, r20))
def evaluate(self, save_path, epoch=None):
    """Evaluate without re-ranking, track the per-stage best rank-1 score,
    and append the results to the log file at `save_path`.

    Fix vs. the original: the four copy-pasted per-stage ``if`` chains that
    updated ``opt.sN_best_r1`` / ``opt.sN_best_epoch`` are collapsed into a
    single getattr/setattr pair; behavior for stages 0-3 is unchanged.
    """
    self.model.eval()
    print('extract features, this may take a few minutes')
    qf = extract_feature(self.model, tqdm(self.query_loader)).numpy()
    gf = extract_feature(self.model, tqdm(self.test_loader)).numpy()

    def rank(dist):
        # CMC curve + mAP under the standard single-shot protocol.
        r = cmc(dist, self.queryset.ids, self.testset.ids,
                self.queryset.cameras, self.testset.cameras,
                separate_camera_set=False,
                single_gallery_shot=False,
                first_match_break=True)
        m_ap = mean_ap(dist, self.queryset.ids, self.testset.ids,
                       self.queryset.cameras, self.testset.cameras)
        return r, m_ap

    # Plain Euclidean distance; the re-ranking and combine_feature
    # variants were intentionally left disabled in the original.
    dist = cdist(qf, gf)
    r, m_ap = rank(dist)
    '''added by Haorui'''
    # Remember the best rank-1 (and its epoch) for the current training
    # stage on opt; only stages 0-3 have these attributes.
    if epoch is not None and opt.stage in (0, 1, 2, 3):
        best_r1_attr = 's{}_best_r1'.format(opt.stage)
        if r[0] >= getattr(opt, best_r1_attr):
            setattr(opt, best_r1_attr, r[0])
            setattr(opt, 's{}_best_epoch'.format(opt.stage), epoch)
    '''added by Haorui'''
    print('[Without Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}'
          .format(m_ap, r[0], r[2], r[4], r[9], r[19]))
    with open(save_path, 'a') as f:
        if epoch is not None:
            f.write(
                '[Without Re-Ranking] epoch: {:} mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}\n'
                .format(epoch, m_ap, r[0], r[2], r[4], r[9], r[19]))
        else:
            f.write(
                '[Without Re-Ranking] mAP: {:.4f} rank1: {:.4f} rank3: {:.4f} rank5: {:.4f} rank10: {:.4f} rank20:{:.4f}\n'
                .format(m_ap, r[0], r[2], r[4], r[9], r[19]))
def main():
    """Embed all test images with a pretrained MGN, choose a cluster count k
    by silhouette score, cluster with KMeans, and copy each image into a
    directory named after its cluster id (also writing a path,cluster CSV).

    Fixes vs. the original: removed a duplicated dead ``return scores[x]``
    in get_score and the unused ``embedding`` loop variable in the output
    loop.
    """
    model = MGN()
    model.load_state_dict(torch.load(opt.weight))
    cuda_model = model.to("cuda")
    embeddings = None
    file_names = None
    cuda_model.eval()
    for dataset in opt.data_path:
        test_transform = transforms.Compose([
            transforms.Resize((384, 128), interpolation=3),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        testset = Market1501(test_transform, 'test', dataset)
        data_loader = dataloader.DataLoader(testset, batch_size=16,
                                            num_workers=8, pin_memory=True)
        print("generating embeddings for", dataset)
        features = extract_feature(cuda_model, tqdm(data_loader)).numpy()
        names = np.array(testset.imgs)
        # Accumulate embeddings/names across all configured datasets.
        if embeddings is None:
            embeddings = features
            file_names = names
        else:
            embeddings = np.concatenate((embeddings, features))
            file_names = np.concatenate((file_names, names))
    if opt.embeddings_path is not None:
        np.save(opt.embeddings_path, embeddings)
        np.save(opt.embeddings_path + "_names", file_names)
    print("performing clustering")
    print("finding number of clusters")
    embeddings = StandardScaler().fit_transform(embeddings)
    clusterings = {}
    scores = {}

    def get_score(x):
        # Memoized silhouette score for a KMeans clustering with x clusters.
        x = int(x)
        if x < 2:
            return 0
        if x not in clusterings:
            clusterings[x] = KMeans(x).fit(embeddings)
            scores[x] = silhouette_score(embeddings, clusterings[x].labels_)
        return scores[x]

    # Coarse-to-fine search: powers of two first, then every k in a window
    # around the best coarse candidate.
    candidates = [2**x for x in range(1, 7)]
    candidate = candidates[np.argmax(
        np.array([get_score(a) for a in candidates]))]
    possible_ks = [x for x in range(candidate // 2, (candidate * 2) + 1)]
    k = possible_ks[np.argmax(
        np.array([get_score(a) for a in tqdm(possible_ks)]))]
    print("found best k", k, "with silhouette score of", scores[k])
    clustering = clusterings[k]
    with open(opt.output_file, "w") as csvfile:
        csv_writer = csv.writer(csvfile)
        for path, cluster_id in zip(file_names, clustering.labels_):
            csv_writer.writerow([path, str(cluster_id)])
            if not os.path.isdir(str(cluster_id)):
                os.mkdir(str(cluster_id))
            # NOTE(review): splits on '\\' — assumes Windows-style paths.
            shutil.copyfile(path,
                            str(cluster_id) + "/" + path.split("\\")[-1])
def evaluate(self, epoch):
    """Extract features and dump per-query ranking results as JSON for three
    settings — plain distance, re-ranked, and query-expanded + re-ranked —
    under metric/metric_epoch<epoch>/.
    """
    self.model.eval()
    print('extract features, this may take a few minutes')
    qf = extract_feature(self.model, tqdm(self.query_loader)).numpy()
    gf = extract_feature(self.model, tqdm(self.test_loader)).numpy()
    epoch_json = 'metric/metric_epoch' + str(epoch)
    # NOTE(review): os.makedirs without exist_ok — raises if the epoch
    # directory already exists; presumably each epoch is evaluated once.
    os.makedirs(epoch_json)

    def result(distmat, query_ids=None, gallery_ids=None,
               query_cams=None, gallery_cams=None, title=None):
        """Write, for each query, the gallery ids sorted by distance (with
        same-id AND same-camera entries kept only when either differs) to
        <epoch_json>/<title>.json."""
        m, n = distmat.shape
        # Fill up default values
        if query_ids is None:
            query_ids = np.arange(m)
        if gallery_ids is None:
            gallery_ids = np.arange(n)
        if query_cams is None:
            query_cams = np.zeros(m).astype(np.int32)
        if gallery_cams is None:
            gallery_cams = np.ones(n).astype(np.int32)
        # Ensure numpy array
        query_ids = np.asarray(query_ids)
        gallery_ids = np.asarray(gallery_ids)
        query_cams = np.asarray(query_cams)
        gallery_cams = np.asarray(gallery_cams)
        # Sort and find correct matches
        indices = np.argsort(distmat, axis=1)
        dd = []
        for i in range(m):
            # Filter out the same id and same camera
            d = {}
            d['query_id'] = query_ids[i].astype(np.int32).tolist()
            valid = ((gallery_ids[indices[i]] != query_ids[i]) &
                     (gallery_cams[indices[i]] != query_cams[i]))
            ans_ids = gallery_ids[indices[i]][valid]
            d['ans_ids'] = ans_ids.astype(np.int32).tolist()
            dd.append(d)
        with open(epoch_json + '/' + title + '.json', 'w',
                  encoding='utf-8') as json_file:
            json.dump(dd, json_file, ensure_ascii=False)
        print('json finished')

    #########################no re rank##########################
    dist = cdist(qf, gf)
    result(dist, self.queryset.ids, self.testset.ids, title='without rerank')
    ######################### re rank##########################
    q_g_dist = np.dot(qf, np.transpose(gf))
    q_q_dist = np.dot(qf, np.transpose(qf))
    g_g_dist = np.dot(gf, np.transpose(gf))
    dist = re_ranking(q_g_dist, q_q_dist, g_g_dist)
    result(dist, self.queryset.ids, self.testset.ids, title='rerank')
    ######################### query expansion##########################
    # Each query feature is averaged with its T nearest gallery features
    # (per the re-ranked distances above), then L2-normalized, and the
    # re-ranking is repeated on the expanded queries.
    qf_new = []
    T = 9
    for i in range(len(dist)):
        indice = np.argsort(dist[i])[:T]
        temp = np.concatenate((qf[i][np.newaxis, :], gf[indice]), axis=0)
        qf_new.append(np.mean(temp, axis=0, keepdims=True))
    qf = np.squeeze(np.array(qf_new))
    # feature norm
    q_n = np.linalg.norm(qf, axis=1, keepdims=True)
    qf = qf / q_n
    q_g_dist = np.dot(qf, np.transpose(gf))
    q_q_dist = np.dot(qf, np.transpose(qf))
    g_g_dist = np.dot(gf, np.transpose(gf))
    dist = re_ranking(q_g_dist, q_q_dist, g_g_dist)
    result(dist, self.queryset.ids, self.testset.ids, title='query_expansion')
def test_my(self):
    """Extract features for the custom dataset, compute the pairwise
    distance matrix, and drop into an interactive shell for inspection."""
    self.model.eval()
    feats = extract_feature(self.model, tqdm(self.mydata_loader)).numpy()
    dist = cdist(feats, feats)
    embed()
# --- model-ensemble feature extraction (pose + jitter MGN variants) ------
# NOTE(review): relies on `data_pose`, `pose_path` and `jitter_path` being
# defined earlier in the script (not visible in this chunk) — verify.
data_jitter = Data()
model_pose = MGN_pose().to('cuda')
model_jitter = MGN().to('cuda')
print('start evaluate')
model_pose.load_state_dict(torch.load(pose_path))
model_jitter.load_state_dict(torch.load(jitter_path))
model_pose.eval()
model_jitter.eval()
qf_pose = extract_feature_pose(model_pose, tqdm(data_pose.query_loader)).numpy()
gf_pose = extract_feature_pose(model_pose, tqdm(data_pose.test_loader)).numpy()
qf_jitter = extract_feature(model_jitter, tqdm(data_jitter.query_loader)).numpy()
gf_jitter = extract_feature(model_jitter, tqdm(data_jitter.test_loader)).numpy()
# Concatenate the two models' embeddings along the feature axis.
qf = np.concatenate((qf_pose, qf_jitter), 1)
gf = np.concatenate((gf_pose, gf_jitter), 1)
qf = np.squeeze(np.array(qf))
# feature norm (L2-normalize each query row)
q_n = np.linalg.norm(qf, axis=1, keepdims=True)
qf = qf / q_n
gf = np.squeeze(np.array(gf))
# feature norm (L2-normalize each gallery row)
g_n = np.linalg.norm(gf, axis=1, keepdims=True)
gf = gf / g_n