def grid_result_eval(predict_path, log_path='grid_eval.log'):
    """Evaluate GRID re-id predictions and log cumulative rank accuracies.

    Reads a space-delimited matrix of predicted gallery ids (one row per
    probe) from ``predict_path``, counts how often the true match appears
    within rank 1/5/10/20/50, and appends the first three accuracies to
    ``log_path``.
    """
    pids4probes = np.genfromtxt(predict_path, delimiter=' ')
    # probe_shoot[k] counts probes whose true match falls within the k-th
    # bound below; hits are cumulative (a rank-1 hit also counts for rank-5
    # through rank-50).
    rank_bounds = (1, 5, 10, 20, 50)
    probe_shoot = [0] * len(rank_bounds)
    for i, pids in enumerate(pids4probes):
        for j, pid in enumerate(pids):
            # NOTE(review): 775 looks like the fixed probe->gallery id
            # offset of this GRID split -- confirm against the data files.
            if pid - i == 775:
                for k, bound in enumerate(rank_bounds):
                    if j < bound:
                        for slot in range(k, len(rank_bounds)):
                            probe_shoot[slot] += 1
                        break
                break
    probe_acc = [shoot / len(pids4probes) for shoot in probe_shoot]
    write(log_path, predict_path + '\n')
    write(log_path, '%.2f\t%.2f\t%.2f\n' % (probe_acc[0], probe_acc[1], probe_acc[2]))
    print(predict_path)
    print(probe_acc)
def market_result_eval(result_argsort, test_info, query_info, log_path='market_result_eval.log'):
    """Evaluate a precomputed Market-1501 ranking and log rank-1 and mAP.

    ``result_argsort`` holds, per query, the gallery indices sorted by
    similarity; ``test_info`` / ``query_info`` are the gallery and probe
    metadata expected by ``map_rank_quick_eval``.
    """
    ranking = result_argsort
    print('start evaluate map and rank acc')
    # NOTE(review): this variant unpacks two values, while sibling variants
    # of map_rank_quick_eval return four -- confirm which signature the
    # helper currently has.
    rank1, mAP = map_rank_quick_eval(query_info, test_info, ranking)
    write(log_path, '%f\t%f\n' % (rank1, mAP))
def result_eval(predict_path, log_path='market_result_eval.log', TEST='Market-1501/test', QUERY='Market-1501/probe'):
    """Compute averaged CMC (rank-1/5/10) and mAP for a ranked prediction file.

    Loads the per-query ranking from ``predict_path``, fetches camera/label
    metadata for the gallery (``TEST``) and query (``QUERY``) sets via
    ``get_id``, accumulates per-query CMC curves and AP from ``evaluate``,
    then prints the summary and appends it to ``log_path``.
    """
    res = np.genfromtxt(predict_path, delimiter=' ')
    print('predict info get, extract gallery info start')
    gallery_cam, gallery_label = get_id(TEST)
    gallery_cam = np.array(gallery_cam)
    gallery_label = np.array(gallery_label)
    query_cam, query_label = get_id(QUERY)
    CMC = torch.IntTensor(len(gallery_label)).zero_()
    ap = 0.0
    num_query = len(query_label)
    for q in range(num_query):
        ap_tmp, CMC_tmp = evaluate(res[q], query_label[q], query_cam[q], gallery_label, gallery_cam)
        # A leading -1 marks a query with no valid gallery candidate; it
        # contributes nothing to CMC or AP (but still counts in the mean).
        if CMC_tmp[0] == -1:
            continue
        CMC = CMC + CMC_tmp
        ap += ap_tmp
    CMC = CMC.float()
    CMC = CMC / num_query  # average CMC over every query, including skipped ones
    mean_ap = ap / num_query
    print(datetime.now().strftime("%Y.%m.%d-%H:%M:%S\t") + predict_path
          + '\nRank@1:%f Rank@5:%f Rank@10:%f mAP:%f\n' % (CMC[0], CMC[4], CMC[9], mean_ap))
    write(
        log_path,
        datetime.now().strftime("%Y.%m.%d-%H:%M:%S\t") + predict_path
        + '\tRank@1 Rank@5 Rank@10 mAP:\n%f\t%f\t%f\t%f\n' % (CMC[0], CMC[4], CMC[9], mean_ap))
def market_result_eval(result_argsort, test_info, query_info, log_path='market_result_eval.log'):
    """Evaluate a precomputed Market-1501 ranking; log rank-1/5/10 and mAP.

    ``result_argsort`` is the per-query gallery ranking; the four metrics
    returned by ``map_rank_quick_eval`` are appended tab-separated to
    ``log_path``.
    """
    ranking = result_argsort
    print('start evaluate map and rank acc')
    metrics = map_rank_quick_eval(query_info, test_info, ranking)
    rank1_acc, rank5_acc, rank10_acc, mAP = metrics
    write(log_path, '%f\t%f\t%f\t%f\n' % (rank1_acc, rank5_acc, rank10_acc, mAP))
def market_result_eval(predict_path, log_path='market_result_eval.log', TEST='Market-1501/test', QUERY='Market-1501/probe'):
    """Load a predicted ranking from disk and evaluate it on Market-1501.

    Extracts gallery (``TEST``) and probe (``QUERY``) metadata via
    ``extract_info``, evaluates with ``map_rank_quick_eval``, and appends
    the predict path plus a LaTeX-style '& r1 & r5 & r10 & mAP' row to
    ``log_path``.
    """
    ranking = np.genfromtxt(predict_path, delimiter=' ')
    print('predict info get, extract gallery info start')
    test_info = extract_info(TEST)
    print('extract probe info start')
    query_info = extract_info(QUERY)
    print('start evaluate map and rank acc')
    rank1_acc, rank5_acc, rank10_acc, mAP = map_rank_quick_eval(query_info, test_info, ranking)
    write(log_path, predict_path + '\n')
    write(log_path, '& %f & %f & %f & %f\n' % (rank1_acc, rank5_acc, rank10_acc, mAP))
def market_result_eval(sort_gallery_idx, log_path='market_result_eval.log', TEST='Market-1501/test', QUERY='Market-1501/probe'):
    """Evaluate an in-memory gallery ranking on Market-1501 and log metrics.

    ``sort_gallery_idx`` is the per-query gallery ranking; gallery and probe
    metadata are read from ``TEST`` / ``QUERY`` via ``extract_info`` and the
    four resulting metrics are appended tab-separated to ``log_path``.
    """
    print('predict info get, extract gallery info start')
    test_info = extract_info(TEST)
    print('extract probe info start')
    query_info = extract_info(QUERY)
    print('start evaluate map and rank acc')
    metrics = map_rank_quick_eval(query_info, test_info, sort_gallery_idx)
    rank1, rank5, rank10, mAP = metrics
    write(log_path, '%f\t%f\t%f\t%f\n' % (rank1, rank5, rank10, mAP))
# Script section: compute CMC and mAP on GPU, dumping per-query scores and
# ranked gallery indices to text files under eval/<opt.name>/.
# NOTE(review): assumes a CUDA device is available and that query_feature,
# gallery_feature, *_label, *_cam, evaluate, opt and write are defined
# earlier in the file -- confirm upstream.
query_feature = query_feature.cuda()
gallery_feature = gallery_feature.cuda()
# print(query_feature.shape)
CMC = torch.IntTensor(len(gallery_label)).zero_()
ap = 0.0
#print(query_label)
scores, indexs = [], []
for i in range(len(query_label)):
    ap_tmp, CMC_tmp, index, score = evaluate(query_feature[i],query_label[i],query_cam[i],gallery_feature,gallery_label,gallery_cam)
    # A leading -1 flags a query with no valid gallery match; such queries
    # contribute nothing to CMC/AP and are excluded from the dumped files.
    if CMC_tmp[0]==-1:
        continue
    CMC = CMC + CMC_tmp
    ap += ap_tmp
    scores.append(score)
    indexs.append(index)
    # print(i, CMC_tmp[0])
scores = np.array(scores)
indexs = np.array(indexs)
# Persist similarity scores and ranked gallery indices for later reuse.
score_path = os.path.join('eval', opt.name, 'score.txt')
pid_path = os.path.join('eval', opt.name, 'pid.txt')
np.savetxt(score_path, scores, fmt='%.4f')
np.savetxt(pid_path, indexs, fmt='%d')
CMC = CMC.float()
# Averaged over ALL queries, including any skipped above.
CMC = CMC/len(query_label) #average CMC
print(datetime.now().strftime("%Y.%m.%d-%H:%M:%S\t") + 'Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f\n'%(CMC[0],CMC[4],CMC[9],ap/len(query_label)))
write(os.path.join('eval', opt.name, 'acc.txt'), datetime.now().strftime("%Y.%m.%d-%H:%M:%S\t") + 'Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f\n'%(CMC[0],CMC[4],CMC[9],ap/len(query_label)))
y = model.predict_generator(pig_generator(test_imgs, batch_size), 3000 / batch_size + 1, use_multiprocessing=True) # ys = np.sum(y,axis=1) # y_i = ys > 0 y /= 1.001 predict_path = 'predict.csv' if path.exists(predict_path): remove(predict_path) np.savetxt(predict_path, y, fmt='%6f', delimiter='\t') # y = np.genfromtxt('predict.csv', delimiter='\t') y = y.reshape(-1) ids = np.array(test_ids) cls = np.arange(1, 31) ids = ids.repeat(30) cls = np.tile(cls, 3000) rst = np.append(ids, cls) rst = np.append(rst, y).reshape(3, 90000) rst = np.rot90(rst) rst_str = '' for line in rst: rst_str += '%d,%d,%5f\n' % (int(line[0]), int(line[1]), float(line[2])) rst_path = 'pig_rst.csv' if path.exists(rst_path): remove(rst_path) write(rst_path, rst_str) # np.savetxt('pig_rst.csv', rst, fmt='%d,%d,%6f', delimiter=',')