def interval_scores(fusion_param):
    """Build per-camera-pair histograms of travel-time deltas.

    For every ordered camera pair, splits its sorted delta list into
    ``gap_cnt`` equal-width bins and records each bin's [left, right)
    bounds plus a probability score.  The grid is pickled to
    ``fusion_param['interval_pickle_path']`` and returned.

    NOTE(review): the stored 'prob' is the pair's share of all deltas for
    the source camera (sp_cnt / total_cnt) and is identical for every bin
    of that pair; the commented-out binary_search variant suggests a
    per-bin count may have been intended -- confirm.
    """
    # fusion_param = get_fusion_param()
    camera_delta_s = viz_data_for_market(fusion_param)
    for camera_delta in camera_delta_s:
        for delta_s in camera_delta:
            delta_s.sort()
    gap_cnt = 5
    camera_pair_travel_probs = [
        [list() for _ in range(len(camera_delta_s[0]))]
        for _ in range(len(camera_delta_s))
    ]
    for i, camera_delta in enumerate(camera_delta_s):
        # total deltas observed from source camera i (loop-invariant,
        # hoisted out of the per-pair/per-bin loops)
        total_cnt = sum(map(len, camera_delta))
        for j, delta_s in enumerate(camera_delta):
            # robustness: no observations for this pair (or the whole row)
            # -> leave the bin list empty instead of IndexError/ZeroDivision
            if not delta_s or total_cnt == 0:
                continue
            gap_width = (delta_s[-1] - delta_s[0]) / float(gap_cnt)
            sp_cnt = len(delta_s)
            for k in range(gap_cnt):
                left_bound = delta_s[0] + gap_width * k
                # BUGFIX: the right bound must extend from the minimum delta
                # delta_s[0]; the original used delta_s[1], producing bins
                # that neither tile nor cover the observed delta range.
                right_bound = delta_s[0] + gap_width * (k + 1)
                camera_pair_travel_probs[i][j].append({
                    'left': left_bound,
                    'right': right_bound,
                    'prob': sp_cnt / float(total_cnt)
                    # 'prob': (binary_search(delta_s, right_bound) - binary_search(delta_s, left_bound)) / float(total_cnt)
                })
    pickle_save(fusion_param['interval_pickle_path'], camera_pair_travel_probs)
    return camera_pair_travel_probs
def compute_trainset_deltas(train_imgs_path, pickle_path):
    """Collect cross-camera time deltas for the first and second halves of
    the training tracks (split at the mid frame) and pickle them separately
    as 'head_<pickle_path>' and 'tail_<pickle_path>'.
    """
    train_tracks, camera_cnt = load_train_img_infos(train_imgs_path)
    mid = calculate_mid_frame(train_tracks)

    def _collect(tracks):
        # pair_deltas[qc][gc]: frame-time differences between tracks of the
        # same person id seen by two different cameras qc+1 and gc+1
        pair_deltas = [[list() for _ in range(camera_cnt)]
                       for _ in range(camera_cnt)]
        for q in tracks:
            for g in tracks:
                if q[0] != g[0] or q[1] == g[1]:
                    continue
                delta = q[2] - g[2]
                if abs(delta) < 60000:
                    pair_deltas[q[1] - 1][g[1] - 1].append(delta)
        return pair_deltas

    head_tracks = [t for t in train_tracks if t[2] < mid]
    pickle_save('head_' + pickle_path, _collect(head_tracks))
    tail_tracks = [t for t in train_tracks if t[2] > mid]
    pickle_save('tail_' + pickle_path, _collect(tail_tracks))
def save_market_test_truth():
    """Build the ground-truth camera-pair time-delta grid for the Market
    test split from the answer (query) and gallery file lists, sort each
    delta list, and pickle it to 'true_market_pg.pck'.
    """
    ctrl_msg['data_folder_path'] = 'market_market-test'
    fusion_param = get_fusion_param()
    answer_path = fusion_param['answer_path']
    answer_lines = read_lines(answer_path)
    query_tracks = list()
    for answer in answer_lines:
        # filename pattern presumably <pid>_<camera/seq code>_<frame>...;
        # verify against the dataset naming convention
        info = answer.split('_')
        if 'bmp' in info[2]:
            info[2] = info[2].split('.')[0]
        if len(info) > 4 and 'jpe' in info[6]:
            # grid-style name: 3-element track [pid, camera, frame]
            # NOTE(review): 3-element tracks lack the [3] sequence field
            # accessed below -- confirm this branch never fires here
            query_tracks.append([info[0], int(info[1][0]), int(info[2])])
        else:
            # market-style name: [pid, camera, frame, sequence]
            query_tracks.append(
                [info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
    gallery_path = fusion_param['gallery_path']
    gallery_lines = read_lines(gallery_path)
    gallery_tracks = list()
    for gallery in gallery_lines:
        info = gallery.split('_')
        if 'bmp' in info[2]:
            info[2] = info[2].split('.')[0]
        if len(info) > 4 and 'jpe' in info[6]:
            gallery_tracks.append([info[0], int(info[1][0]), int(info[2])])
        else:
            gallery_tracks.append(
                [info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
    # combine gallery and query tracks into a single pool
    gallery_tracks.extend(query_tracks)
    print(len(gallery_tracks))
    deltas = [[list() for j in range(6)] for i in range(6)]
    for i, market_probe_track in enumerate(gallery_tracks):
        # skip junk/distractor ids
        # NOTE(review): track[0] is a string here, so comparing to the ints
        # 0 / -1 is always False in Py2 -- likely intended '0'/'-1'; confirm
        if gallery_tracks[i][0] == 0 or gallery_tracks[i][0] == -1:
            continue
        for j in range(len(gallery_tracks)):
            # same person, same sequence, different camera, not self
            if gallery_tracks[i][0] == gallery_tracks[j][0] \
                    and i != j \
                    and gallery_tracks[i][3] == gallery_tracks[j][3] \
                    and gallery_tracks[i][1] != gallery_tracks[j][1]:
                # NOTE(review): debug print for one camera pair past a
                # hard-coded index; looks like leftover diagnostics
                if gallery_tracks[i][1] == 4 and gallery_tracks[j][1] - 1 == 5:
                    if j >= 19732:
                        print gallery_tracks[i][2] - gallery_tracks[j][2]
                deltas[gallery_tracks[i][1] - 1][gallery_tracks[j][1] - 1].append(
                    gallery_tracks[i][2] - gallery_tracks[j][2])
    # sort each pair's deltas ascending for later lookup
    for camera_delta in deltas:
        for delta_s in camera_delta:
            delta_s.sort()
    pickle_save('true_market_pg.pck', deltas)
def compute_trainset_deltas(train_imgs_path, pickle_path):
    """Pickle, for every ordered camera pair, the list of frame-time
    differences between training tracks that share a person id.

    NOTE(review): a sibling definition with the same name exists earlier in
    this file; whichever is defined later shadows the other.
    """
    train_tracks, camera_cnt = load_train_img_infos(train_imgs_path)
    deltas = [[list() for _ in range(camera_cnt)]
              for _ in range(camera_cnt)]
    for query in train_tracks:
        for gallery in train_tracks:
            # same person id only; unlike the head/tail variant,
            # same-camera pairs are NOT excluded here
            if query[0] != gallery[0]:
                continue
            delta = query[2] - gallery[2]
            if abs(delta) < 300000:
                deltas[query[1] - 1][gallery[1] - 1].append(delta)
    pickle_save(pickle_path, deltas)
def store_sorted_deltas(fusion_param):
    """Core builder of the spatio-temporal model.

    Gathers the time-delta distribution for every camera pair (a grid of
    lists, one entry per observed delta), sorts each list ascending so a
    delta's position -- and hence its probability -- can be located
    quickly at prediction time, then pickles the grid to
    fusion_param['distribution_pickle_path'].  The target pickle is
    removed first, so no stale cache can survive.
    """
    camera_delta_s = viz_data_for_market(fusion_param)
    for pair_row in camera_delta_s:
        for deltas in pair_row:
            deltas.sort()
    safe_remove(fusion_param['distribution_pickle_path'])
    pickle_save(fusion_param['distribution_pickle_path'], camera_delta_s)
def save_grid_train_truth():
    """Collect ground-truth cross-camera time deltas for the GRID cv0
    training split, sort them per camera pair, and pickle the grid to
    'true_grid-cv0_train.pck'.
    """
    ctrl_msg['data_folder_path'] = 'market_grid-cv0-train'
    fusion_param = get_fusion_param()
    tracks = train_tracks(fusion_param)
    deltas = [[list() for _ in range(6)] for _ in range(6)]
    track_cnt = len(tracks)
    for i in range(track_cnt):
        for j in range(track_cnt):
            if i == j:
                continue
            # same person observed by two different cameras
            if tracks[i][0] == tracks[j][0] and tracks[i][1] != tracks[j][1]:
                deltas[tracks[i][1] - 1][tracks[j][1] - 1].append(
                    tracks[i][2] - tracks[j][2])
    for pair_row in deltas:
        for delta_list in pair_row:
            delta_list.sort()
    pickle_save('true_grid-cv0_train.pck', deltas)
def split_trainset_deltas(train_imgs_path, pickle_path, slice_cnt):
    """Split the training tracks into overlapping time slices and pickle
    each slice's cross-camera delta distribution as 'part<i>_<pickle_path>'.

    Each slice i keeps the tracks whose frame falls strictly between
    slice_bounds[i] and slice_bounds[i + slice_cnt // 2].
    """
    train_tracks, camera_cnt = load_train_img_infos(train_imgs_path)
    slice_bounds = calculate_part_frame(train_tracks, slice_cnt)
    half = slice_cnt // 2  # same as Py2 int '/' in the original
    split_tracks = [
        [t for t in train_tracks
         if slice_bounds[i] < t[2] < slice_bounds[i + half]]
        for i in range(half + 1)
    ]
    for idx, part in enumerate(split_tracks):
        deltas = [[list() for _ in range(camera_cnt)]
                  for _ in range(camera_cnt)]
        for q in part:
            for g in part:
                # same person seen by two different cameras
                if q[0] != g[0] or q[1] == g[1]:
                    continue
                delta = q[2] - g[2]
                if abs(delta) < 60000:
                    deltas[q[1] - 1][g[1] - 1].append(delta)
        pickle_save(('part%d_' % idx) + pickle_path, deltas)
def get_predict_delta_tracks(fusion_param, useful_predict_limit=10, random=False, diff_person=False, use_real_st=False):
    """Collect time deltas between each query image and its top-ranked
    predicted matches, building the camera-pair delta distribution that is
    pickled to fusion_param['distribution_pickle_path'] and returned.

    useful_predict_limit: max matches counted per query ranking line.
    random: sample random gallery indices instead of the ranked predictions.
    diff_person: sample random indices excluding the first ten.
    use_real_st: NOTE(review) currently unused -- the guard that would read
        it is hard-coded to 'if True:' below; confirm before relying on it.
    """
    # Build the query ("left image") track list: [pid, camera, frame, sequence]
    answer_path = fusion_param['answer_path']
    answer_lines = read_lines(answer_path)
    camera_cnt = 6
    real_tracks = list()
    for answer in answer_lines:
        info = answer.split('_')
        if 'bmp' in info[2]:
            # info[2] = info[2].split('.')[0]
            if len(info) > 4 and 'jpe' in info[6]:
                # grid
                real_tracks.append([info[0], int(info[1][0]), int(info[2]), 1])
        elif 'f' in info[2]:
            # duke-style name with 8 cameras; frame embedded as 'fXXXX.jpg'
            real_tracks.append([info[0], int(info[1][1]), int(info[2][1:-5]), 1])
            camera_cnt = 8
        else:
            # market
            real_tracks.append([info[0], int(info[1][1]), int(info[2]), int(info[1][3])])
    print 'left image ready'
    # Build the predicted ranking lists ("right images")
    renew_pid_path = fusion_param['renew_pid_path']
    predict_lines = read_lines(renew_pid_path)
    print 'predict images ready'
    # A person from the left image may reappear in any of the camera_cnt cameras
    camera_delta_s = [[list() for j in range(camera_cnt)]
                      for i in range(camera_cnt)]
    person_cnt = len(answer_lines)
    # market1501 has six sequences; deltas are only valid within one sequence
    if random:
        useful_predict_limit = max(len(predict_lines)/100, 10)
    for i, line in enumerate(predict_lines):
        predict_pids = line.split(' ')
        useful_cnt = 0
        for j, predict_pid in enumerate(predict_pids):
            if useful_cnt > useful_predict_limit:
                break
            if random:
                predict_pid = randint(0, person_cnt - 1)
            elif diff_person:
                predict_pid = randint(10, person_cnt - 1)
            else:
                # todo transfer: if predict by python, start from 0, needn't minus 1
                predict_pid = int(predict_pid)
            # NOTE(review): redundant re-cast kept from the original
            predict_pid = int(predict_pid)
            # same seq
            # todo ignore same camera track
            if real_tracks[i][3] == real_tracks[predict_pid][3] and real_tracks[i][1] != real_tracks[predict_pid][1]:
                # and pid equal: real st
                # if use_real_st and random or real_tracks[i][0] == real_tracks[predict_pid][0]:
                if True:
                    useful_cnt += 1
                    delta = real_tracks[i][2] - real_tracks[predict_pid][2]
                    # discard outlier deltas beyond 1e6 frames
                    if abs(delta) < 1000000:
                        camera_delta_s[real_tracks[i][1] - 1][real_tracks[predict_pid][1] - 1].append(delta)
    print 'deltas collected'
    # sort each pair's deltas ascending for later probability lookup
    for camera_delta in camera_delta_s:
        for delta_s in camera_delta:
            delta_s.sort()
    print 'deltas sorted'
    # for python
    safe_remove(fusion_param['distribution_pickle_path'])
    pickle_save(fusion_param['distribution_pickle_path'], camera_delta_s)
    print 'deltas saved'
    return camera_delta_s
        # NOTE(review): tail of a reporting method whose 'def' (and the
        # enclosing class) start before this chunk -- prints average
        # per-tracklet scores; indentation reconstructed, confirm.
        print('kmeans:')
        print(self.kmeans_score / self.tracklet_cnt)
        print('ap:')
        print(self.spectral_score / self.tracklet_cnt)


def arg_parse():
    """Parse command-line options for the txt-based evaluation script."""
    parser = argparse.ArgumentParser(description='eval on txt')
    parser.add_argument('--train_list', default='../data/duke/train.list',
                        type=str, help='')
    parser.add_argument(
        '--transfer_feature',
        default='/home/cwh/coding/taudl_pyt/baseline/eval/grid_duke-train/train_ft.mat',
        type=str,
        help='')
    parser.add_argument('--transfer', default='grid_duke', type=str, help='')
    opt = parser.parse_args()
    return opt


if __name__ == '__main__':
    opt = arg_parse()
    # a = pickle_load(opt.transfer+'_cluster.pck')
    # cluster tracks per camera by time, then by appearance feature
    pseudo_camera_tracks = single_camera_time_cluster(opt.train_list)
    c = TrackFeatureCluster(pseudo_camera_tracks, opt.transfer_feature)  #0.31
    c.fit()
    pickle_save(opt.transfer + '_cluster.pck', c.cameras_tracks)