Example #1
def camera_intervals(fusion_param, camera_num):
    # fusion_param = get_fusion_param()
    intervals = list()
    cur_values = {'id': 0, 'start': 0, 'end': 0}

    def count_interval(img_name):
        if '.' not in img_name:
            return
        track_info = img_name.split('.')[0].split('_')
        person_id = track_info[0]
        track_time = int(track_info[2])
        if person_id != cur_values['id']:
            intervals.append(
                [cur_values['id'], cur_values['start'], cur_values['end']])
            cur_values['id'] = person_id
            cur_values['start'] = track_time
            cur_values['end'] = track_time
        else:
            if track_time > cur_values['end']:
                cur_values['end'] = track_time

    # both branches read the same per-camera predict file; camera_num can be an int
    # (see Example #7), so convert it before concatenating
    # read_lines_and('market_s1/track_c%ds1.txt' % (camera_num), count_interval)
    read_lines_and(
        fusion_param['predict_camera_path'] + str(camera_num) + '.txt',
        count_interval)
    # drop the leading [0, 0, 0] placeholder record; the interval of the last person id is never flushed
    return intervals[1:]
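
count_interval assumes Market-1501-style image names. A minimal parsing sketch with a hypothetical filename (the exact name is illustrative only):

img_name = '0007_c2s1_001051_00.jpg'            # hypothetical Market-1501-style name
track_info = img_name.split('.')[0].split('_')  # ['0007', 'c2s1', '001051', '00']
person_id = track_info[0]                       # '0007'
camera_id = int(track_info[1][1])               # 2, the digit after 'c'
seq_num = int(track_info[1][3])                 # 1, the digit after 's'
track_time = int(track_info[2])                 # 1051, the frame index used as the time stamp
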
Example #2
def count_person_in_camera(camera_num):
    persons = list()

    def count_person(img_name):
        person_id = img_name.split('.')[0].split('_')[0]
        if person_id not in persons:
            persons.append(person_id)

    read_lines_and('market/c%d_tracks.txt' % (camera_num + 1), count_person)
    # print(persons)
    return persons
Example #3
def get_tracks(fusion_param):
    # this function retrieves the probe image list; in training, the probe and gallery are the same
    # answer_path: data/top-m2g-std0-train/test_track.txt
    test_path = fusion_param['answer_path']
    tracks = list()

    def add_track(line):
        tracks.append(line)

    read_lines_and(test_path, add_track)
    return tracks
Example #4
def camera_distribute(fusion_param, camera_num):
    # a person in the left (query) image may appear in any of the 6 cameras on the gallery side
    deltas = [list() for i in range(camera_cnt)]
    # the Market-1501 dataset has six sequences; deltas can only be computed within the same sequence
    seq_s = [1, 2, 3, 4, 5, 6]
    # collect deltas once per sequence and merge them into the overall deltas
    for i in range(len(seq_s)):
        # get the spatio-temporal and identity info of the gallery images: 6 x [time, pid] * n
        intervals = track_infos(fusion_param, camera_num, seq_s[i])

        # print('get intervals for c%d' % camera_num)

        def shuffle_person(img_name):
            if '.' not in img_name:
                return
            track_info = img_name.split('.')[0].split('_')
            person_id = track_info[0]
            # collect deltas for each query image and merge them into the overall deltas
            track_deltas = find_id_delta(intervals, person_id,
                                         int(track_info[2]))
            if data_type == 2 or data_type == 3 or data_type == 4:
                camera_id = int(track_info[1])
            else:
                camera_id = int(track_info[1][1])
            if len(track_deltas) == 0:
                return
            for delta in track_deltas:
                if person_id != 0:
                    # exclude first zero record and not found id records
                    # deltas.append([cur_delta['id'], cur_delta['camera'], cur_delta['delta']])
                    # ignore large data
                    if abs(delta) < 1000000:
                        deltas[camera_id - 1].append(delta)

        # data_type 1 is enough to cover every case: shuffle_person decides, based on whether the
        # person ids match, whether to compute a time delta, and finally six lists of time deltas
        # are returned; every data type reads the same predict_track_path file
        # (previous per-type inputs: market_s1/track_s1.txt, grid/tracks.txt,
        #  grid_predict/grid_tracks.txt, 3dpes/test_tracks.txt)
        read_lines_and(fusion_param['predict_track_path'], shuffle_person)
    return deltas
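
The six per-camera delta lists returned above can be reduced to empirical time-difference distributions. A minimal sketch, assuming numpy is available; delta_histograms is a hypothetical helper, not part of the original code:

import numpy as np

def delta_histograms(deltas, bins=50):
    # deltas: result of camera_distribute(), one list of time differences per camera
    histograms = []
    for camera_deltas in deltas:
        if not camera_deltas:
            histograms.append(None)
            continue
        counts, edges = np.histogram(camera_deltas, bins=bins)
        # normalize the bin counts into an empirical probability distribution
        histograms.append((counts / float(counts.sum()), edges))
    return histograms
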
Example #5
def distribute_with_camera(persons_in_cameras):
    camera_distribution = [list() for i in range(camera_cnt)]

    def shuffle_person(img_name):
        if '.' not in img_name:
            return
        track_info = img_name.split('.')[0].split('_')
        person_id = track_info[0]
        for i in range(camera_cnt):
            if person_id in persons_in_cameras[i]:
                # record a (camera id, sequence id + fractional frame time) point for persons seen in camera i
                camera_distribution[i].append(
                    [float(track_info[1][1]), float(track_info[1][3]) + float(track_info[2]) / 100000])

    read_lines_and(train_track_path, shuffle_person)
    return camera_distribution
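
persons_in_cameras is presumably the per-camera person lists produced by count_person_in_camera from Example #2; a usage sketch under that assumption (camera_cnt is the module-level camera count):

persons_in_cameras = [count_person_in_camera(i) for i in range(camera_cnt)]
camera_distribution = distribute_with_camera(persons_in_cameras)
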
Example #6
def count_with_camera(persons_in_cameras):
    # camera_distribution[i][c][s] counts images of persons seen in camera i that appear in camera c+1, sequence s+1
    camera_distribution = [[[0 for i in range(camera_cnt)] for i in range(camera_cnt)] for i in range(camera_cnt)]

    def shuffle_person(img_name):
        if '.' not in img_name:
            return
        track_info = img_name.split('.')[0].split('_')
        person_id = track_info[0]
        for i in range(camera_cnt):
            if person_id in persons_in_cameras[i]:
                camera_distribution[i][int(track_info[1][1]) - 1][int(track_info[1][3]) - 1] += 1

    read_lines_and(train_track_path, shuffle_person)
    for i in range(camera_cnt):
        for j in range(camera_cnt):
            print(camera_distribution[i][j])
        print('=' * 40)
    return camera_distribution
Example #7
def camera_distribute(camera_num):
    fusion_param = get_fusion_param()
    intervals = camera_intervals(fusion_param, camera_num)
    print('get intervals for c%d' % camera_num)
    deltas = [list() for i in range(6)]
    cur_delta = {'id': 0, 'delta': 1000000, 'camera': -1}

    def shuffle_person(img_name):
        if '.' not in img_name:
            return
        track_info = img_name.split('.')[0].split('_')
        person_id = track_info[0]
        track_delta = find_id_delta(intervals, person_id, int(track_info[2]))
        camera_id = int(track_info[1][1])
        if track_delta == -0.1:
            # id not found
            cur_delta['id'] = 0
            return
        # new id that has appeared in camera camera_num
        cur_delta['id'] = person_id
        cur_delta['delta'] = track_delta
        cur_delta['camera'] = camera_id

        if cur_delta['id'] != 0:
            # exclude first zero record and not found id records
            # deltas.append([cur_delta['id'], cur_delta['camera'], cur_delta['delta']])
            # ignore large data
            if abs(cur_delta['delta']) < 2000:
                deltas[cur_delta['camera'] - 1].append(cur_delta['delta'])

    # both branches read the same predict file
    # read_lines_and('market_s1/track_s1.txt', shuffle_person)
    read_lines_and(fusion_param['predict_track_path'], shuffle_person)
    return deltas
Example #8
def track_infos(fusion_param, camera_num, s_num):
    # fusion_param = get_fusion_param()
    camera_num = str(camera_num)
    tracks = list()

    def count_interval(img_name):
        # parse the string into track info: person id (i.e. image id), seq num, and track time (a timestamp, not a time delta)
        if '.' not in img_name:
            return
        track_info = img_name.split('.')[0].split('_')
        person_id = track_info[0]
        track_time = int(track_info[2])
        seq_num = int(track_info[1][3])
        if seq_num == s_num:
            tracks.append([person_id, track_time])

    # data_type 1 can now handle every case
    if data_type == 0:
        # read_lines_and('market_s1/track_c%ds1.txt' % camera_num, count_interval)
        read_lines_and(
            fusion_param['predict_camera_path'] + camera_num + '.txt',
            count_interval)
    elif data_type == 2:
        # read_lines_and('grid/trackc%d.txt' % camera_num, count_interval)
        read_lines_and(
            fusion_param['predict_camera_path'] + camera_num + '.txt',
            count_interval)
    elif data_type == 3:
        read_lines_and('grid_predict/grid_c%d.txt' % camera_num,
                       count_interval)
        read_lines_and(
            fusion_param['predict_camera_path'] + camera_num + '.txt',
            count_interval)
    elif data_type == 4:
        # read_lines_and('grid_predict/rand/grid_c%d.txt' % camera_num, count_interval)
        read_lines_and(
            fusion_param['predict_camera_path'] + camera_num + '.txt',
            count_interval)
    elif data_type == 5:
        # read_lines_and('3dpes/c%d_tracks.txt' % camera_num, count_interval)
        read_lines_and(
            fusion_param['predict_camera_path'] + camera_num + '.txt',
            count_interval)
    else:
        if os.path.exists(fusion_param['predict_camera_path'] + camera_num +
                          '.txt'):
            read_lines_and(
                fusion_param['predict_camera_path'] + camera_num + '.txt',
                count_interval)

    return tracks
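
find_id_delta (used in Examples #4 and #7) is not shown in this listing; given the [person_id, track_time] records produced by track_infos and the list-valued usage in Example #4, a plausible minimal stand-in could look like the following (an assumption, not the project's actual implementation):

def find_id_delta_sketch(intervals, person_id, track_time):
    # intervals: [[person_id, track_time], ...] as returned by track_infos()
    # returns the time differences to every record with a matching person id
    return [track_time - other_time
            for other_id, other_time in intervals
            if other_id == person_id]
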
Example #9
def percent_shot_eval(target_path, top_cnt, test_mode=False):
    global gallery_cnt
    gallery_cnt = 0
    answer_path = folder(target_path) + '/test_tracks.txt'
    predict_path = target_path
    answer_lines = read_lines(answer_path)
    real_pids = [int(answer.split('_')[0]) for answer in answer_lines]

    def is_shot(line):
        global line_idx
        global shot_line_cnt
        global shot_cnt
        global predict_cnt
        global predict_line_cnt
        global gallery_cnt

        predict_idx_es = line.split()
        has_shot = False
        if len(predict_idx_es) > top_cnt:
            predict_cnt += top_cnt
        else:
            predict_cnt += len(predict_idx_es)

        if len(predict_idx_es) > 0:
            predict_line_cnt += 1
        # line_idx > 774 means a label img,
        # gallery_idxs[(line_idx - 775)/2] means "is even" in the gallery,
        # if "is even" matches, this is a gallery img
        if test_mode and line_idx > 774 and (line_idx - 774) % 2 == 1:
            gallery_cnt += 1
            line_idx += 1
            return
        for i, predict_idx in enumerate(predict_idx_es):
            if i >= top_cnt:
                break
            # print(line_idx)

            if real_pids[int(predict_idx) - 1] == real_pids[line_idx]:
                if not has_shot:
                    shot_line_cnt += 1
                    has_shot = True
                shot_cnt += 1
        line_idx += 1

    read_lines_and(predict_path, is_shot)
    global line_idx
    global shot_line_cnt
    global shot_cnt
    global predict_cnt
    global predict_line_cnt
    # print('all predict shot(ac1): %f' % (float(shot_cnt) / predict_cnt))
    if test_mode:
        valid_line_cnt = 125
    else:
        valid_line_cnt = 250
    shot_rate = shot_line_cnt / float(valid_line_cnt)
    print('top%d shot: %f' % (top_cnt, shot_rate))
    print('gallery cnt: %d' % gallery_cnt)
    line_idx = 0
    shot_cnt = 0
    shot_line_cnt = 0
    predict_cnt = 0
    predict_line_cnt = 0
    return shot_rate
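
percent_shot_eval increments several module-level counters and only resets them at the end, so they must already exist when it is first called; a minimal sketch of the expected module-level state (the path in the usage line is hypothetical):

line_idx = 0
shot_cnt = 0
shot_line_cnt = 0
predict_cnt = 0
predict_line_cnt = 0
gallery_cnt = 0

# shot_rate = percent_shot_eval('data/top-m2g-std0-train/predict.log', top_cnt=1)
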
Example #10
def get_predict_tracks(fusion_param, useful_predict_cnt=10):
    # This function takes the predicted pids and generates additional image names from them,
    # which are then used to build the spatio-temporal model.
    # For example, if the query image 1_c1_t2 matches the image ids 2_c2_t3, 3_c3_t3 and 6_c4_t1,
    # then 1_c2_t3, 1_c3_t3 and 1_c4_t1 are generated and used to build the spatio-temporal
    # probability model.
    # With useful_predict_cnt = 10, each query-image name actually produces 10 gallery-image
    # names; together with the original image that is 11 new names.
    # To speed up later lookups, generated names that share a camera are written to the same
    # file, i.e. predict_camera_path: predict_c%d.txt.
    # Every run of this function deletes predict_c%d.txt and predict_tracks.txt, so stale
    # results are never cached.
    # todo: cameras_deltas could actually be produced directly in this step; the code in
    # delta_track.py was only used before in order to reuse the visualization code

    # renew_pid_path: data/top-m2g-std0-train/renew_pid.log, contains the predicted image ids for the query images, 250*249
    renew_pid_path = fusion_param['renew_pid_path']
    # predict_track_path: data/top-m2g-std0-train/predict_tracks.txt, stores the get_predict_tracks result
    predict_track_path = fusion_param['predict_track_path']
    # get the list of left (query) images
    origin_tracks = get_tracks(fusion_param)
    safe_remove(predict_track_path)
    camera_cnt = 8
    global predict_line_idx
    predict_line_idx = 0
    for i in range(camera_cnt):
        safe_remove(fusion_param['predict_camera_path'] + str(i) + '.txt')

    def add_predict_track(line):
        global predict_line_idx
        # print predict_line_idx
        if line == '\n':
            predict_line_idx += 1
            return
        if predict_line_idx >= 248:
            print(predict_line_idx)
        if origin_tracks[predict_line_idx].startswith('-1'):
            tail = origin_tracks[predict_line_idx][2:-1]
        else:
            tail = origin_tracks[predict_line_idx][4:-1]
        if 's' in tail:
            s_num = int(tail[4])
        else:
            s_num = 1
        if predict_line_idx == 499:
            print(predict_line_idx)
        if 'jpe' in tail:
            camera = tail[1]
        else:
            camera = tail[2]
        track_time = tail.split('_')[2]
        mids = line.split()
        # what gets written is predict_line_idx, not the original person id, which keeps the procedure unsupervised (label-free)
        write_line(
            predict_track_path,
            ('%04d_c%ds%d_%d_n.jpg' %
             (int(predict_line_idx) + 1, int(camera), s_num, int(track_time))))
        write_line(
            fusion_param['predict_camera_path'] + str(camera) + '.txt',
            ('%04d_c%ds%d_%d_n.jpg' %
             (int(predict_line_idx) + 1, int(camera), s_num, int(track_time))))

        for i, mid in enumerate(mids):
            if i >= useful_predict_cnt:
                break
            write_line(predict_track_path,
                       ('%04d_c%ds%d_%d_n.jpg' %
                        (int(mid), int(camera), s_num, int(track_time))))
            write_line(
                fusion_param['predict_camera_path'] + str(camera) + '.txt',
                ('%04d_c%ds%d_%d_n.jpg' %
                 (int(mid), int(camera), s_num, int(track_time))))
        predict_line_idx += 1
        # print('done')

    read_lines_and(renew_pid_path, add_predict_track)
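
The name construction above can be traced for one hypothetical probe and one renew_pid line; a minimal re-run of the same slicing and formatting (the filename and predicted ids are illustrative only):

origin_track = '0007_c2s1_001051_00.jpg\n'   # probe at predict_line_idx == 6 (hypothetical)
mids = '2 3 6'.split()                       # predicted matching image ids (hypothetical)
tail = origin_track[4:-1]                    # '_c2s1_001051_00.jpg'
camera, s_num = tail[2], int(tail[4])        # '2', 1
track_time = tail.split('_')[2]              # '001051'
names = ['%04d_c%ds%d_%d_n.jpg' % (6 + 1, int(camera), s_num, int(track_time))]
names += ['%04d_c%ds%d_%d_n.jpg' % (int(m), int(camera), s_num, int(track_time)) for m in mids]
# names == ['0007_c2s1_1051_n.jpg', '0002_c2s1_1051_n.jpg', '0003_c2s1_1051_n.jpg', '0006_c2s1_1051_n.jpg']

In get_predict_tracks these names would all be written to predict_tracks.txt and to predict_c2.txt.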