Example #1
def sample_cls():
    """sample query images according to cls of test images with occlusion"""
    import numpy as np

    query_ori_lst = '/home/chencp/data/market-list/query2.lst'
    query_lst_new = '/home/chencp/dataset/binary-annotation-market1501/lst_dir/query_cls_test.lst'
    query_lst_new_mask = '/home/chencp/dataset/binary-annotation-market1501/lst_dir/query_cls_test-mask.lst'
    test_fuse_lst = '/home/chencp/dataset/binary-annotation-market1501/lst_dir/test_fuse_v2.lst'

    query_lst = load_lst(query_ori_lst)
    test_lst = load_lst(test_fuse_lst)
    query_cls = np.array([ins.class_id for ins in query_lst])
    test_cls = np.unique([ins.class_id for ins in test_lst])
    print('num of cls in test set with occlusion: {}'.format(len(test_cls)))

    fout = open(query_lst_new, 'w')
    fout_mask = open(query_lst_new_mask, 'w')
    cnt = 0
    for cls in test_cls:
        ind = np.where(query_cls == cls)[0]
        # if len(ind) > 2:
        #     ind = np.random.choice(ind, replace=False, size=2)
        cnt += len(ind)  # np.alen is deprecated; use the builtin len instead
        for i in ind:
            ins = query_lst[i]
            line = '{}\t{}\t{}\n'.format(ins.index, ins.class_id, ins.img_path)
            fout.write(line)
            # mask info: rgb_root / mask_root are module-level dataset roots
            mask_line = line.replace(rgb_root + 'query', mask_root + 'query_seg')
            mask_line = mask_line.replace('.jpg', '.png')
            fout_mask.write(mask_line)
    fout.close()
    fout_mask.close()
    print('sampled {} images, saved to lst file:\n\t{}'.format(cnt, query_lst_new))
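
These snippets rely on a few module-level helpers that are not shown: load_lst, a record type exposing index, class_id, cam_id, and img_path, and the rgb_root/mask_root path prefixes used for the .jpg-to-mask .png rewriting. Below is a minimal sketch of those assumptions, for reference only; the real field order, dataset roots, and camera-id convention may differ.

import os
from collections import namedtuple

# hypothetical record layout; the real .lst entries may carry more columns
Record = namedtuple('Record', ['index', 'class_id', 'cam_id', 'img_path'])

# hypothetical dataset roots referenced by the mask-path rewriting above
rgb_root = '/path/to/Market-1501/'
mask_root = '/path/to/Market-1501-masks/'


def load_lst(lst_path):
    """Parse a tab-separated .lst file into Record entries (sketch)."""
    records = []
    with open(lst_path) as fin:
        for raw in fin:
            fields = raw.rstrip('\n').split('\t')
            index, class_id, img_path = fields[0], int(fields[1]), fields[-1]
            # Market-1501 names look like 0002_c1s1_000451_03.jpg; the camera
            # id follows the 'c' in the second underscore-separated field
            cam_id = int(os.path.basename(img_path).split('_')[1][1])
            records.append(Record(index, class_id, cam_id, img_path))
    return records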
Example #2
def eval_rank_list(rank_list, query_lst_path, gallery_lst_path):
    query_lst = load_lst(query_lst_path)
    gallery_lst = load_lst(gallery_lst_path)
    gallery_cam_lst = np.array([x.cam_id for x in gallery_lst], dtype=np.int32)
    gallery_id_lst = np.array([x.class_id for x in gallery_lst],
                              dtype=np.int32)

    num_query = len(query_lst)
    num_gallery = len(gallery_lst)

    ap = np.zeros((num_query, ))  # average precision
    cmc = np.zeros((num_query, num_gallery))
    for i in tqdm(range(num_query)):
        index = np.arange(num_gallery)
        good_flag = np.logical_and((gallery_cam_lst != query_lst[i].cam_id),
                                   (gallery_id_lst == query_lst[i].class_id))
        junk_flag_1 = (gallery_id_lst == 0)
        junk_flag_2 = np.logical_and((gallery_cam_lst == query_lst[i].cam_id),
                                     (gallery_id_lst == query_lst[i].class_id))

        good_index = index[good_flag]
        junk_index = index[np.logical_or(junk_flag_1, junk_flag_2)]

        sort_index = rank_list[i]

        ap[i], cmc[i, :] = compute_ap(good_index, junk_index, sort_index)

    mean_ap = np.mean(ap)  # renamed to avoid shadowing the builtin `map`
    r1 = np.mean(cmc, axis=0)[0]
    r5 = np.mean(np.clip(np.sum(cmc[:, :5], axis=1), 0, 1), axis=0)

    print('mAP = %f, r1 precision = %f, r5 precision = %f' % (mean_ap, r1, r5))
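
eval_rank_list delegates the per-query scoring to a compute_ap helper that is not shown. Below is a sketch of the standard Market-1501-style computation it appears to expect: average precision plus a per-position CMC vector, given the good/junk gallery indices and the ranked gallery order. It is an assumption, not the author's implementation.

import numpy as np

def compute_ap(good_index, junk_index, sort_index):
    """Average precision and CMC flags for one query (sketch of the assumed helper)."""
    cmc = np.zeros(len(sort_index))
    if len(good_index) == 0:
        return 0.0, cmc

    # drop junk entries (distractors and same-camera matches) from the ranking
    keep = np.isin(sort_index, junk_index, invert=True)
    ranked = sort_index[keep]

    # 0-based positions of the good matches within the cleaned ranking
    rows_good = np.flatnonzero(np.isin(ranked, good_index))

    # CMC: 1 from the first correct match onward
    cmc[rows_good[0]:] = 1.0

    # average precision: mean of the precision at each correct hit
    n_good = len(good_index)
    ap = sum((i + 1) / (row + 1.0) for i, row in enumerate(rows_good)) / n_good
    return ap, cmc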
Example #3
def sample_clean_img():
    """to sample clean img according images with occlusion in query and test list"""
    query_lst_ori = '/home/chencp/data/market-list/query2.lst'
    test_lst_ori = '/home/chencp/data/market-list/test2.lst'
    query_lst_fuse = '/home/chencp/dataset/binary-annotation-market1501/lst_dir/query_fuse_v2.lst'
    test_lst_fuse = '/home/chencp/dataset/binary-annotation-market1501/lst_dir/test_fuse_v2.lst'
    query_lst_new = '/home/chencp/dataset/binary-annotation-market1501/lst_dir/query_fuse_verify.lst'
    test_lst_new = '/home/chencp/dataset/binary-annotation-market1501/lst_dir/test_fuse_verify.lst'
    query_lst_mask_new = '/home/chencp/dataset/binary-annotation-market1501/lst_dir/query_fuse_verify-mask.lst'
    test_lst_mask_new = '/home/chencp/dataset/binary-annotation-market1501/lst_dir/test_fuse_verify-mask.lst'

    query_lst_ori = load_lst(query_lst_ori)
    test_lst_ori = load_lst(test_lst_ori)
    query_lst_fuse = load_lst(query_lst_fuse)
    test_lst_fuse = load_lst(test_lst_fuse)

    # sample test set according to query
    fout = open(test_lst_new, 'w')
    fout_mask = open(test_lst_mask_new, 'w')
    cnt = 0
    for ins in query_lst_fuse:
        ins_sample = [i for i in test_lst_ori if i.class_id == ins.class_id and i.cam_id != ins.cam_id]
        cnt += len(ins_sample)
        for i_s in ins_sample:
            line = '{}\t{}\t{}\n'.format(i_s.index, i_s.class_id, i_s.img_path)
            fout.write(line)
            # mask info
            mask_line = line.replace(rgb_root + 'bounding_box_test', mask_root + 'bounding_box_test_seg')
            mask_line = mask_line.replace('.jpg', '.png')
            fout_mask.write(mask_line)
    print('sampled {} images from the test set according to the occluded query set'.format(cnt))
    print('\tand saved the list file to {}'.format(test_lst_new))
    fout.close()
    fout_mask.close()

    # sample query set according to test
    fout = open(query_lst_new, 'w')
    fout_mask = open(query_lst_mask_new, 'w')
    cnt = 0
    for ins in test_lst_fuse:
        ins_sample = [i for i in query_lst_ori if i.class_id == ins.class_id and i.cam_id != ins.cam_id]
        cnt += len(ins_sample)
        for i_s in ins_sample:
            line = '{}\t{}\t{}\n'.format(i_s.index, i_s.class_id, i_s.img_path)
            fout.write(line)
            # mask info
            mask_line = line.replace(rgb_root + 'query', mask_root + 'query_seg')
            mask_line = mask_line.replace('.jpg', '.png')
            fout_mask.write(mask_line)
    print('sampled {} images from the query set according to the occluded test set'.format(cnt))
    print('\tand saved the list file to {}'.format(query_lst_new))
    fout.close()
    fout_mask.close()
Example #4
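        # NOTE: this snippet is a fragment; the opening of the cmd string (the
        # extraction script and its leading arguments) is not shown, only the
        # tail of the command assembly and the evaluation steps survive.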
              "--crop-size 128 --dataset %s" % (
            sys.version[0], prefix, gpu, epoch_idx, query_prefix, gallery_prefix, dataset)
        cmd = cmd + ' --data_type %s' % data_type
        if force_resize:
            cmd = cmd + " --force-resize"
        if mask_label:
            cmd = cmd + ' --mask_label'
        if soft_or_hard:
            cmd = cmd + ' --soft_or_hard'
        if rgbm_iter:
            cmd = cmd + ' --rgbm_iter'
        if fea_tag:
            cmd = cmd + ' --fea_tag %s' % fea_tag

        subprocess.check_call(cmd.split(" "))

    assert os.path.exists(query_features_path) and os.path.exists(
        gallery_features_path)

    query_features = sio.loadmat(query_features_path)["feat"]
    gallery_features = sio.loadmat(gallery_features_path)["feat"]

    query_lst = load_lst(query_prefix + ".lst")
    gallery_lst = load_lst(gallery_prefix + ".lst")

    eval_feature_verify(query_features,
                        gallery_features,
                        query_lst,
                        gallery_lst,
                        metric="cosine")
Example #5
def tsne_viz_query2gallery(prefix, dataset, viz_range):
    if not isinstance(viz_range, (list, tuple, int)):
        raise ValueError("viz_range must be list or tuple or int")
    if isinstance(viz_range, int):
        viz_range = list(range(viz_range))

    query_features_path = 'features/%s/query-%s.mat' % (dataset, prefix)
    gallery_features_path = "features/%s/gallery-%s.mat" % (dataset, prefix)
    query_lst_path = "/mnt/truenas/scratch/chuanchen.luo/data/reid/%s-list/query.lst" % dataset
    gallery_lst_path = "/mnt/truenas/scratch/chuanchen.luo/data/reid/%s-list/test.lst" % dataset

    query_lst = np.array(load_lst(query_lst_path))
    gallery_lst = np.array(load_lst(gallery_lst_path))

    query_features = sio.loadmat(query_features_path)["feat"]
    gallery_features = sio.loadmat(gallery_features_path)["feat"]

    query_features = normalize(query_features)
    gallery_features = normalize(gallery_features)

    for i in viz_range:
        q_feat = query_features[i]
        dist = -np.dot(gallery_features, q_feat)
        rank_list = np.argsort(dist)[:150]
        g_feats = gallery_features[rank_list]

        q_record = Record(*query_lst[i])
        g_records = [Record(*item) for item in gallery_lst[rank_list]]

        same_list = [
            j for j in range(g_feats.shape[0])
            if q_record.class_id == g_records[j].class_id
        ]
        diff_list = [
            j for j in range(g_feats.shape[0])
            if q_record.class_id != g_records[j].class_id
        ]

        init_embed = TSNE(n_components=2,
                          init="pca",
                          perplexity=10,
                          metric="cosine").fit_transform(g_feats)

        W = np.dot(g_feats, g_feats.T)
        W = np.exp(W / TEMPERATURE)
        W = W / np.sum(W, axis=1, keepdims=True)

        g_feats = np.dot(W, g_feats)
        res_embed = TSNE(n_components=2,
                         init="pca",
                         perplexity=10,
                         metric="cosine").fit_transform(g_feats)

        plt.figure(0)
        plt.scatter(init_embed[same_list, 0],
                    init_embed[same_list, 1],
                    label="same")
        plt.scatter(init_embed[diff_list, 0],
                    init_embed[diff_list, 1],
                    label="diff")
        plt.legend()
        plt.savefig("%d_tsne_original.png" % i)
        plt.close(0)

        plt.figure(1)
        plt.scatter(res_embed[same_list, 0],
                    res_embed[same_list, 1],
                    label="same")
        plt.scatter(res_embed[diff_list, 0],
                    res_embed[diff_list, 1],
                    label="diff")
        plt.legend()
        plt.savefig("%d_tsne_transformed_%.2f.png" % (i, TEMPERATURE))
        plt.close(1)

        print(i)
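
This t-SNE snippet and the next one assume some module-level setup that is not shown: the plotting and embedding imports, the normalize helper, and the TEMPERATURE constant used to sharpen the gallery-gallery affinity matrix W. A sketch of that likely preamble follows; the temperature value actually used is not given anywhere in these snippets, so 0.05 below is only a placeholder.

import numpy as np
import scipy.io as sio
import matplotlib
matplotlib.use('Agg')  # savefig-only usage, so a headless backend is enough
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.preprocessing import normalize  # L2-normalizes feature rows by default

# softmax temperature for W = softmax(g_feats . g_feats^T / TEMPERATURE);
# placeholder value, the one actually used is not shown
TEMPERATURE = 0.05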
Example #6
def tsne_viz_iteration(prefix, dataset, query_id):
    query_features_path = 'features/%s/query-%s.mat' % (dataset, prefix)
    gallery_features_path = "features/%s/gallery-%s.mat" % (dataset, prefix)
    query_lst_path = "/mnt/truenas/scratch/chuanchen.luo/data/reid/%s-list/query.lst" % dataset
    gallery_lst_path = "/mnt/truenas/scratch/chuanchen.luo/data/reid/%s-list/test.lst" % dataset

    query_lst = np.array(load_lst(query_lst_path))
    gallery_lst = np.array(load_lst(gallery_lst_path))

    query_features = sio.loadmat(query_features_path)["feat"]
    gallery_features = sio.loadmat(gallery_features_path)["feat"]

    query_features = normalize(query_features)
    gallery_features = normalize(gallery_features)

    q_feat = query_features[query_id]
    dist = -np.dot(gallery_features, q_feat)
    rank_list = np.argsort(dist)[:150]
    g_feats = gallery_features[rank_list]

    q_record = Record(*query_lst[query_id])
    g_records = [Record(*item) for item in gallery_lst[rank_list]]

    same_list = [
        i for i in range(g_feats.shape[0])
        if q_record.class_id == g_records[i].class_id
    ]
    diff_list = [
        i for i in range(g_feats.shape[0])
        if q_record.class_id != g_records[i].class_id
    ]

    init_embed = TSNE(n_components=2,
                      init="pca",
                      perplexity=8,
                      metric="cosine").fit_transform(g_feats)

    W = np.dot(g_feats, g_feats.T)
    W = np.exp(W / TEMPERATURE)
    W = W / np.sum(W, axis=1, keepdims=True)

    plt.figure(0)
    plt.scatter(init_embed[same_list, 0],
                init_embed[same_list, 1],
                label="same")
    plt.scatter(init_embed[diff_list, 0],
                init_embed[diff_list, 1],
                label="diff")
    plt.legend()
    plt.savefig("%d_tsne_original.png" % query_id)
    plt.close(0)

    for iteration in range(10):
        g_feats = np.dot(W, g_feats)
        res_embed = TSNE(n_components=2,
                         init="pca",
                         perplexity=8,
                         metric="cosine").fit_transform(g_feats)

        plt.figure(iteration)
        plt.scatter(res_embed[same_list, 0],
                    res_embed[same_list, 1],
                    label="same")
        plt.scatter(res_embed[diff_list, 0],
                    res_embed[diff_list, 1],
                    label="diff")
        plt.legend()
        plt.savefig("%d_%02d_tsne_transformed_%.2f.png" %
                    (query_id, iteration, TEMPERATURE))
        plt.close(iteration)

        print("%d-%d" % (query_id, iteration))