Example 1
def __call__(self, img):
    assert isinstance(img, (str, Image.Image)), "img must be a file path or a PIL Image"
    if isinstance(img, str):
        img = read_image(img)  # load the image from its file path
    img = self.norm(img).unsqueeze(0)  # preprocess and add a batch dimension
    with torch.no_grad():
        img = img.to(self.device)
        feature = self.net(img)
    if self.cfg.TEST.FEAT_NORM == 'yes':
        feature = torch.nn.functional.normalize(feature, dim=1, p=2)  # L2-normalize
    return feature
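
This example and the ones below depend on a read_image helper that is not shown on this page. A minimal sketch of what it plausibly looks like (the retry-on-IOError loop is an assumption borrowed from common ReID baselines, not this repository's confirmed code):

from PIL import Image

def read_image(img_path):
    # Sketch (assumption): open an image file as an RGB PIL Image,
    # retrying on transient IOErrors as many ReID baselines do.
    got_img = False
    img = None
    while not got_img:
        try:
            img = Image.open(img_path).convert('RGB')
            got_img = True
        except IOError:
            print("IOError when reading {}, retrying.".format(img_path))
    return img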
Example 2
def inference_samples(model, batch_size):  # model and batch size are passed in; the preprocessing transform is a module-level global
    query_list = list()
    with open(r'/home/zxh/reid-baseline/query_b_list.txt', 'r') as f:
        # txt file listing the test-set query images
        lines = f.readlines()
        for i, line in enumerate(lines):
            data = line.split(" ")
            image_name = data[0].split("/")[1]
            img_file = os.path.join(r'/home/zxh/naic_data/ori_data/初赛B榜测试集/query_b',
                                    image_name)  # test-set query folder
            query_list.append(img_file)

    gallery_list = [os.path.join(r'/home/zxh/naic_data/ori_data/初赛B榜测试集/gallery_b', x) for x in
                    os.listdir(r'/home/zxh/naic_data/ori_data/初赛B榜测试集/gallery_b')]  # test-set gallery folder
    query_num = len(query_list)
    # img_list = list()
    # flip_img_list = list()
    # for q_img in query_list:
    #     q_img = read_image(q_img)
    #     flip_q_img = transforms.RandomHorizontalFlip(p=1.0)(q_img)
    #     q_img = transform(q_img)
    #     flip_q_img = transform(flip_q_img)
    #     img_list.append(q_img)
    #     flip_img_list.append(flip_q_img)
    # for g_img in gallery_list:
    #     g_img = read_image(g_img)
    #     flip_g_img = transforms.RandomHorizontalFlip(p=1.0)(g_img)
    #     g_img = transform(g_img)
    #     flip_g_img = transform(flip_g_img)
    #     img_list.append(g_img)
    #     flip_img_list.append(flip_g_img)
    # img_data = torch.Tensor([t.numpy() for t in img_list])
    # flip_img_data = torch.Tensor([t.numpy() for t in flip_img_list])
    # img_data=img_data.to(device)
    # flip_img_data=flip_img_data.to(device)

    # model = nn.DataParallel(model, device_ids=device_ids)
    model = model.to(device)
    model.eval()
    # number of batches per split (ceil division)
    iter_n_query = (len(query_list) + batch_size - 1) // batch_size
    iter_n_gallery = (len(gallery_list) + batch_size - 1) // batch_size
    all_feature = list()
    local_feature = list()
    flip_all_feature = list()
    flip_local_feature = list()
    # pca = PCA(n_components=512)
    for i in range(iter_n_query):
        img_list = list()
        flip_img_list = list()
        for q_img in query_list[i * batch_size:(i + 1) * batch_size]:
            q_img = read_image(q_img)
            flip_q_img = transforms.RandomHorizontalFlip(p=1.0)(q_img)
            q_img = transform(q_img)
            flip_q_img = transform(flip_q_img)
            img_list.append(q_img)
            flip_img_list.append(flip_q_img)
        img_data = torch.stack(img_list).to(device)
        flip_img_data = torch.stack(flip_img_list).to(device)
        print("batch ----%d----" % (i))
        batch_data = img_data
        flip_batch_data = flip_img_data
        with torch.no_grad():
            # run each forward pass once and reuse both outputs (global, local features)
            batch_out = model(batch_data, None)
            all_feature.append(batch_out[0].detach())
            local_feature.append(batch_out[1].detach())

            flip_batch_out = model(flip_batch_data, None)
            flip_all_feature.append(flip_batch_out[0].detach())
            flip_local_feature.append(flip_batch_out[1].detach())
    for i in range(iter_n_gallery):
        img_list = list()
        flip_img_list = list()
        for g_img in gallery_list[i * batch_size:(i + 1) * batch_size]:
            g_img = read_image(g_img)
            flip_g_img = transforms.RandomHorizontalFlip(p=1.0)(g_img)
            g_img = transform(g_img)
            flip_g_img = transform(flip_g_img)
            img_list.append(g_img)
            flip_img_list.append(flip_g_img)
        img_data = torch.stack(img_list).to(device)
        flip_img_data = torch.stack(flip_img_list).to(device)
        batch_data = img_data
        flip_batch_data = flip_img_data
        with torch.no_grad():
            print("batch ----%d----" % i)
            # run each forward pass once and reuse both outputs (global, local features)
            batch_out = model(batch_data, None)
            all_feature.append(batch_out[0].detach())
            local_feature.append(batch_out[1].detach())

            flip_batch_out = model(flip_batch_data, None)
            flip_all_feature.append(flip_batch_out[0].detach())
            flip_local_feature.append(flip_batch_out[1].detach())

    all_feature = torch.cat(all_feature, dim=0)
    local_feature = torch.cat(local_feature, dim=0)
    flip_all_feature = torch.cat(flip_all_feature, dim=0)
    flip_local_feature = torch.cat(flip_local_feature, dim=0)
    # L2-normalize all features (originally gated by feat_norm == 'yes')
    all_feature = torch.nn.functional.normalize(all_feature, dim=1, p=2)
    local_feature = torch.nn.functional.normalize(local_feature, dim=1, p=2)
    flip_all_feature = torch.nn.functional.normalize(flip_all_feature, dim=1, p=2)
    flip_local_feature = torch.nn.functional.normalize(flip_local_feature, dim=1, p=2)
    gallery_feat = all_feature[query_num:]
    query_feat = all_feature[:query_num]
    flip_gallery_feat = flip_all_feature[query_num:]
    flip_query_feat = flip_all_feature[:query_num]

    local_gallery_feat = local_feature[query_num:]
    local_query_feat = local_feature[:query_num]
    flip_local_gallery_feat = flip_local_feature[query_num:]
    flip_local_query_feat = flip_local_feature[:query_num]

    # gallery_feat=pca.fit_transform(gallery_feat)
    # query_feat=pca.fit_transform(query_feat)
    # flip_gallery_feat=pca.fit_transform(flip_gallery_feat)
    # flip_query_feat=pca.fit_transform(flip_query_feat)

    # local_gallery_feat=pca.fit_transform(local_gallery_feat)
    # local_query_feat=pca.fit_transform(local_query_feat)
    # flip_local_gallery_feat=pca.fit_transform(flip_local_gallery_feat)
    # flip_local_query_feat=pca.fit_transform(flip_local_query_feat)

    global_q_g_dist = compute_dist(
        query_feat, gallery_feat, type='euclidean')
    global_g_g_dist = compute_dist(
        gallery_feat, gallery_feat, type='euclidean')
    global_q_q_dist = compute_dist(
        query_feat, query_feat, type='euclidean')
    flip_global_q_g_dist = compute_dist(
        flip_query_feat, flip_gallery_feat, type='euclidean')
    flip_global_g_g_dist = compute_dist(
        flip_gallery_feat, flip_gallery_feat, type='euclidean')
    flip_global_q_q_dist = compute_dist(
        flip_query_feat, flip_query_feat, type='euclidean')

    local_q_g_dist_all = []
    local_q_q_dist_all = []
    local_g_g_dist_all = []
    flip_local_q_g_dist_all = []
    flip_local_q_q_dist_all = []
    flip_local_g_g_dist_all = []
    # local (per-stripe) distances, PCB / AlignedReID style:
    for i in range(local_query_feat.shape[2]):
        local_q_g_dist = compute_dist(
            local_query_feat[:, :, i], local_gallery_feat[:, :, i],
            type='euclidean')
        local_q_g_dist_all.append(local_q_g_dist)

        local_q_q_dist = compute_dist(
            local_query_feat[:, :, i], local_query_feat[:, :, i],
            type='euclidean')
        local_q_q_dist_all.append(local_q_q_dist)

        local_g_g_dist = compute_dist(
            local_gallery_feat[:, :, i], local_gallery_feat[:, :, i],
            type='euclidean')
        local_g_g_dist_all.append(local_g_g_dist)

        # ---------
        flip_local_q_g_dist = compute_dist(
            flip_local_query_feat[:, :, i], flip_local_gallery_feat[:, :, i],
            type='euclidean')
        flip_local_q_g_dist_all.append(flip_local_q_g_dist)

        flip_local_q_q_dist = compute_dist(
            flip_local_query_feat[:, :, i], flip_local_query_feat[:, :, i],
            type='euclidean')
        flip_local_q_q_dist_all.append(flip_local_q_q_dist)

        flip_local_g_g_dist = compute_dist(
            flip_local_gallery_feat[:, :, i], flip_local_gallery_feat[:, :, i],
            type='euclidean')
        flip_local_g_g_dist_all.append(flip_local_g_g_dist)

    # fuse: start from the global distances, then add averaged per-stripe local distances
    global_local_g_g_dist = global_g_g_dist
    global_local_q_g_dist = global_q_g_dist
    global_local_q_q_dist = global_q_q_dist

    flip_global_local_g_g_dist = flip_global_g_g_dist
    flip_global_local_q_g_dist = flip_global_q_g_dist
    flip_global_local_q_q_dist = flip_global_q_q_dist

    # add each stripe's local distance, averaged over the number of stripes
    for i in range(len(local_g_g_dist_all)):

        global_local_g_g_dist += local_g_g_dist_all[i] / (len(local_g_g_dist_all))
        global_local_q_g_dist += local_q_g_dist_all[i] / (len(local_g_g_dist_all))
        global_local_q_q_dist += local_q_q_dist_all[i] / (len(local_g_g_dist_all))

        flip_global_local_g_g_dist += flip_local_g_g_dist_all[i] / (len(flip_local_g_g_dist_all))
        flip_global_local_q_g_dist += flip_local_q_g_dist_all[i] / (len(flip_local_g_g_dist_all))
        flip_global_local_q_q_dist += flip_local_q_q_dist_all[i] / (len(flip_local_g_g_dist_all))

    global_local_q_g_dist += flip_global_local_q_g_dist
    global_local_q_q_dist += flip_global_local_q_q_dist
    global_local_g_g_dist += flip_global_local_g_g_dist
    # distmat = aligned_re_ranking(global_q_g_dist, global_q_q_dist, global_g_g_dist, k1=6, k2=3, lambda_value=0.80)
    distmat = aligned_re_ranking(global_local_q_g_dist, global_local_q_q_dist, global_local_g_g_dist, k1=6, k2=3,
                                 lambda_value=0.85)
    # distmat = re_rank(query_feat, gallery_feat)  # re-ranking alternative
    # distmat = euclidean_dist(query_feat, gallery_feat)
    # If euclidean_dist is used without re-ranking, convert instead: distmat = distmat.numpy()
    np.save('distmat_82json_rank1_pcb_erase.npy', distmat)
    write_json(dist=distmat, query_name=query_list, gallery_name=gallery_list)
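
Example 2 relies on a compute_dist helper that is defined elsewhere in the repository. A hedged sketch of a compatible Euclidean implementation, written for torch tensors since the features above stay on the GPU (an illustrative assumption, not the repo's exact code):

import torch

def compute_dist(x, y, type='euclidean'):
    # Sketch (assumption): pairwise Euclidean distances between an [m, d]
    # and an [n, d] feature matrix, returned as an [m, n] numpy array.
    assert type == 'euclidean'
    m, n = x.size(0), y.size(0)
    xx = x.pow(2).sum(1, keepdim=True).expand(m, n)
    yy = y.pow(2).sum(1, keepdim=True).expand(n, m).t()
    dist = xx + yy - 2 * torch.mm(x, y.t())  # ||a-b||^2 = ||a||^2 + ||b||^2 - 2ab
    return dist.clamp(min=1e-12).sqrt().cpu().numpy()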
Example 3
def inference_samples(model, batch_size, query_list, gallery_list, query_pid_list, gallery_pid_list, query_camid_list,
                      gallery_camid_list, adjust_rerank, npy_save_path):  # model, preprocessing, and batch size are passed in
    query_num = len(query_list)
    model = nn.DataParallel(model)
    model = model.to(device)
    model.eval()
    # number of batches per split (ceil division)
    iter_n_query = (len(query_list) + batch_size - 1) // batch_size
    iter_n_gallery = (len(gallery_list) + batch_size - 1) // batch_size
    all_feature = list()
    local_feature = list()
    flip_all_feature = list()
    flip_local_feature = list()
    print("ALL_QUERY_BACTH:", str(iter_n_query))
    for i in range(iter_n_query):
        img_list = list()
        flip_img_list = list()
        for q_img in query_list[i * batch_size:(i + 1) * batch_size]:
            q_img = read_image(q_img)
            flip_q_img = transforms.RandomHorizontalFlip(p=1.0)(q_img)
            q_img = transform(q_img)
            flip_q_img = transform(flip_q_img)
            img_list.append(q_img)
            flip_img_list.append(flip_q_img)
        img_data = torch.stack(img_list).to(device)
        flip_img_data = torch.stack(flip_img_list).to(device)
        print("batch ----%d----" % (i))
        batch_data = img_data
        flip_batch_data = flip_img_data
        with torch.no_grad():
            # run each forward pass once and reuse both outputs (global, local features)
            batch_out = model(batch_data, None)
            all_feature.append(batch_out[0].detach())
            local_feature.append(batch_out[1].detach())

            flip_batch_out = model(flip_batch_data, None)
            flip_all_feature.append(flip_batch_out[0].detach())
            flip_local_feature.append(flip_batch_out[1].detach())
    print("ALL_GALLERY_BACTH:", str(iter_n_gallery))
    for i in range(iter_n_gallery):
        img_list = list()
        flip_img_list = list()
        for g_img in gallery_list[i * batch_size:(i + 1) * batch_size]:
            g_img = read_image(g_img)
            flip_g_img = transforms.RandomHorizontalFlip(p=1.0)(g_img)
            g_img = transform(g_img)
            flip_g_img = transform(flip_g_img)
            img_list.append(g_img)
            flip_img_list.append(flip_g_img)
        img_data = torch.stack(img_list).to(device)
        flip_img_data = torch.stack(flip_img_list).to(device)
        batch_data = img_data
        flip_batch_data = flip_img_data
        with torch.no_grad():
            print("batch ----%d----" % i)
            batch_out = model(batch_data, None)
            all_feature.append(batch_out[0].detach())
            local_feature.append(batch_out[1].detach())

            flip_batch_out = model(flip_batch_data, None)
            flip_all_feature.append(flip_batch_out[0].detach())
            flip_local_feature.append(flip_batch_out[1].detach())

    all_feature = torch.cat(all_feature, dim=0)
    local_feature = torch.cat(local_feature, dim=0)
    flip_all_feature = torch.cat(flip_all_feature, dim=0)
    flip_local_feature = torch.cat(flip_local_feature, dim=0)

    # Optional: concatenate flipped features instead of keeping them separate
    # all_feature = torch.cat((all_feature, flip_all_feature), dim=1)
    # local_feature = torch.cat((local_feature, flip_local_feature), dim=1)
    all_feature = torch.nn.functional.normalize(all_feature, dim=1, p=2)
    local_feature = torch.nn.functional.normalize(local_feature, dim=1, p=2)
    flip_all_feature = torch.nn.functional.normalize(flip_all_feature, dim=1, p=2)
    flip_local_feature = torch.nn.functional.normalize(flip_local_feature, dim=1, p=2)

    global_q_g_dist, global_g_g_dist, global_q_q_dist = compute_qg_dist(all_feature, query_num)
    local_q_g_dist, local_g_g_dist, local_q_q_dist = compute_qg_dist(local_feature, query_num)
    flip_global_q_g_dist, flip_global_g_g_dist, flip_global_q_q_dist = compute_qg_dist(flip_all_feature, query_num)
    flip_local_q_g_dist, flip_local_g_g_dist, flip_local_q_q_dist = compute_qg_dist(flip_local_feature, query_num)

    if adjust_rerank:
        dist_list = [global_q_g_dist, global_g_g_dist, global_q_q_dist, local_q_g_dist, local_g_g_dist, local_q_q_dist,
                     flip_global_q_g_dist, flip_global_g_g_dist, flip_global_q_q_dist, flip_local_q_g_dist,
                     flip_local_g_g_dist, flip_local_q_q_dist]
        adjust_rerank_function(dist_list, query_pid_list, gallery_pid_list, query_camid_list, gallery_camid_list)
    else:
        l_w = 0.95
        k1, k2, l = 6, 3, 0.80
        distmat_global = aligned_re_ranking(
            global_q_g_dist, global_q_q_dist, global_g_g_dist, k1=k1, k2=k2,
            lambda_value=l)
        print("Global dismat is computed done")

        distmat_local = aligned_re_ranking(
            local_q_g_dist, local_q_q_dist, local_g_g_dist, k1=k1, k2=k2,
            lambda_value=l)

        print("Local dismat is computed done")

        flip_distmat_global = aligned_re_ranking(
            flip_global_q_g_dist, flip_global_q_q_dist, flip_global_g_g_dist, k1=k1, k2=k2,
            lambda_value=l)

        print("Global_flip dismat is computed done")

        flip_distmat_local = aligned_re_ranking(
            flip_local_q_g_dist, flip_local_q_q_dist, flip_local_g_g_dist, k1=k1, k2=k2,
            lambda_value=l)

        print("Local_flip dismat is computed done")

        distmat = l_w * distmat_global + (1 - l_w) * distmat_local

        flip_distmat = l_w * flip_distmat_global + (1 - l_w) * flip_distmat_local

        distmat = (flip_distmat + distmat) / 2
        np.save(npy_save_path, distmat)
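
compute_qg_dist is not shown either; from its call sites it evidently splits the stacked [query; gallery] features at query_num and returns the three pairwise distance matrices. A sketch under that assumption (for the 3-D local features the real helper presumably also iterates over stripes, as Example 2 does explicitly):

def compute_qg_dist(features, query_num):
    # Sketch (assumption): split stacked features into query and gallery
    # parts and return distances in the order the call sites unpack them:
    # (query-gallery, gallery-gallery, query-query).
    query_feat = features[:query_num]
    gallery_feat = features[query_num:]
    q_g_dist = compute_dist(query_feat, gallery_feat, type='euclidean')
    g_g_dist = compute_dist(gallery_feat, gallery_feat, type='euclidean')
    q_q_dist = compute_dist(query_feat, query_feat, type='euclidean')
    return q_g_dist, g_g_dist, q_q_dist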
Example 4
def my_inference(model, transform, batch_size):  # model, preprocessing transform, and batch size are passed in
    query_list = list()
    # with open(data_root + 'query_a_list.txt', 'r') as f:
    #     # txt file listing the test-set queries
    #     lines = f.readlines()
    #     for i, line in enumerate(lines):
    #         data = line.split(" ")
    #         image_name = data[0].split("/")[1]
    #         img_file = os.path.join(data_root + 'query_b', image_name)  # test-set query folder
    #         query_list.append(img_file)

    query_list = [
        os.path.join(data_root + 'query_b', x) for x in  # test-set query folder
        os.listdir(data_root + 'query_b')
    ]
    gallery_list = [
        os.path.join(data_root + 'gallery_b', x) for x in  # test-set gallery folder
        os.listdir(data_root + 'gallery_b')
    ]
    query_num = len(query_list)
    img_list = list()
    for q_img in query_list:
        q_img = read_image(q_img)
        q_img = transform(q_img)
        img_list.append(q_img)
    for g_img in gallery_list:
        g_img = read_image(g_img)
        g_img = transform(g_img)
        img_list.append(g_img)
    # img_list = img_list[:1000]
    iter_n = (len(img_list) + batch_size - 1) // batch_size  # number of batches (ceil division)
    # img_list = img_list[0:iter_n*batch_size]
    print(iter_n)

    img_data = torch.stack(img_list).cuda()  # all query + gallery images as one GPU tensor
    model = model.to(device)
    model.eval()

    all_feature = list()
    for i in range(iter_n):
        print("batch ----%d----" % (i))
        batch_data = img_data[i * batch_size:(i + 1) * batch_size]
        with torch.no_grad():
            batch_feature = model(batch_data).detach().cpu()
            # print(batch_feature)
            # batch_feature = model( batch_data ).detach().cuda()
            all_feature.append(batch_feature)

    print('done')
    all_feature = torch.cat(all_feature)
    gallery_feat = all_feature[query_num:]
    query_feat = all_feature[:query_num]

    distmat = re_ranking(query_feat,
                         gallery_feat,
                         k1=20,
                         k2=6,
                         lambda_value=0.3)  # k-reciprocal re-ranking
    # If euclidean_dist is used without re-ranking, convert instead: distmat = distmat.numpy()
    num_q, num_g = distmat.shape
    print(num_q)
    indices = np.argsort(distmat, axis=1)
    top200_indices = indices[:, :200]  # the 200 closest gallery images per query (smallest distances)
    print(top200_indices)

    res_dict = dict()
    for q_idx in range(num_q):
        print(query_list[q_idx])
        filename = query_list[q_idx][query_list[q_idx].rindex("/") + 1:]
        top200_files = [
            gallery_list[i][gallery_list[i].rindex("/") + 1:]
            for i in top200_indices[q_idx]
        ]
        res_dict[filename] = top200_files

    with open(r'submission_B_4.json', 'w', encoding='utf-8') as f:  # submission file
        json.dump(res_dict, f)
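
The write_json call in Example 2 is not defined on this page; given the submission format that Example 4 builds by hand, it plausibly does the following (the function body and the out_path default are illustrative assumptions):

import json
import os
import numpy as np

def write_json(dist, query_name, gallery_name, out_path='submission.json'):
    # Sketch (assumption): for each query, rank gallery images by ascending
    # distance and write the 200 closest file names, matching Example 4.
    indices = np.argsort(dist, axis=1)[:, :200]
    res_dict = {}
    for q_idx, q_path in enumerate(query_name):
        res_dict[os.path.basename(q_path)] = [
            os.path.basename(gallery_name[i]) for i in indices[q_idx]
        ]
    with open(out_path, 'w', encoding='utf-8') as f:
        json.dump(res_dict, f)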